Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/BurntSushi/toml/.gitignore5
-rw-r--r--vendor/github.com/BurntSushi/toml/.travis.yml15
-rw-r--r--vendor/github.com/BurntSushi/toml/COMPATIBLE3
-rw-r--r--vendor/github.com/BurntSushi/toml/COPYING27
-rw-r--r--vendor/github.com/BurntSushi/toml/Makefile19
-rw-r--r--vendor/github.com/BurntSushi/toml/README.md18
-rw-r--r--vendor/github.com/BurntSushi/toml/decode.go48
-rw-r--r--vendor/github.com/BurntSushi/toml/decode_meta.go3
-rw-r--r--vendor/github.com/BurntSushi/toml/doc.go2
-rw-r--r--vendor/github.com/BurntSushi/toml/encode.go81
-rw-r--r--vendor/github.com/BurntSushi/toml/lex.go494
-rw-r--r--vendor/github.com/BurntSushi/toml/parse.go133
-rw-r--r--vendor/github.com/BurntSushi/toml/session.vim1
-rw-r--r--vendor/github.com/BurntSushi/toml/type_fields.go9
-rw-r--r--vendor/github.com/DataDog/zstd/.travis.yml31
-rw-r--r--vendor/github.com/DataDog/zstd/LICENSE27
-rw-r--r--vendor/github.com/DataDog/zstd/README.md120
-rw-r--r--vendor/github.com/DataDog/zstd/ZSTD_LICENSE30
-rw-r--r--vendor/github.com/DataDog/zstd/bitstream.h455
-rw-r--r--vendor/github.com/DataDog/zstd/compiler.h140
-rw-r--r--vendor/github.com/DataDog/zstd/cover.c1126
-rw-r--r--vendor/github.com/DataDog/zstd/cover.h112
-rw-r--r--vendor/github.com/DataDog/zstd/cpu.h215
-rw-r--r--vendor/github.com/DataDog/zstd/debug.c44
-rw-r--r--vendor/github.com/DataDog/zstd/debug.h134
-rw-r--r--vendor/github.com/DataDog/zstd/divsufsort.c1913
-rw-r--r--vendor/github.com/DataDog/zstd/divsufsort.h67
-rw-r--r--vendor/github.com/DataDog/zstd/entropy_common.c236
-rw-r--r--vendor/github.com/DataDog/zstd/error_private.c54
-rw-r--r--vendor/github.com/DataDog/zstd/error_private.h76
-rw-r--r--vendor/github.com/DataDog/zstd/errors.go35
-rw-r--r--vendor/github.com/DataDog/zstd/fastcover.c740
-rw-r--r--vendor/github.com/DataDog/zstd/fse.h708
-rw-r--r--vendor/github.com/DataDog/zstd/fse_compress.c721
-rw-r--r--vendor/github.com/DataDog/zstd/fse_decompress.c309
-rw-r--r--vendor/github.com/DataDog/zstd/hist.c203
-rw-r--r--vendor/github.com/DataDog/zstd/hist.h95
-rw-r--r--vendor/github.com/DataDog/zstd/huf.h358
-rw-r--r--vendor/github.com/DataDog/zstd/huf_compress.c798
-rw-r--r--vendor/github.com/DataDog/zstd/huf_decompress.c1232
-rw-r--r--vendor/github.com/DataDog/zstd/mem.h380
-rw-r--r--vendor/github.com/DataDog/zstd/pool.c340
-rw-r--r--vendor/github.com/DataDog/zstd/pool.h84
-rw-r--r--vendor/github.com/DataDog/zstd/threading.c75
-rw-r--r--vendor/github.com/DataDog/zstd/threading.h123
-rw-r--r--vendor/github.com/DataDog/zstd/travis_test_32.sh18
-rw-r--r--vendor/github.com/DataDog/zstd/update.txt56
-rw-r--r--vendor/github.com/DataDog/zstd/xxhash.c876
-rw-r--r--vendor/github.com/DataDog/zstd/xxhash.h305
-rw-r--r--vendor/github.com/DataDog/zstd/zbuff.h213
-rw-r--r--vendor/github.com/DataDog/zstd/zbuff_common.c26
-rw-r--r--vendor/github.com/DataDog/zstd/zbuff_compress.c147
-rw-r--r--vendor/github.com/DataDog/zstd/zbuff_decompress.c75
-rw-r--r--vendor/github.com/DataDog/zstd/zdict.c1111
-rw-r--r--vendor/github.com/DataDog/zstd/zdict.h277
-rw-r--r--vendor/github.com/DataDog/zstd/zstd.go147
-rw-r--r--vendor/github.com/DataDog/zstd/zstd.h1913
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_common.c83
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_compress.c4417
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_compress_internal.h863
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_ddict.c240
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_ddict.h44
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_decompress.c1765
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_decompress_block.c1311
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_decompress_block.h59
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_decompress_internal.h175
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_double_fast.c499
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_double_fast.h38
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_errors.h93
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_fast.c470
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_fast.h37
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_internal.h319
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_lazy.c1106
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_lazy.h67
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_ldm.c597
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_ldm.h105
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_legacy.h411
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_opt.c1233
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_opt.h56
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_stream.go294
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v01.c2158
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v01.h94
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v02.c3508
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v02.h93
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v03.c3150
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v03.h93
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v04.c3634
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v04.h142
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v05.c4039
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v05.h162
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v06.c4152
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v06.h172
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v07.c4535
-rw-r--r--vendor/github.com/DataDog/zstd/zstd_v07.h187
-rw-r--r--vendor/github.com/DataDog/zstd/zstdmt_compress.c2099
-rw-r--r--vendor/github.com/DataDog/zstd/zstdmt_compress.h191
-rw-r--r--vendor/github.com/Microsoft/go-winio/.gitignore1
-rw-r--r--vendor/github.com/Microsoft/hcsshim/.gitignore1
-rw-r--r--vendor/github.com/Microsoft/hcsshim/.gometalinter.json17
-rw-r--r--vendor/github.com/Microsoft/hcsshim/appveyor.yml29
-rw-r--r--vendor/github.com/Microsoft/hcsshim/functional_tests.ps112
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go17
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go40
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go3
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go9
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go54
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go172
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go32
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go121
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go4
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go2
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go6
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go4
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go6
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go5
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go2
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go7
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go3
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go25
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go26
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go25
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go25
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go26
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go24
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go33
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go19
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go28
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go24
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go26
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go4
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go20
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go23
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go25
-rw-r--r--vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go91
-rw-r--r--vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go943
-rw-r--r--vendor/github.com/Microsoft/hcsshim/vendor.conf21
-rw-r--r--vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go6
-rw-r--r--vendor/github.com/VividCortex/ewma/.gitignore2
-rw-r--r--vendor/github.com/beorn7/perks/LICENSE (renamed from vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE)2
-rw-r--r--vendor/github.com/beorn7/perks/quantile/exampledata.txt2388
-rw-r--r--vendor/github.com/beorn7/perks/quantile/stream.go316
-rw-r--r--vendor/github.com/blang/semver/.travis.yml21
-rw-r--r--vendor/github.com/blang/semver/README.md5
-rw-r--r--vendor/github.com/blang/semver/package.json17
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/.gitignore5
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/.travis.yml25
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/Makefile60
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/README.md8
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/test/main.go133
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go192
-rw-r--r--vendor/github.com/checkpoint-restore/go-criu/test/piggie.c57
-rw-r--r--vendor/github.com/containerd/cgroups/README.md124
-rw-r--r--vendor/github.com/containerd/cgroups/blkio.go337
-rw-r--r--vendor/github.com/containerd/cgroups/cgroup.go529
-rw-r--r--vendor/github.com/containerd/cgroups/control.go88
-rw-r--r--vendor/github.com/containerd/cgroups/cpu.go129
-rw-r--r--vendor/github.com/containerd/cgroups/cpuacct.go121
-rw-r--r--vendor/github.com/containerd/cgroups/cpuset.go159
-rw-r--r--vendor/github.com/containerd/cgroups/devices.go93
-rw-r--r--vendor/github.com/containerd/cgroups/errors.go47
-rw-r--r--vendor/github.com/containerd/cgroups/freezer.go82
-rw-r--r--vendor/github.com/containerd/cgroups/hierarchy.go20
-rw-r--r--vendor/github.com/containerd/cgroups/hugetlb.go109
-rw-r--r--vendor/github.com/containerd/cgroups/memory.go325
-rw-r--r--vendor/github.com/containerd/cgroups/metrics.pb.go4288
-rw-r--r--vendor/github.com/containerd/cgroups/metrics.proto123
-rw-r--r--vendor/github.com/containerd/cgroups/named.go39
-rw-r--r--vendor/github.com/containerd/cgroups/net_cls.go58
-rw-r--r--vendor/github.com/containerd/cgroups/net_prio.go66
-rw-r--r--vendor/github.com/containerd/cgroups/opts.go61
-rw-r--r--vendor/github.com/containerd/cgroups/paths.go107
-rw-r--r--vendor/github.com/containerd/cgroups/perf_event.go37
-rw-r--r--vendor/github.com/containerd/cgroups/pids.go85
-rw-r--r--vendor/github.com/containerd/cgroups/rdma.go153
-rw-r--r--vendor/github.com/containerd/cgroups/state.go28
-rw-r--r--vendor/github.com/containerd/cgroups/subsystem.go112
-rw-r--r--vendor/github.com/containerd/cgroups/systemd.go160
-rw-r--r--vendor/github.com/containerd/cgroups/ticks.go26
-rw-r--r--vendor/github.com/containerd/cgroups/utils.go324
-rw-r--r--vendor/github.com/containerd/cgroups/v1.go81
-rw-r--r--vendor/github.com/containerd/continuity/AUTHORS16
-rw-r--r--vendor/github.com/containerd/continuity/README.md84
-rw-r--r--vendor/github.com/containerd/continuity/vendor.conf13
-rw-r--r--vendor/github.com/containernetworking/cni/README.md205
-rw-r--r--vendor/github.com/containernetworking/cni/libcni/api.go1
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/args.go68
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go29
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go4
-rw-r--r--vendor/github.com/containernetworking/plugins/README.md26
-rw-r--r--vendor/github.com/containernetworking/plugins/pkg/ns/README.md15
-rw-r--r--vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go93
-rw-r--r--vendor/github.com/containers/buildah/.gitignore4
-rw-r--r--vendor/github.com/containers/buildah/.papr.sh43
-rw-r--r--vendor/github.com/containers/buildah/.papr.yml87
-rw-r--r--vendor/github.com/containers/buildah/.travis.yml93
-rw-r--r--vendor/github.com/containers/buildah/CHANGELOG.md976
-rw-r--r--vendor/github.com/containers/buildah/CONTRIBUTING.md143
-rw-r--r--vendor/github.com/containers/buildah/MAINTAINERS4
-rw-r--r--vendor/github.com/containers/buildah/Makefile143
-rw-r--r--vendor/github.com/containers/buildah/add.go181
-rw-r--r--vendor/github.com/containers/buildah/btrfs_installed_tag.sh7
-rw-r--r--vendor/github.com/containers/buildah/btrfs_tag.sh7
-rw-r--r--vendor/github.com/containers/buildah/buildah.go2
-rw-r--r--vendor/github.com/containers/buildah/changelog.txt896
-rw-r--r--vendor/github.com/containers/buildah/chroot/run.go33
-rw-r--r--vendor/github.com/containers/buildah/config.go11
-rw-r--r--vendor/github.com/containers/buildah/developmentplan.md13
-rw-r--r--vendor/github.com/containers/buildah/docker/AUTHORS1788
-rw-r--r--vendor/github.com/containers/buildah/image.go2
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go130
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/util.go4
-rw-r--r--vendor/github.com/containers/buildah/install.md385
-rw-r--r--vendor/github.com/containers/buildah/libdm_tag.sh15
-rw-r--r--vendor/github.com/containers/buildah/ostree_tag.sh6
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user.go16
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go4
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go28
-rw-r--r--vendor/github.com/containers/buildah/pkg/overlay/overlay.go31
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go20
-rw-r--r--vendor/github.com/containers/buildah/pkg/secrets/secrets.go76
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare.c13
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare.go45
-rw-r--r--vendor/github.com/containers/buildah/release.sh73
-rw-r--r--vendor/github.com/containers/buildah/run_linux.go98
-rw-r--r--vendor/github.com/containers/buildah/selinux_tag.sh4
-rw-r--r--vendor/github.com/containers/buildah/troubleshooting.md110
-rw-r--r--vendor/github.com/containers/buildah/util.go168
-rw-r--r--vendor/github.com/containers/buildah/util/util.go46
-rw-r--r--vendor/github.com/containers/buildah/vendor.conf4
-rw-r--r--vendor/github.com/containers/image/README.md83
-rw-r--r--vendor/github.com/containers/image/copy/copy.go35
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_src.go74
-rw-r--r--vendor/github.com/containers/image/docker/reference/README.md2
-rw-r--r--vendor/github.com/containers/image/docker/reference/normalize.go29
-rw-r--r--vendor/github.com/containers/image/docker/reference/reference.go4
-rw-r--r--vendor/github.com/containers/image/docker/reference/regexp.go10
-rw-r--r--vendor/github.com/containers/image/docker/tarfile/src.go83
-rw-r--r--vendor/github.com/containers/image/manifest/docker_schema1.go1
-rw-r--r--vendor/github.com/containers/image/manifest/docker_schema2.go1
-rw-r--r--vendor/github.com/containers/image/manifest/oci.go1
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_dest.go4
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_src.go2
-rw-r--r--vendor/github.com/containers/image/ostree/ostree_transport.go2
-rw-r--r--vendor/github.com/containers/image/pkg/docker/config/config.go4
-rw-r--r--vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go240
-rw-r--r--vendor/github.com/containers/image/storage/storage_image.go13
-rw-r--r--vendor/github.com/containers/image/transports/alltransports/ostree.go2
-rw-r--r--vendor/github.com/containers/image/transports/alltransports/ostree_stub.go2
-rw-r--r--vendor/github.com/containers/image/types/types.go1
-rw-r--r--vendor/github.com/containers/image/vendor.conf51
-rw-r--r--vendor/github.com/containers/image/version/version.go8
-rw-r--r--vendor/github.com/containers/psgo/.gitignore15
-rw-r--r--vendor/github.com/containers/psgo/.travis.yml19
-rw-r--r--vendor/github.com/containers/psgo/Makefile60
-rw-r--r--vendor/github.com/containers/psgo/go.mod2
-rw-r--r--vendor/github.com/containers/psgo/go.sum23
-rw-r--r--vendor/github.com/containers/psgo/internal/host/host.go20
-rw-r--r--vendor/github.com/containers/psgo/internal/host/host_cgo.go37
-rw-r--r--vendor/github.com/containers/psgo/internal/host/host_nocgo.go84
-rw-r--r--vendor/github.com/containers/psgo/internal/process/process.go12
-rw-r--r--vendor/github.com/containers/storage/.cirrus.yml96
-rw-r--r--vendor/github.com/containers/storage/.dockerignore3
-rw-r--r--vendor/github.com/containers/storage/.gitignore29
-rw-r--r--vendor/github.com/containers/storage/.mailmap254
-rw-r--r--vendor/github.com/containers/storage/.travis.yml71
-rw-r--r--vendor/github.com/containers/storage/AUTHORS1522
-rw-r--r--vendor/github.com/containers/storage/CONTRIBUTING.md144
-rw-r--r--vendor/github.com/containers/storage/Makefile92
-rw-r--r--vendor/github.com/containers/storage/VERSION1
-rw-r--r--vendor/github.com/containers/storage/Vagrantfile25
-rw-r--r--vendor/github.com/containers/storage/containers.go4
-rw-r--r--vendor/github.com/containers/storage/drivers/aufs/aufs.go25
-rw-r--r--vendor/github.com/containers/storage/drivers/btrfs/btrfs.go10
-rw-r--r--vendor/github.com/containers/storage/drivers/chown.go3
-rw-r--r--vendor/github.com/containers/storage/drivers/devmapper/driver.go8
-rw-r--r--vendor/github.com/containers/storage/drivers/driver.go15
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go124
-rw-r--r--vendor/github.com/containers/storage/drivers/quota/projectquota.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go32
-rw-r--r--vendor/github.com/containers/storage/drivers/vfs/driver.go8
-rw-r--r--vendor/github.com/containers/storage/drivers/windows/windows.go4
-rw-r--r--vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS2
-rw-r--r--vendor/github.com/containers/storage/drivers/zfs/zfs.go10
-rw-r--r--vendor/github.com/containers/storage/images.go9
-rw-r--r--vendor/github.com/containers/storage/layers.go9
-rw-r--r--vendor/github.com/containers/storage/layers_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/lockfile.go4
-rw-r--r--vendor/github.com/containers/storage/lockfile_linux.go20
-rw-r--r--vendor/github.com/containers/storage/lockfile_otherunix.go19
-rw-r--r--vendor/github.com/containers/storage/lockfile_unix.go41
-rw-r--r--vendor/github.com/containers/storage/lockfile_windows.go6
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go9
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive_cgo.go17
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive_nocgo.go16
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/example_changes.go97
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive.go36
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go130
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go9
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go6
-rw-r--r--vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go1
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools.go11
-rw-r--r--vendor/github.com/containers/storage/pkg/system/stat_unix.go12
-rw-r--r--vendor/github.com/containers/storage/storage.conf130
-rw-r--r--vendor/github.com/containers/storage/store.go11
-rw-r--r--vendor/github.com/containers/storage/utils.go26
-rw-r--r--vendor/github.com/containers/storage/vendor.conf1
-rw-r--r--vendor/github.com/coreos/go-iptables/README.md10
-rw-r--r--vendor/github.com/coreos/go-iptables/iptables/iptables.go10
-rw-r--r--vendor/github.com/coreos/go-systemd/README.md69
-rw-r--r--vendor/github.com/coreos/go-systemd/dbus/dbus.go6
-rw-r--r--vendor/github.com/coreos/go-systemd/dbus/methods.go14
-rw-r--r--vendor/github.com/coreos/go-systemd/dbus/subscription.go2
-rw-r--r--vendor/github.com/coreos/go-systemd/journal/journal.go143
-rw-r--r--vendor/github.com/coreos/go-systemd/sdjournal/journal.go56
-rw-r--r--vendor/github.com/coreos/pkg/README.md4
-rw-r--r--vendor/github.com/cri-o/ocicni/README.md3
-rw-r--r--vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go165
-rw-r--r--vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go40
-rw-r--r--vendor/github.com/cri-o/ocicni/vendor.conf13
-rw-r--r--vendor/github.com/cyphar/filepath-securejoin/.travis.yml19
-rw-r--r--vendor/github.com/cyphar/filepath-securejoin/VERSION1
-rw-r--r--vendor/github.com/cyphar/filepath-securejoin/join.go3
-rw-r--r--vendor/github.com/davecgh/go-spew/LICENSE2
-rw-r--r--vendor/github.com/davecgh/go-spew/README.md205
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypass.go187
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypasssafe.go2
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/common.go2
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/dump.go10
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/format.go4
-rw-r--r--vendor/github.com/docker/distribution/.gitignore38
-rw-r--r--vendor/github.com/docker/distribution/.gometalinter.json16
-rw-r--r--vendor/github.com/docker/distribution/.mailmap32
-rw-r--r--vendor/github.com/docker/distribution/.travis.yml51
-rw-r--r--vendor/github.com/docker/distribution/BUILDING.md117
-rw-r--r--vendor/github.com/docker/distribution/CONTRIBUTING.md148
-rw-r--r--vendor/github.com/docker/distribution/Dockerfile23
-rw-r--r--vendor/github.com/docker/distribution/MAINTAINERS243
-rw-r--r--vendor/github.com/docker/distribution/Makefile102
-rw-r--r--vendor/github.com/docker/distribution/ROADMAP.md267
-rw-r--r--vendor/github.com/docker/distribution/blobs.go10
-rw-r--r--vendor/github.com/docker/distribution/errors.go4
-rw-r--r--vendor/github.com/docker/distribution/metrics/prometheus.go13
-rw-r--r--vendor/github.com/docker/distribution/registry.go20
-rw-r--r--vendor/github.com/docker/distribution/registry/api/errcode/handler.go6
-rw-r--r--vendor/github.com/docker/distribution/registry/api/v2/routes.go9
-rw-r--r--vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go8
-rw-r--r--vendor/github.com/docker/distribution/registry/client/repository.go78
-rw-r--r--vendor/github.com/docker/distribution/registry/client/transport/http_reader.go9
-rw-r--r--vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go10
-rw-r--r--vendor/github.com/docker/distribution/vendor.conf24
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/README.md82
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/credentials/version.go2
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c228
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go196
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h14
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go13
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go41
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c162
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go118
-rw-r--r--vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h13
-rw-r--r--vendor/github.com/docker/docker/AUTHORS2016
-rw-r--r--vendor/github.com/docker/docker/README.md57
-rw-r--r--vendor/github.com/docker/docker/api/swagger-gen.yaml12
-rw-r--r--vendor/github.com/docker/docker/api/swagger.yaml10390
-rw-r--r--vendor/github.com/docker/docker/contrib/README.md4
-rw-r--r--vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c10
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/acct.c16
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/exit32.s7
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/ns.c63
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/raw.c14
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/setgid.c11
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/setuid.c11
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/socket.c30
-rw-r--r--vendor/github.com/docker/docker/contrib/syscall-test/userns.c63
-rw-r--r--vendor/github.com/docker/docker/opts/address_pools.go84
-rw-r--r--vendor/github.com/docker/docker/opts/env.go48
-rw-r--r--vendor/github.com/docker/docker/opts/hosts.go176
-rw-r--r--vendor/github.com/docker/docker/opts/hosts_unix.go8
-rw-r--r--vendor/github.com/docker/docker/opts/hosts_windows.go4
-rw-r--r--vendor/github.com/docker/docker/opts/ip.go47
-rw-r--r--vendor/github.com/docker/docker/opts/opts.go337
-rw-r--r--vendor/github.com/docker/docker/opts/opts_unix.go6
-rw-r--r--vendor/github.com/docker/docker/opts/opts_windows.go56
-rw-r--r--vendor/github.com/docker/docker/opts/quotedstring.go37
-rw-r--r--vendor/github.com/docker/docker/opts/runtime.go79
-rw-r--r--vendor/github.com/docker/docker/opts/ulimit.go81
-rw-r--r--vendor/github.com/docker/docker/pkg/README.md11
-rw-r--r--vendor/github.com/docker/docker/profiles/seccomp/default.json793
-rw-r--r--vendor/github.com/docker/docker/profiles/seccomp/generate.go32
-rw-r--r--vendor/github.com/docker/docker/vendor.conf161
-rw-r--r--vendor/github.com/docker/go-connections/README.md13
-rw-r--r--vendor/github.com/docker/go-metrics/CONTRIBUTING.md55
-rw-r--r--vendor/github.com/docker/go-metrics/LICENSE191
-rw-r--r--vendor/github.com/docker/go-metrics/LICENSE.docs425
-rw-r--r--vendor/github.com/docker/go-metrics/NOTICE16
-rw-r--r--vendor/github.com/docker/go-metrics/README.md91
-rw-r--r--vendor/github.com/docker/go-metrics/counter.go52
-rw-r--r--vendor/github.com/docker/go-metrics/docs.go3
-rw-r--r--vendor/github.com/docker/go-metrics/gauge.go72
-rw-r--r--vendor/github.com/docker/go-metrics/handler.go74
-rw-r--r--vendor/github.com/docker/go-metrics/helpers.go10
-rw-r--r--vendor/github.com/docker/go-metrics/namespace.go315
-rw-r--r--vendor/github.com/docker/go-metrics/register.go15
-rw-r--r--vendor/github.com/docker/go-metrics/timer.go85
-rw-r--r--vendor/github.com/docker/go-metrics/unit.go12
-rw-r--r--vendor/github.com/docker/go-units/CONTRIBUTING.md67
-rw-r--r--vendor/github.com/docker/go-units/MAINTAINERS46
-rw-r--r--vendor/github.com/docker/go-units/circle.yml11
-rw-r--r--vendor/github.com/docker/go-units/duration.go2
-rw-r--r--vendor/github.com/docker/go-units/size.go2
-rw-r--r--vendor/github.com/docker/go-units/ulimit.go9
-rw-r--r--vendor/github.com/docker/libnetwork/README.md100
-rw-r--r--vendor/github.com/docker/libnetwork/ipamutils/utils.go135
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go15
-rw-r--r--vendor/github.com/docker/libnetwork/types/types.go27
-rw-r--r--vendor/github.com/docker/libnetwork/vendor.conf50
-rw-r--r--vendor/github.com/docker/libtrust/CONTRIBUTING.md13
-rw-r--r--vendor/github.com/docker/libtrust/MAINTAINERS3
-rw-r--r--vendor/github.com/docker/spdystream/CONTRIBUTING.md13
-rw-r--r--vendor/github.com/docker/spdystream/MAINTAINERS28
-rw-r--r--vendor/github.com/etcd-io/bbolt/.gitignore5
-rw-r--r--vendor/github.com/etcd-io/bbolt/.travis.yml17
-rw-r--r--vendor/github.com/etcd-io/bbolt/Makefile38
-rw-r--r--vendor/github.com/etcd-io/bbolt/bolt_riscv64.go12
-rw-r--r--vendor/github.com/etcd-io/bbolt/db.go12
-rw-r--r--vendor/github.com/etcd-io/bbolt/freelist.go22
-rw-r--r--vendor/github.com/etcd-io/bbolt/tx.go27
-rw-r--r--vendor/github.com/fatih/camelcase/.travis.yml3
-rw-r--r--vendor/github.com/fsnotify/fsnotify/.editorconfig5
-rw-r--r--vendor/github.com/fsnotify/fsnotify/.gitignore6
-rw-r--r--vendor/github.com/fsnotify/fsnotify/.travis.yml30
-rw-r--r--vendor/github.com/fsnotify/fsnotify/AUTHORS52
-rw-r--r--vendor/github.com/fsnotify/fsnotify/CHANGELOG.md317
-rw-r--r--vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md77
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.gitignore4
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/.travis.yml33
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/AUTHORS200
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/Gopkg.toml23
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/LICENSE2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/Makefile41
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/README.md (renamed from vendor/github.com/fsouza/go-dockerclient/README.markdown)53
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/appveyor.yml33
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/auth.go28
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/client.go33
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/client_unix.go9
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/client_windows.go11
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/container.go33
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/event.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/exec.go14
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.mod50
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/go.sum60
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/image.go40
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go56
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go4
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go125
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/term/term.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/misc.go39
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/plugin.go13
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/registry_auth.go10
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/swarm_service.go2
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/system.go72
-rw-r--r--vendor/github.com/fsouza/go-dockerclient/tls.go2
-rw-r--r--vendor/github.com/ghodss/yaml/.gitignore20
-rw-r--r--vendor/github.com/ghodss/yaml/.travis.yml7
-rw-r--r--vendor/github.com/godbus/dbus/.travis.yml46
-rw-r--r--vendor/github.com/godbus/dbus/CONTRIBUTING.md50
-rw-r--r--vendor/github.com/godbus/dbus/MAINTAINERS3
-rw-r--r--vendor/github.com/godbus/dbus/README.markdown2
-rw-r--r--vendor/github.com/godbus/dbus/auth.go1
-rw-r--r--vendor/github.com/godbus/dbus/auth_anonymous.go16
-rw-r--r--vendor/github.com/godbus/dbus/call.go26
-rw-r--r--vendor/github.com/godbus/dbus/conn.go616
-rw-r--r--vendor/github.com/godbus/dbus/conn_darwin.go4
-rw-r--r--vendor/github.com/godbus/dbus/conn_other.go63
-rw-r--r--vendor/github.com/godbus/dbus/conn_unix.go18
-rw-r--r--vendor/github.com/godbus/dbus/conn_windows.go15
-rw-r--r--vendor/github.com/godbus/dbus/decoder.go9
-rw-r--r--vendor/github.com/godbus/dbus/default_handler.go52
-rw-r--r--vendor/github.com/godbus/dbus/export.go17
-rw-r--r--vendor/github.com/godbus/dbus/go.mod1
-rw-r--r--vendor/github.com/godbus/dbus/object.go118
-rw-r--r--vendor/github.com/godbus/dbus/server_interfaces.go10
-rw-r--r--vendor/github.com/godbus/dbus/transport_generic.go2
-rw-r--r--vendor/github.com/godbus/dbus/transport_nonce_tcp.go39
-rw-r--r--vendor/github.com/godbus/dbus/transport_unix.go30
-rw-r--r--vendor/github.com/gogo/protobuf/AUTHORS15
-rw-r--r--vendor/github.com/gogo/protobuf/CONTRIBUTORS23
-rw-r--r--vendor/github.com/gogo/protobuf/README298
-rw-r--r--vendor/github.com/gogo/protobuf/Readme.md159
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/doc.go169
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go872
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/gogo.proto144
-rw-r--r--vendor/github.com/gogo/protobuf/gogoproto/helper.go415
-rw-r--r--vendor/github.com/gogo/protobuf/proto/Makefile43
-rw-r--r--vendor/github.com/gogo/protobuf/proto/decode.go1
-rw-r--r--vendor/github.com/gogo/protobuf/proto/deprecated.go (renamed from vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto)55
-rw-r--r--vendor/github.com/gogo/protobuf/proto/extensions.go2
-rw-r--r--vendor/github.com/gogo/protobuf/proto/lib.go20
-rw-r--r--vendor/github.com/gogo/protobuf/proto/message_set.go137
-rw-r--r--vendor/github.com/gogo/protobuf/proto/properties.go9
-rw-r--r--vendor/github.com/gogo/protobuf/proto/table_marshal.go2
-rw-r--r--vendor/github.com/gogo/protobuf/proto/table_unmarshal.go4
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto154
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto210
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto167
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto872
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto117
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto252
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto96
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto135
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto187
-rw-r--r--vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto118
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go118
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go2806
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go744
-rw-r--r--vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go390
-rw-r--r--vendor/github.com/gogo/protobuf/test/issue270/a/a1.proto12
-rw-r--r--vendor/github.com/gogo/protobuf/test/issue270/a/a2.proto12
-rw-r--r--vendor/github.com/gogo/protobuf/test/issue270/b/b.proto6
-rw-r--r--vendor/github.com/golang/glog/README44
-rw-r--r--vendor/github.com/golang/protobuf/AUTHORS3
-rw-r--r--vendor/github.com/golang/protobuf/CONTRIBUTORS3
-rw-r--r--vendor/github.com/golang/protobuf/README.md281
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode.go1
-rw-r--r--vendor/github.com/golang/protobuf/proto/deprecated.go (renamed from vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto)49
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal.go3
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go78
-rw-r--r--vendor/github.com/golang/protobuf/proto/lib.go38
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set.go137
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_reflect.go5
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_unsafe.go15
-rw-r--r--vendor/github.com/golang/protobuf/proto/properties.go31
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_marshal.go45
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_unmarshal.go72
-rw-r--r--vendor/github.com/google/gofuzz/.travis.yml13
-rw-r--r--vendor/github.com/google/gofuzz/CONTRIBUTING.md67
-rw-r--r--vendor/github.com/google/gofuzz/go.mod3
-rw-r--r--vendor/github.com/gorilla/context/README.md10
-rw-r--r--vendor/github.com/gorilla/context/context.go143
-rw-r--r--vendor/github.com/gorilla/context/doc.go88
-rw-r--r--vendor/github.com/gorilla/mux/.travis.yml24
-rw-r--r--vendor/github.com/gorilla/mux/AUTHORS8
-rw-r--r--vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md11
-rw-r--r--vendor/github.com/gorilla/mux/LICENSE2
-rw-r--r--vendor/github.com/gorilla/mux/README.md16
-rw-r--r--vendor/github.com/gorilla/mux/context.go (renamed from vendor/github.com/gorilla/mux/context_native.go)6
-rw-r--r--vendor/github.com/gorilla/mux/context_gorilla.go26
-rw-r--r--vendor/github.com/gorilla/mux/go.mod1
-rw-r--r--vendor/github.com/gorilla/mux/mux.go129
-rw-r--r--vendor/github.com/gorilla/mux/regexp.go51
-rw-r--r--vendor/github.com/gorilla/mux/route.go141
-rw-r--r--vendor/github.com/hashicorp/go-multierror/.travis.yml12
-rw-r--r--vendor/github.com/hashicorp/go-multierror/Makefile31
-rw-r--r--vendor/github.com/hashicorp/go-multierror/go.sum4
-rw-r--r--vendor/github.com/hpcloud/tail/.gitignore3
-rw-r--r--vendor/github.com/hpcloud/tail/.travis.yml18
-rw-r--r--vendor/github.com/hpcloud/tail/CHANGES.md63
-rw-r--r--vendor/github.com/hpcloud/tail/Dockerfile19
-rw-r--r--vendor/github.com/hpcloud/tail/Makefile11
-rw-r--r--vendor/github.com/hpcloud/tail/appveyor.yml11
-rw-r--r--vendor/github.com/hpcloud/tail/ratelimiter/Licence7
-rw-r--r--vendor/github.com/ijc/Gotty/LICENSE (renamed from vendor/github.com/Nvveen/Gotty/LICENSE)0
-rw-r--r--vendor/github.com/ijc/Gotty/README (renamed from vendor/github.com/Nvveen/Gotty/README)0
-rw-r--r--vendor/github.com/ijc/Gotty/TODO3
-rw-r--r--vendor/github.com/ijc/Gotty/attributes.go (renamed from vendor/github.com/Nvveen/Gotty/attributes.go)0
-rw-r--r--vendor/github.com/ijc/Gotty/gotty.go (renamed from vendor/github.com/Nvveen/Gotty/gotty.go)56
-rw-r--r--vendor/github.com/ijc/Gotty/parser.go (renamed from vendor/github.com/Nvveen/Gotty/parser.go)0
-rw-r--r--vendor/github.com/ijc/Gotty/types.go (renamed from vendor/github.com/Nvveen/Gotty/types.go)0
-rw-r--r--vendor/github.com/imdario/mergo/.gitignore33
-rw-r--r--vendor/github.com/imdario/mergo/.travis.yml7
-rw-r--r--vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md46
-rw-r--r--vendor/github.com/imdario/mergo/README.md18
-rw-r--r--vendor/github.com/imdario/mergo/map.go1
-rw-r--r--vendor/github.com/imdario/mergo/merge.go19
-rw-r--r--vendor/github.com/ishidawataru/sctp/.gitignore14
-rw-r--r--vendor/github.com/ishidawataru/sctp/.travis.yml17
-rw-r--r--vendor/github.com/json-iterator/go/.codecov.yml3
-rw-r--r--vendor/github.com/json-iterator/go/.gitignore4
-rw-r--r--vendor/github.com/json-iterator/go/.travis.yml14
-rw-r--r--vendor/github.com/json-iterator/go/Gopkg.lock21
-rw-r--r--vendor/github.com/json-iterator/go/Gopkg.toml26
-rw-r--r--vendor/github.com/json-iterator/go/README.md4
-rw-r--r--vendor/github.com/json-iterator/go/any.go4
-rw-r--r--vendor/github.com/json-iterator/go/build.sh12
-rw-r--r--vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md7
-rw-r--r--vendor/github.com/json-iterator/go/iter_float.go20
-rw-r--r--vendor/github.com/json-iterator/go/iter_skip_strict.go14
-rw-r--r--vendor/github.com/json-iterator/go/reflect_extension.go2
-rw-r--r--vendor/github.com/json-iterator/go/reflect_map.go16
-rw-r--r--vendor/github.com/json-iterator/go/reflect_marshaler.go3
-rw-r--r--vendor/github.com/json-iterator/go/test.sh12
-rw-r--r--vendor/github.com/klauspost/compress/README.md160
-rw-r--r--vendor/github.com/klauspost/compress/flate/copy.go32
-rw-r--r--vendor/github.com/klauspost/compress/flate/deflate.go551
-rw-r--r--vendor/github.com/klauspost/compress/flate/gen.go265
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go138
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_code.go5
-rw-r--r--vendor/github.com/klauspost/compress/flate/snappy.go4
-rw-r--r--vendor/github.com/klauspost/compress/flate/token.go27
-rw-r--r--vendor/github.com/klauspost/cpuid/.gitignore24
-rw-r--r--vendor/github.com/klauspost/cpuid/.travis.yml23
-rw-r--r--vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt35
-rw-r--r--vendor/github.com/klauspost/cpuid/README.md2
-rw-r--r--vendor/github.com/klauspost/cpuid/cpuid.go17
-rw-r--r--vendor/github.com/klauspost/cpuid/private-gen.go476
-rw-r--r--vendor/github.com/klauspost/pgzip/.gitignore24
-rw-r--r--vendor/github.com/klauspost/pgzip/.travis.yml21
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE9
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/README.md41
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod1
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go36
-rw-r--r--vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go11
-rw-r--r--vendor/github.com/mattn/go-isatty/.travis.yml13
-rw-r--r--vendor/github.com/mattn/go-isatty/go.mod3
-rw-r--r--vendor/github.com/mattn/go-isatty/go.sum2
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_android.go (renamed from vendor/github.com/mattn/go-isatty/isatty_linux.go)9
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_appengine.go15
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_bsd.go6
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go19
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_others.go11
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_solaris.go6
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_tcgets.go19
-rw-r--r--vendor/github.com/mattn/go-shellwords/.travis.yml8
-rw-r--r--vendor/github.com/mattn/go-shellwords/LICENSE21
-rw-r--r--vendor/github.com/mattn/go-shellwords/README.md47
-rw-r--r--vendor/github.com/mattn/go-shellwords/go.mod1
-rw-r--r--vendor/github.com/mattn/go-shellwords/shellwords.go195
-rw-r--r--vendor/github.com/mattn/go-shellwords/util_go15.go24
-rw-r--r--vendor/github.com/mattn/go-shellwords/util_posix.go22
-rw-r--r--vendor/github.com/mattn/go-shellwords/util_windows.go22
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE201
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE1
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore1
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile7
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go75
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go16
-rw-r--r--vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go46
-rw-r--r--vendor/github.com/mistifyio/go-zfs/.gitignore1
-rw-r--r--vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md60
-rw-r--r--vendor/github.com/mistifyio/go-zfs/Vagrantfile34
-rw-r--r--vendor/github.com/modern-go/concurrent/.gitignore1
-rw-r--r--vendor/github.com/modern-go/concurrent/.travis.yml14
-rw-r--r--vendor/github.com/modern-go/concurrent/test.sh12
-rw-r--r--vendor/github.com/modern-go/reflect2/.gitignore2
-rw-r--r--vendor/github.com/modern-go/reflect2/.travis.yml15
-rw-r--r--vendor/github.com/modern-go/reflect2/Gopkg.lock15
-rw-r--r--vendor/github.com/modern-go/reflect2/Gopkg.toml35
-rw-r--r--vendor/github.com/modern-go/reflect2/test.sh12
-rw-r--r--vendor/github.com/mrunalp/fileutils/.gitignore1
-rw-r--r--vendor/github.com/mrunalp/fileutils/MAINTAINERS1
-rw-r--r--vendor/github.com/mtrmac/gpgme/.gitignore1
-rw-r--r--vendor/github.com/onsi/ginkgo/.travis.yml8
-rw-r--r--vendor/github.com/onsi/ginkgo/CHANGELOG.md11
-rw-r--r--vendor/github.com/onsi/ginkgo/config/config.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/extensions/table/table_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/extensions/table/table_test.go64
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go92
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go82
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go2
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner_test.go60
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go212
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15_test.go201
-rw-r--r--vendor/github.com/onsi/ginkgo/ginkgo_dsl.go4
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage.go21
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_test.go31
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture/external_coverage.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage.go21
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_test.go29
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go10
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go16
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go41
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go17
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go19
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go44
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go25
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go31
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture/external_coverage.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_test.go18
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go29
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go35
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go103
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go22
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go15
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go22
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go15
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go17
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go97
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/README.md1
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go73
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_test.go73
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go30
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go17
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go4
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go30
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go26
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go28
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_test.go49
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go71
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command.go9
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_test.go18
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go43
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go17
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_test.go23
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go7
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go7
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.json3
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go7
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go14
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go5
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go11
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/convert_test.go121
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/coverage_test.go147
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/fail_test.go55
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/flags_test.go237
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/integration.go1
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/integration_suite_test.go129
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/interrupt_test.go51
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/precompiled_test.go53
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/progress_test.go94
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/run_test.go483
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/skip_test.go43
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/subcommand_test.go419
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/suite_command_test.go63
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/suite_setup_test.go178
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/tags_test.go27
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/test_description_test.go25
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go90
-rw-r--r--vendor/github.com/onsi/ginkgo/integration/watch_test.go275
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go80
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go213
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go141
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go22
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go155
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go48
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go353
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go230
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go199
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go446
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go315
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go22
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go33
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go181
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/remote/server_test.go269
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go739
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec/specs.go45
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go287
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go149
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go112
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go64
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go62
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go785
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go35
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go385
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go75
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go433
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go258
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/stenographer/support/README.md6
-rw-r--r--vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go214
-rw-r--r--vendor/github.com/onsi/ginkgo/types/types_suite_test.go13
-rw-r--r--vendor/github.com/onsi/ginkgo/types/types_test.go99
-rw-r--r--vendor/github.com/onsi/gomega/.travis.yml10
-rw-r--r--vendor/github.com/onsi/gomega/CHANGELOG.md11
-rw-r--r--vendor/github.com/onsi/gomega/Makefile6
-rw-r--r--vendor/github.com/onsi/gomega/format/format_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/format/format_test.go627
-rw-r--r--vendor/github.com/onsi/gomega/gbytes/buffer_test.go205
-rw-r--r--vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go188
-rw-r--r--vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go169
-rw-r--r--vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go36
-rw-r--r--vendor/github.com/onsi/gomega/gexec/build_test.go112
-rw-r--r--vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go114
-rw-r--r--vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go26
-rw-r--r--vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go43
-rw-r--r--vendor/github.com/onsi/gomega/gexec/session_test.go336
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/handlers.go322
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go3
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go55
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto9
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/test_server.go422
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/ghttp/test_server_test.go1129
-rw-r--r--vendor/github.com/onsi/gomega/gomega_dsl.go282
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/elements.go159
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/elements_test.go144
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go72
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/fields.go168
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/fields_test.go76
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/ignore.go37
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/ignore_test.go23
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/pointer.go56
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/pointer_test.go33
-rw-r--r--vendor/github.com/onsi/gomega/gstruct/types.go15
-rw-r--r--vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go258
-rw-r--r--vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go13
-rw-r--r--vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go351
-rw-r--r--vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go23
-rw-r--r--vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go92
-rw-r--r--vendor/github.com/onsi/gomega/matchers/and_test.go103
-rw-r--r--vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go46
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go40
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go40
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go40
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go70
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go52
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go50
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go20
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go61
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go28
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go172
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go107
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go99
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go20
-rw-r--r--vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go40
-rw-r--r--vendor/github.com/onsi/gomega/matchers/consist_of_test.go75
-rw-r--r--vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go76
-rw-r--r--vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go36
-rw-r--r--vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go80
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go50
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go73
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go82
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go53
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go2
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go59
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go36
-rw-r--r--vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go36
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go107
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go103
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go44
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go4
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go97
-rw-r--r--vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go101
-rw-r--r--vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go50
-rw-r--r--vendor/github.com/onsi/gomega/matchers/not_test.go64
-rw-r--r--vendor/github.com/onsi/gomega/matchers/or_test.go92
-rw-r--r--vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go52
-rw-r--r--vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go304
-rw-r--r--vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go72
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml6
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml9
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml1
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml6
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml211
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml13
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml13
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml13
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml4
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml4
-rw-r--r--vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml7
-rw-r--r--vendor/github.com/onsi/gomega/matchers/with_transform_test.go102
-rw-r--r--vendor/github.com/opencontainers/go-digest/.mailmap1
-rw-r--r--vendor/github.com/opencontainers/go-digest/.pullapprove.yml12
-rw-r--r--vendor/github.com/opencontainers/go-digest/.travis.yml4
-rw-r--r--vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md72
-rw-r--r--vendor/github.com/opencontainers/go-digest/LICENSE.code (renamed from vendor/github.com/opencontainers/go-digest/LICENSE)0
-rw-r--r--vendor/github.com/opencontainers/go-digest/MAINTAINERS9
-rw-r--r--vendor/github.com/opencontainers/go-digest/README.md2
-rw-r--r--vendor/github.com/opencontainers/image-spec/README.md167
-rw-r--r--vendor/github.com/opencontainers/image-spec/specs-go/version.go2
-rw-r--r--vendor/github.com/opencontainers/runc/README.md270
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/README.md330
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md44
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h32
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go12
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go25
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go5
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c995
-rw-r--r--vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS2
-rw-r--r--vendor/github.com/opencontainers/runc/vendor.conf25
-rw-r--r--vendor/github.com/opencontainers/runtime-spec/README.md153
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/README.md128
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/config.go35
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/generate/generate.go225
-rw-r--r--vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go7
-rw-r--r--vendor/github.com/opencontainers/selinux/README.md7
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/label/label.go8
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go11
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go23
-rw-r--r--vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go11
-rw-r--r--vendor/github.com/openshift/imagebuilder/.gitignore1
-rw-r--r--vendor/github.com/openshift/imagebuilder/.travis.yml16
-rw-r--r--vendor/github.com/openshift/imagebuilder/Makefile11
-rw-r--r--vendor/github.com/openshift/imagebuilder/OWNERS8
-rw-r--r--vendor/github.com/openshift/imagebuilder/imagebuilder.spec63
-rw-r--r--vendor/github.com/opentracing/opentracing-go/.gitignore1
-rw-r--r--vendor/github.com/opentracing/opentracing-go/.travis.yml20
-rw-r--r--vendor/github.com/opentracing/opentracing-go/CHANGELOG.md46
-rw-r--r--vendor/github.com/opentracing/opentracing-go/Makefile20
-rw-r--r--vendor/github.com/opentracing/opentracing-go/gocontext.go12
-rw-r--r--vendor/github.com/opentracing/opentracing-go/tracer.go3
-rw-r--r--vendor/github.com/ostreedev/ostree-go/README.md4
-rw-r--r--vendor/github.com/pkg/errors/.gitignore24
-rw-r--r--vendor/github.com/pkg/errors/.travis.yml15
-rw-r--r--vendor/github.com/pkg/errors/appveyor.yml32
-rw-r--r--vendor/github.com/pkg/profile/.travis.yml10
-rw-r--r--vendor/github.com/pkg/profile/AUTHORS1
-rw-r--r--vendor/github.com/pkg/profile/profile.go25
-rw-r--r--vendor/github.com/pmezard/go-difflib/README.md50
-rw-r--r--vendor/github.com/pquerna/ffjson/README.md232
-rw-r--r--vendor/github.com/prometheus/client_golang/LICENSE (renamed from vendor/github.com/containerd/cgroups/LICENSE)0
-rw-r--r--vendor/github.com/prometheus/client_golang/NOTICE23
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/.gitignore1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/README.md1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/build_info.go29
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go22
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/collector.go120
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/counter.go277
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/desc.go184
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/doc.go200
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go119
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/fnv.go42
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/gauge.go286
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector.go396
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/histogram.go586
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go85
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/labels.go87
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/metric.go174
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/observer.go52
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector.go151
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go65
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go112
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go357
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go349
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go219
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go447
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry.go945
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary.go736
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/timer.go54
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/untyped.go42
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go162
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec.go472
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/wrap.go200
-rw-r--r--vendor/github.com/prometheus/client_model/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_model/NOTICE5
-rw-r--r--vendor/github.com/prometheus/client_model/go/metrics.pb.go629
-rw-r--r--vendor/github.com/prometheus/common/LICENSE201
-rw-r--r--vendor/github.com/prometheus/common/NOTICE5
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode.go429
-rw-r--r--vendor/github.com/prometheus/common/expfmt/encode.go88
-rw-r--r--vendor/github.com/prometheus/common/expfmt/expfmt.go38
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz.go36
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_create.go468
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_parse.go757
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt67
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go162
-rw-r--r--vendor/github.com/prometheus/common/model/alert.go136
-rw-r--r--vendor/github.com/prometheus/common/model/fingerprinting.go105
-rw-r--r--vendor/github.com/prometheus/common/model/fnv.go42
-rw-r--r--vendor/github.com/prometheus/common/model/labels.go210
-rw-r--r--vendor/github.com/prometheus/common/model/labelset.go169
-rw-r--r--vendor/github.com/prometheus/common/model/metric.go102
-rw-r--r--vendor/github.com/prometheus/common/model/model.go16
-rw-r--r--vendor/github.com/prometheus/common/model/signature.go144
-rw-r--r--vendor/github.com/prometheus/common/model/silence.go106
-rw-r--r--vendor/github.com/prometheus/common/model/time.go270
-rw-r--r--vendor/github.com/prometheus/common/model/value.go416
-rw-r--r--vendor/github.com/prometheus/procfs/.gitignore1
-rw-r--r--vendor/github.com/prometheus/procfs/.golangci.yml6
-rw-r--r--vendor/github.com/prometheus/procfs/CONTRIBUTING.md18
-rw-r--r--vendor/github.com/prometheus/procfs/LICENSE201
-rw-r--r--vendor/github.com/prometheus/procfs/MAINTAINERS.md2
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile29
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile.common272
-rw-r--r--vendor/github.com/prometheus/procfs/NOTICE7
-rw-r--r--vendor/github.com/prometheus/procfs/README.md53
-rw-r--r--vendor/github.com/prometheus/procfs/buddyinfo.go85
-rw-r--r--vendor/github.com/prometheus/procfs/doc.go45
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures.ttar1808
-rw-r--r--vendor/github.com/prometheus/procfs/fs.go43
-rw-r--r--vendor/github.com/prometheus/procfs/go.mod3
-rw-r--r--vendor/github.com/prometheus/procfs/go.sum2
-rw-r--r--vendor/github.com/prometheus/procfs/internal/fs/fs.go52
-rw-r--r--vendor/github.com/prometheus/procfs/ipvs.go239
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat.go151
-rw-r--r--vendor/github.com/prometheus/procfs/mountstats.go621
-rw-r--r--vendor/github.com/prometheus/procfs/net_dev.go206
-rw-r--r--vendor/github.com/prometheus/procfs/net_unix.go275
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go267
-rw-r--r--vendor/github.com/prometheus/procfs/proc_io.go65
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits.go157
-rw-r--r--vendor/github.com/prometheus/procfs/proc_ns.go68
-rw-r--r--vendor/github.com/prometheus/procfs/proc_psi.go101
-rw-r--r--vendor/github.com/prometheus/procfs/proc_stat.go198
-rw-r--r--vendor/github.com/prometheus/procfs/proc_status.go162
-rw-r--r--vendor/github.com/prometheus/procfs/stat.go244
-rw-r--r--vendor/github.com/prometheus/procfs/ttar413
-rw-r--r--vendor/github.com/prometheus/procfs/xfrm.go187
-rw-r--r--vendor/github.com/seccomp/containers-golang/.gitignore2
-rw-r--r--vendor/github.com/seccomp/containers-golang/Makefile18
-rw-r--r--vendor/github.com/seccomp/containers-golang/seccomp.json773
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/.gitignore4
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/CHANGELOG17
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/Makefile26
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES112
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp.go117
-rw-r--r--vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go237
-rw-r--r--vendor/github.com/sirupsen/logrus/.gitignore2
-rw-r--r--vendor/github.com/sirupsen/logrus/.travis.yml25
-rw-r--r--vendor/github.com/sirupsen/logrus/CHANGELOG.md200
-rw-r--r--vendor/github.com/sirupsen/logrus/README.md124
-rw-r--r--vendor/github.com/sirupsen/logrus/alt_exit.go18
-rw-r--r--vendor/github.com/sirupsen/logrus/appveyor.yml14
-rw-r--r--vendor/github.com/sirupsen/logrus/entry.go300
-rw-r--r--vendor/github.com/sirupsen/logrus/exported.go68
-rw-r--r--vendor/github.com/sirupsen/logrus/formatter.go51
-rw-r--r--vendor/github.com/sirupsen/logrus/go.mod10
-rw-r--r--vendor/github.com/sirupsen/logrus/go.sum16
-rw-r--r--vendor/github.com/sirupsen/logrus/hooks/syslog/README.md8
-rw-r--r--vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go7
-rw-r--r--vendor/github.com/sirupsen/logrus/json_formatter.go79
-rw-r--r--vendor/github.com/sirupsen/logrus/logger.go250
-rw-r--r--vendor/github.com/sirupsen/logrus/logrus.go75
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_appengine.go10
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_bsd.go10
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_appengine.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_bsd.go13
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go17
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_solaris.go11
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_unix.go13
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_check_windows.go34
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_linux.go14
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_notwindows.go28
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_solaris.go21
-rw-r--r--vendor/github.com/sirupsen/logrus/terminal_windows.go82
-rw-r--r--vendor/github.com/sirupsen/logrus/text_formatter.go236
-rw-r--r--vendor/github.com/sirupsen/logrus/writer.go2
-rw-r--r--vendor/github.com/spf13/cobra/.gitignore38
-rw-r--r--vendor/github.com/spf13/cobra/.mailmap3
-rw-r--r--vendor/github.com/spf13/cobra/.travis.yml31
-rw-r--r--vendor/github.com/spf13/cobra/README.md61
-rw-r--r--vendor/github.com/spf13/cobra/args.go12
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go63
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.md256
-rw-r--r--vendor/github.com/spf13/cobra/cobra.go7
-rw-r--r--vendor/github.com/spf13/cobra/command.go103
-rw-r--r--vendor/github.com/spf13/cobra/command_win.go8
-rw-r--r--vendor/github.com/spf13/cobra/go.mod13
-rw-r--r--vendor/github.com/spf13/cobra/go.sum51
-rw-r--r--vendor/github.com/spf13/cobra/powershell_completions.go100
-rw-r--r--vendor/github.com/spf13/cobra/powershell_completions.md14
-rw-r--r--vendor/github.com/spf13/cobra/shell_completions.go85
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.go358
-rw-r--r--vendor/github.com/spf13/cobra/zsh_completions.md39
-rw-r--r--vendor/github.com/spf13/pflag/.gitignore2
-rw-r--r--vendor/github.com/spf13/pflag/.travis.yml21
-rw-r--r--vendor/github.com/stretchr/testify/README.md340
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl5
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl5
-rw-r--r--vendor/github.com/stretchr/testify/go.mod7
-rw-r--r--vendor/github.com/stretchr/testify/require/require.go.tmpl6
-rw-r--r--vendor/github.com/stretchr/testify/require/require_forward.go.tmpl5
-rw-r--r--vendor/github.com/tchap/go-patricia/AUTHORS3
-rw-r--r--vendor/github.com/tchap/go-patricia/README.md123
-rw-r--r--vendor/github.com/tchap/go-patricia/patricia/children.go38
-rw-r--r--vendor/github.com/tchap/go-patricia/patricia/patricia.go12
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.gitignore15
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.gitmodules3
-rw-r--r--vendor/github.com/uber/jaeger-client-go/.travis.yml53
-rw-r--r--vendor/github.com/uber/jaeger-client-go/CHANGELOG.md210
-rw-r--r--vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md170
-rw-r--r--vendor/github.com/uber/jaeger-client-go/DCO37
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Gopkg.lock223
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Gopkg.toml31
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Makefile123
-rw-r--r--vendor/github.com/uber/jaeger-client-go/RELEASE.md11
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config.go1
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/options.go8
-rw-r--r--vendor/github.com/uber/jaeger-client-go/constants.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/context.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/glide.lock90
-rw-r--r--vendor/github.com/uber/jaeger-client-go/glide.yaml22
-rw-r--r--vendor/github.com/uber/jaeger-client-go/header.go3
-rw-r--r--vendor/github.com/uber/jaeger-client-go/propagation.go39
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go3
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go90
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift/.nocover0
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer.go12
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer_options.go4
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go8
-rw-r--r--vendor/github.com/uber/jaeger-lib/README.md27
-rw-r--r--vendor/github.com/ulikunitz/xz/.gitignore25
-rw-r--r--vendor/github.com/ulikunitz/xz/TODO.md323
-rw-r--r--vendor/github.com/ulikunitz/xz/example.go40
-rw-r--r--vendor/github.com/ulikunitz/xz/fox.xzbin0 -> 104 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/go.mod1
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/fox.lzmabin0 -> 67 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/make-docs5
-rw-r--r--vendor/github.com/varlink/go/.gitignore2
-rw-r--r--vendor/github.com/varlink/go/.travis.yml25
-rw-r--r--vendor/github.com/varlink/go/Makefile7
-rw-r--r--vendor/github.com/varlink/go/README.md7
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink-go-certification/main.go619
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/generate.go3
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/org.varlink.certification.varlink89
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink-go-interface-generator/generator_test.go90
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink-go-type-generator/main.go172
-rw-r--r--vendor/github.com/varlink/go/cmd/varlink/main.go295
-rw-r--r--vendor/github.com/varlink/go/golang-github-varlink-go.spec44
-rw-r--r--vendor/github.com/varlink/go/varlink/external_test.go211
-rw-r--r--vendor/github.com/varlink/go/varlink/idl/idl_test.go150
-rw-r--r--vendor/github.com/varlink/go/varlink/varlink_test.go258
-rw-r--r--vendor/github.com/vbatts/tar-split/README.md137
-rw-r--r--vendor/github.com/vbauerster/mpb/.gitignore5
-rw-r--r--vendor/github.com/vbauerster/mpb/.travis.yml14
-rw-r--r--vendor/github.com/vbauerster/mpb/README.md4
-rw-r--r--vendor/github.com/vbauerster/mpb/bar.go186
-rw-r--r--vendor/github.com/vbauerster/mpb/bar_filler.go111
-rw-r--r--vendor/github.com/vbauerster/mpb/bar_option.go151
-rw-r--r--vendor/github.com/vbauerster/mpb/cwriter/writer.go8
-rw-r--r--vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go2
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/counters.go6
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/decorator.go30
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/elapsed.go4
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/eta.go15
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/moving-average.go9
-rw-r--r--vendor/github.com/vbauerster/mpb/decor/speed.go3
-rw-r--r--vendor/github.com/vbauerster/mpb/go.test.sh12
-rw-r--r--vendor/github.com/vbauerster/mpb/internal/percentage.go4
-rw-r--r--vendor/github.com/vbauerster/mpb/internal/round.go49
-rw-r--r--vendor/github.com/vbauerster/mpb/options.go43
-rw-r--r--vendor/github.com/vbauerster/mpb/options_go1.7.go15
-rw-r--r--vendor/github.com/vbauerster/mpb/progress.go52
-rw-r--r--vendor/github.com/vbauerster/mpb/spinner_filler.go48
-rw-r--r--vendor/github.com/vishvananda/netlink/.travis.yml13
-rw-r--r--vendor/github.com/vishvananda/netlink/CHANGELOG.md5
-rw-r--r--vendor/github.com/vishvananda/netlink/Makefile30
-rw-r--r--vendor/github.com/xeipuuv/gojsonschema/.gitignore3
-rw-r--r--vendor/github.com/xeipuuv/gojsonschema/.travis.yml12
-rw-r--r--vendor/github.com/xeipuuv/gojsonschema/glide.yaml13
-rw-r--r--vendor/golang.org/x/crypto/AUTHORS3
-rw-r--r--vendor/golang.org/x/crypto/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/crypto/README.md21
-rw-r--r--vendor/golang.org/x/crypto/cast5/cast5.go11
-rw-r--r--vendor/golang.org/x/crypto/curve25519/const_amd64.h8
-rw-r--r--vendor/golang.org/x/crypto/curve25519/const_amd64.s20
-rw-r--r--vendor/golang.org/x/crypto/curve25519/cswap_amd64.s65
-rw-r--r--vendor/golang.org/x/crypto/curve25519/curve25519.go834
-rw-r--r--vendor/golang.org/x/crypto/curve25519/doc.go23
-rw-r--r--vendor/golang.org/x/crypto/curve25519/freeze_amd64.s73
-rw-r--r--vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s1377
-rw-r--r--vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go240
-rw-r--r--vendor/golang.org/x/crypto/curve25519/mul_amd64.s169
-rw-r--r--vendor/golang.org/x/crypto/curve25519/square_amd64.s132
-rw-r--r--vendor/golang.org/x/crypto/openpgp/keys.go14
-rw-r--r--vendor/golang.org/x/crypto/openpgp/packet/private_key.go26
-rw-r--r--vendor/golang.org/x/crypto/otr/libotr_test_helper.c197
-rw-r--r--vendor/golang.org/x/crypto/otr/otr.go1415
-rw-r--r--vendor/golang.org/x/crypto/otr/smp.go572
-rw-r--r--vendor/golang.org/x/crypto/ssh/terminal/terminal.go71
-rw-r--r--vendor/golang.org/x/crypto/ssh/terminal/util_windows.go6
-rw-r--r--vendor/golang.org/x/crypto/ssh/test/doc.go7
-rw-r--r--vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c173
-rw-r--r--vendor/golang.org/x/net/AUTHORS3
-rw-r--r--vendor/golang.org/x/net/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/net/README.md16
-rw-r--r--vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go71
-rw-r--r--vendor/golang.org/x/net/html/atom/gen.go712
-rw-r--r--vendor/golang.org/x/net/html/node.go2
-rw-r--r--vendor/golang.org/x/net/html/parse.go158
-rw-r--r--vendor/golang.org/x/net/http2/.gitignore2
-rw-r--r--vendor/golang.org/x/net/http2/Dockerfile51
-rw-r--r--vendor/golang.org/x/net/http2/Makefile3
-rw-r--r--vendor/golang.org/x/net/http2/frame.go2
-rw-r--r--vendor/golang.org/x/net/http2/server.go50
-rw-r--r--vendor/golang.org/x/net/http2/transport.go17
-rw-r--r--vendor/golang.org/x/net/idna/idna10.0.0.go (renamed from vendor/golang.org/x/net/idna/idna.go)8
-rw-r--r--vendor/golang.org/x/net/idna/idna9.0.0.go682
-rw-r--r--vendor/golang.org/x/net/idna/tables10.0.0.go (renamed from vendor/golang.org/x/net/idna/tables.go)6
-rw-r--r--vendor/golang.org/x/net/idna/tables11.0.0.go4653
-rw-r--r--vendor/golang.org/x/net/idna/tables9.0.0.go4486
-rw-r--r--vendor/golang.org/x/net/proxy/dial.go54
-rw-r--r--vendor/golang.org/x/net/proxy/direct.go15
-rw-r--r--vendor/golang.org/x/net/proxy/per_host.go15
-rw-r--r--vendor/golang.org/x/net/proxy/proxy.go33
-rw-r--r--vendor/golang.org/x/net/proxy/socks5.go10
-rw-r--r--vendor/golang.org/x/oauth2/.travis.yml13
-rw-r--r--vendor/golang.org/x/oauth2/AUTHORS3
-rw-r--r--vendor/golang.org/x/oauth2/CONTRIBUTING.md26
-rw-r--r--vendor/golang.org/x/oauth2/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/oauth2/LICENSE (renamed from vendor/github.com/gorilla/context/LICENSE)8
-rw-r--r--vendor/golang.org/x/oauth2/README.md35
-rw-r--r--vendor/golang.org/x/oauth2/go.mod10
-rw-r--r--vendor/golang.org/x/oauth2/go.sum12
-rw-r--r--vendor/golang.org/x/oauth2/internal/client_appengine.go (renamed from vendor/golang.org/x/sys/cpu/cpu_mips64x.go)10
-rw-r--r--vendor/golang.org/x/oauth2/internal/doc.go6
-rw-r--r--vendor/golang.org/x/oauth2/internal/oauth2.go37
-rw-r--r--vendor/golang.org/x/oauth2/internal/token.go294
-rw-r--r--vendor/golang.org/x/oauth2/internal/transport.go33
-rw-r--r--vendor/golang.org/x/oauth2/oauth2.go381
-rw-r--r--vendor/golang.org/x/oauth2/token.go178
-rw-r--r--vendor/golang.org/x/oauth2/transport.go144
-rw-r--r--vendor/golang.org/x/sync/AUTHORS3
-rw-r--r--vendor/golang.org/x/sync/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/sync/README.md18
-rw-r--r--vendor/golang.org/x/sync/semaphore/semaphore.go2
-rw-r--r--vendor/golang.org/x/sys/AUTHORS3
-rw-r--r--vendor/golang.org/x/sys/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/sys/README.md18
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu.go87
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_arm.go9
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_arm64.go67
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_gc_x86.go16
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_gccgo.c43
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_gccgo.go26
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_linux.go61
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_mipsx.go11
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_ppc64x.go32
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_s390x.go9
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_x86.go55
-rw-r--r--vendor/golang.org/x/sys/cpu/cpu_x86.s27
-rw-r--r--vendor/golang.org/x/sys/unix/.gitignore2
-rw-r--r--vendor/golang.org/x/sys/unix/README.md16
-rw-r--r--vendor/golang.org/x/sys/unix/asm_linux_riscv64.s54
-rw-r--r--vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s29
-rw-r--r--vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s29
-rw-r--r--vendor/golang.org/x/sys/unix/mkall.sh227
-rw-r--r--vendor/golang.org/x/sys/unix/mkasm_darwin.go61
-rw-r--r--vendor/golang.org/x/sys/unix/mkerrors.sh663
-rw-r--r--vendor/golang.org/x/sys/unix/mkpost.go122
-rw-r--r--vendor/golang.org/x/sys/unix/mksyscall.go407
-rw-r--r--vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go415
-rw-r--r--vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go614
-rw-r--r--vendor/golang.org/x/sys/unix/mksyscall_solaris.go335
-rw-r--r--vendor/golang.org/x/sys/unix/mksysctl_openbsd.go355
-rw-r--r--vendor/golang.org/x/sys/unix/mksysnum.go190
-rw-r--r--vendor/golang.org/x/sys/unix/pledge_openbsd.go (renamed from vendor/golang.org/x/sys/unix/openbsd_pledge.go)3
-rw-r--r--vendor/golang.org/x/sys/unix/readdirent_getdents.go12
-rw-r--r--vendor/golang.org/x/sys/unix/readdirent_getdirentries.go19
-rw-r--r--vendor/golang.org/x/sys/unix/sockcmsg_unix.go15
-rw-r--r--vendor/golang.org/x/sys/unix/syscall.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_aix.go34
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_aix_ppc.go16
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go47
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_bsd.go12
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin.go18
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_dragonfly.go2
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_freebsd.go114
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux.go170
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_386.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_amd64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_arm.go20
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_arm64.go14
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go17
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_s390x.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_netbsd.go25
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go33
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_openbsd.go42
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go37
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_solaris.go1
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_unix.go54
-rw-r--r--vendor/golang.org/x/sys/unix/types_aix.go237
-rw-r--r--vendor/golang.org/x/sys/unix/types_darwin.go283
-rw-r--r--vendor/golang.org/x/sys/unix/types_dragonfly.go263
-rw-r--r--vendor/golang.org/x/sys/unix/types_freebsd.go400
-rw-r--r--vendor/golang.org/x/sys/unix/types_netbsd.go290
-rw-r--r--vendor/golang.org/x/sys/unix/types_openbsd.go283
-rw-r--r--vendor/golang.org/x/sys/unix/types_solaris.go266
-rw-r--r--vendor/golang.org/x/sys/unix/unveil_openbsd.go (renamed from vendor/golang.org/x/sys/unix/openbsd_unveil.go)2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_386.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go237
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go1762
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go1789
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go52
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go50
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go56
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go46
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go37
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go12
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go12
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go12
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go12
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_386.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go153
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go143
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go123
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go128
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go1826
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go1692
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go275
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go6
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go23
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go23
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go23
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go445
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_386.go800
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go736
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go5
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go770
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go770
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go5
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go18
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go20
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go274
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go2
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go217
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go45
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go50
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_darwin_386.go46
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go48
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go46
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go48
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go38
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go185
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go182
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go159
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go160
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_386.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go788
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go786
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go35
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go41
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go41
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go473
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go11
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go11
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go11
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go565
-rw-r--r--vendor/golang.org/x/sys/windows/dll_windows.go8
-rw-r--r--vendor/golang.org/x/sys/windows/env_windows.go34
-rw-r--r--vendor/golang.org/x/sys/windows/mkerrors.bash63
-rw-r--r--vendor/golang.org/x/sys/windows/mkknownfolderids.bash27
-rw-r--r--vendor/golang.org/x/sys/windows/mksyscall.go2
-rw-r--r--vendor/golang.org/x/sys/windows/registry/mksyscall.go2
-rw-r--r--vendor/golang.org/x/sys/windows/registry/value.go9
-rw-r--r--vendor/golang.org/x/sys/windows/security_windows.go402
-rw-r--r--vendor/golang.org/x/sys/windows/service.go70
-rw-r--r--vendor/golang.org/x/sys/windows/svc/event.go48
-rw-r--r--vendor/golang.org/x/sys/windows/svc/go12.c24
-rw-r--r--vendor/golang.org/x/sys/windows/svc/go12.go11
-rw-r--r--vendor/golang.org/x/sys/windows/svc/go13.go31
-rw-r--r--vendor/golang.org/x/sys/windows/svc/security.go62
-rw-r--r--vendor/golang.org/x/sys/windows/svc/service.go363
-rw-r--r--vendor/golang.org/x/sys/windows/svc/sys_386.s68
-rw-r--r--vendor/golang.org/x/sys/windows/svc/sys_amd64.s42
-rw-r--r--vendor/golang.org/x/sys/windows/svc/sys_arm.s38
-rw-r--r--vendor/golang.org/x/sys/windows/syscall_windows.go129
-rw-r--r--vendor/golang.org/x/sys/windows/types_windows.go284
-rw-r--r--vendor/golang.org/x/sys/windows/zerrors_windows.go6853
-rw-r--r--vendor/golang.org/x/sys/windows/zknownfolderids_windows.go149
-rw-r--r--vendor/golang.org/x/sys/windows/zsyscall_windows.go655
-rw-r--r--vendor/golang.org/x/text/AUTHORS3
-rw-r--r--vendor/golang.org/x/text/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/text/README.md93
-rw-r--r--vendor/golang.org/x/text/encoding/charmap/maketables.go556
-rw-r--r--vendor/golang.org/x/text/encoding/htmlindex/gen.go173
-rw-r--r--vendor/golang.org/x/text/encoding/internal/identifier/gen.go142
-rw-r--r--vendor/golang.org/x/text/encoding/internal/identifier/mib.go94
-rw-r--r--vendor/golang.org/x/text/encoding/japanese/maketables.go161
-rw-r--r--vendor/golang.org/x/text/encoding/korean/maketables.go143
-rw-r--r--vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go161
-rw-r--r--vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go140
-rw-r--r--vendor/golang.org/x/text/go.mod3
-rw-r--r--vendor/golang.org/x/text/internal/language/compact/gen.go64
-rw-r--r--vendor/golang.org/x/text/internal/language/compact/gen_index.go113
-rw-r--r--vendor/golang.org/x/text/internal/language/compact/gen_parents.go54
-rw-r--r--vendor/golang.org/x/text/internal/language/gen.go1520
-rw-r--r--vendor/golang.org/x/text/internal/language/gen_common.go20
-rw-r--r--vendor/golang.org/x/text/language/gen.go305
-rw-r--r--vendor/golang.org/x/text/language/language.go2
-rw-r--r--vendor/golang.org/x/text/transform/transform.go2
-rw-r--r--vendor/golang.org/x/text/unicode/bidi/gen.go133
-rw-r--r--vendor/golang.org/x/text/unicode/bidi/gen_ranges.go57
-rw-r--r--vendor/golang.org/x/text/unicode/bidi/gen_trieval.go64
-rw-r--r--vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go2
-rw-r--r--vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go1887
-rw-r--r--vendor/golang.org/x/text/unicode/norm/maketables.go986
-rw-r--r--vendor/golang.org/x/text/unicode/norm/tables10.0.0.go2
-rw-r--r--vendor/golang.org/x/text/unicode/norm/tables11.0.0.go7693
-rw-r--r--vendor/golang.org/x/text/unicode/norm/triegen.go117
-rw-r--r--vendor/golang.org/x/time/AUTHORS3
-rw-r--r--vendor/golang.org/x/time/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/time/README.md17
-rw-r--r--vendor/google.golang.org/appengine/LICENSE202
-rw-r--r--vendor/google.golang.org/appengine/internal/api.go675
-rw-r--r--vendor/google.golang.org/appengine/internal/api_classic.go169
-rw-r--r--vendor/google.golang.org/appengine/internal/api_common.go123
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id.go28
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.pb.go308
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.proto33
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go4367
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto551
-rw-r--r--vendor/google.golang.org/appengine/internal/identity.go55
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_classic.go61
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_flex.go11
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_vm.go134
-rw-r--r--vendor/google.golang.org/appengine/internal/internal.go110
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.pb.go1313
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.proto150
-rw-r--r--vendor/google.golang.org/appengine/internal/main.go16
-rw-r--r--vendor/google.golang.org/appengine/internal/main_common.go7
-rw-r--r--vendor/google.golang.org/appengine/internal/main_vm.go69
-rw-r--r--vendor/google.golang.org/appengine/internal/metadata.go60
-rw-r--r--vendor/google.golang.org/appengine/internal/net.go56
-rw-r--r--vendor/google.golang.org/appengine/internal/regen.sh40
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go361
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto44
-rw-r--r--vendor/google.golang.org/appengine/internal/transaction.go115
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go527
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/urlfetch/urlfetch.go210
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md11
-rw-r--r--vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md8
-rw-r--r--vendor/gopkg.in/fsnotify.v1/example_test.go42
-rw-r--r--vendor/gopkg.in/fsnotify.v1/fsnotify_test.go70
-rw-r--r--vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go229
-rw-r--r--vendor/gopkg.in/fsnotify.v1/inotify_test.go449
-rw-r--r--vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go147
-rw-r--r--vendor/gopkg.in/fsnotify.v1/integration_test.go1237
-rw-r--r--vendor/gopkg.in/mgo.v2/LICENSE25
-rw-r--r--vendor/gopkg.in/mgo.v2/README.md4
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c77
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go138
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c122
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go142
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h7
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c96
-rw-r--r--vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h70
-rw-r--r--vendor/gopkg.in/yaml.v2/.travis.yml12
-rw-r--r--vendor/k8s.io/api/README.md1
-rw-r--r--vendor/k8s.io/api/core/v1/annotation_key_constants.go46
-rw-r--r--vendor/k8s.io/api/core/v1/doc.go1
-rw-r--r--vendor/k8s.io/api/core/v1/generated.pb.go11738
-rw-r--r--vendor/k8s.io/api/core/v1/generated.proto788
-rw-r--r--vendor/k8s.io/api/core/v1/meta.go108
-rw-r--r--vendor/k8s.io/api/core/v1/register.go1
-rw-r--r--vendor/k8s.io/api/core/v1/resource.go7
-rw-r--r--vendor/k8s.io/api/core/v1/types.go892
-rw-r--r--vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go351
-rw-r--r--vendor/k8s.io/api/core/v1/well_known_labels.go36
-rw-r--r--vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go1952
-rw-r--r--vendor/k8s.io/apimachinery/README.md29
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS26
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/errors/errors.go103
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS18
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go48
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto29
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/math.go4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go61
-rw-r--r--vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS33
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go61
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go46
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go1
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go15
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go2686
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto295
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go33
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go64
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go36
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go43
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go25
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go411
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go201
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go470
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go521
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go210
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go55
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go12
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go438
-rw-r--r--vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/conversion/converter.go132
-rw-r--r--vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go7
-rw-r--r--vendor/k8s.io/apimachinery/pkg/fields/selector.go21
-rw-r--r--vendor/k8s.io/apimachinery/pkg/labels/labels.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/labels/selector.go30
-rw-r--r--vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/codec.go38
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/conversion.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/converter.go38
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/doc.go22
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/embedded.go28
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/error.go66
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/extension.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go74
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/generated.proto24
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/helper.go55
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go51
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/mapper.go98
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go40
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go15
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/scheme.go248
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go53
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go164
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go48
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go48
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go145
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/types.go4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go10
-rw-r--r--vendor/k8s.io/apimachinery/pkg/types/namespacedname.go17
-rw-r--r--vendor/k8s.io/apimachinery/pkg/types/patch.go1
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/clock/clock.go63
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/errors/errors.go38
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go29
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go8
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go23
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go15
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go93
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/net/http.go62
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/net/interface.go64
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/net/port_range.go66
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go47
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/byte.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/doc.go4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/empty.go4
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/int.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/int32.go203
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/int64.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/sets/string.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go2
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/validation/validation.go25
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/wait/wait.go187
-rw-r--r--vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go8
-rw-r--r--vendor/k8s.io/apimachinery/pkg/version/doc.go3
-rw-r--r--vendor/k8s.io/apimachinery/pkg/version/helpers.go88
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/filter.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/mux.go6
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go33
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/until.go87
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/watch.go58
-rw-r--r--vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go6
-rw-r--r--vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go2
-rw-r--r--vendor/k8s.io/client-go/README.md190
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS9
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go1
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go7
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go1
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go10
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go59
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go31
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go2
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go26
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go24
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go55
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go59
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go142
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go92
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go32
-rw-r--r--vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go31
-rw-r--r--vendor/k8s.io/client-go/pkg/version/.gitattributes1
-rw-r--r--vendor/k8s.io/client-go/pkg/version/base.go2
-rw-r--r--vendor/k8s.io/client-go/pkg/version/def.bzl38
-rw-r--r--vendor/k8s.io/client-go/pkg/version/doc.go3
-rw-r--r--vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go166
-rw-r--r--vendor/k8s.io/client-go/rest/OWNERS26
-rw-r--r--vendor/k8s.io/client-go/rest/config.go149
-rw-r--r--vendor/k8s.io/client-go/rest/plugin.go4
-rw-r--r--vendor/k8s.io/client-go/rest/request.go153
-rw-r--r--vendor/k8s.io/client-go/rest/transport.go73
-rw-r--r--vendor/k8s.io/client-go/rest/urlbackoff.go8
-rw-r--r--vendor/k8s.io/client-go/rest/watch/decoder.go2
-rw-r--r--vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go2
-rw-r--r--vendor/k8s.io/client-go/tools/clientcmd/api/doc.go1
-rw-r--r--vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go9
-rw-r--r--vendor/k8s.io/client-go/tools/clientcmd/api/types.go44
-rw-r--r--vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go46
-rw-r--r--vendor/k8s.io/client-go/tools/metrics/OWNERS9
-rw-r--r--vendor/k8s.io/client-go/tools/remotecommand/reader.go41
-rw-r--r--vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go8
-rw-r--r--vendor/k8s.io/client-go/tools/remotecommand/v1.go10
-rw-r--r--vendor/k8s.io/client-go/tools/remotecommand/v2.go2
-rw-r--r--vendor/k8s.io/client-go/transport/OWNERS9
-rw-r--r--vendor/k8s.io/client-go/transport/cache.go8
-rw-r--r--vendor/k8s.io/client-go/transport/config.go33
-rw-r--r--vendor/k8s.io/client-go/transport/round_trippers.go82
-rw-r--r--vendor/k8s.io/client-go/transport/spdy/spdy.go2
-rw-r--r--vendor/k8s.io/client-go/transport/token_source.go149
-rw-r--r--vendor/k8s.io/client-go/transport/transport.go92
-rw-r--r--vendor/k8s.io/client-go/util/cert/OWNERS9
-rw-r--r--vendor/k8s.io/client-go/util/cert/cert.go157
-rw-r--r--vendor/k8s.io/client-go/util/cert/io.go60
-rw-r--r--vendor/k8s.io/client-go/util/cert/pem.go208
-rw-r--r--vendor/k8s.io/client-go/util/connrotation/connrotation.go105
-rw-r--r--vendor/k8s.io/client-go/util/flowcontrol/backoff.go4
-rw-r--r--vendor/k8s.io/client-go/util/keyutil/OWNERS7
-rw-r--r--vendor/k8s.io/client-go/util/keyutil/key.go323
-rw-r--r--vendor/k8s.io/klog/.travis.yml15
-rw-r--r--vendor/k8s.io/klog/CONTRIBUTING.md22
-rw-r--r--vendor/k8s.io/klog/LICENSE (renamed from vendor/github.com/golang/glog/LICENSE)0
-rw-r--r--vendor/k8s.io/klog/OWNERS19
-rw-r--r--vendor/k8s.io/klog/README.md97
-rw-r--r--vendor/k8s.io/klog/RELEASE.md9
-rw-r--r--vendor/k8s.io/klog/SECURITY_CONTACTS20
-rw-r--r--vendor/k8s.io/klog/code-of-conduct.md3
-rw-r--r--vendor/k8s.io/klog/klog.go (renamed from vendor/github.com/golang/glog/glog.go)162
-rw-r--r--vendor/k8s.io/klog/klog_file.go (renamed from vendor/github.com/golang/glog/glog_file.go)35
-rw-r--r--vendor/k8s.io/utils/LICENSE202
-rw-r--r--vendor/k8s.io/utils/integer/integer.go (renamed from vendor/k8s.io/client-go/util/integer/integer.go)6
-rw-r--r--vendor/modules.txt602
-rw-r--r--vendor/sigs.k8s.io/yaml/.gitignore20
-rw-r--r--vendor/sigs.k8s.io/yaml/.travis.yml14
-rw-r--r--vendor/sigs.k8s.io/yaml/CONTRIBUTING.md31
-rw-r--r--vendor/sigs.k8s.io/yaml/LICENSE50
-rw-r--r--vendor/sigs.k8s.io/yaml/OWNERS25
-rw-r--r--vendor/sigs.k8s.io/yaml/README.md121
-rw-r--r--vendor/sigs.k8s.io/yaml/RELEASE.md9
-rw-r--r--vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS17
-rw-r--r--vendor/sigs.k8s.io/yaml/code-of-conduct.md3
-rw-r--r--vendor/sigs.k8s.io/yaml/fields.go502
-rw-r--r--vendor/sigs.k8s.io/yaml/yaml.go319
-rw-r--r--vendor/sigs.k8s.io/yaml/yaml_go110.go14
1722 files changed, 216064 insertions, 74266 deletions
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 000000000..0cd380037
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,5 @@
+TAGS
+tags
+.*.swp
+tomlcheck/tomlcheck
+toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
new file mode 100644
index 000000000..8b8afc4f0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+install:
+ - go install ./...
+ - go get github.com/BurntSushi/toml-test
+script:
+ - export PATH="$PATH:$HOME/gopath/bin"
+ - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 000000000..6efcfd0ce
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
index 5a8e33254..01b574320 100644
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -1,14 +1,21 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
+The MIT License (MIT)
- Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+Copyright (c) 2013 TOML authors
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
new file mode 100644
index 000000000..3600848d3
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/Makefile
@@ -0,0 +1,19 @@
+install:
+ go install ./...
+
+test: install
+ go test -v
+ toml-test toml-test-decoder
+ toml-test -encoder toml-test-encoder
+
+fmt:
+ gofmt -w *.go */*.go
+ colcheck *.go */*.go
+
+tags:
+ find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+ git push origin master
+ git push github master
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 5a5df6370..7c1b37ecc 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -1,17 +1,17 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
+reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
+`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
-Spec: https://github.com/mojombo/toml
+Spec: https://github.com/toml-lang/toml
Compatible with TOML version
-[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-Documentation: http://godoc.org/github.com/BurntSushi/toml
+Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
@@ -26,8 +26,7 @@ go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
-[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
-
+[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
@@ -87,7 +86,7 @@ type TOML struct {
### Using the `encoding.TextUnmarshaler` interface
-Here's an example that automatically parses duration strings into
+Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
@@ -120,7 +119,7 @@ for _, s := range favorites.Song {
}
```
-And you'll also need a `duration` type that satisfies the
+And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
@@ -217,4 +216,3 @@ Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
-
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index c26b00c01..b0fd51d5b 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -10,7 +10,9 @@ import (
"time"
)
-var e = fmt.Errorf
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
@@ -103,6 +105,13 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ }
+ if rv.IsNil() {
+ return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ }
p, err := parse(data)
if err != nil {
return MetaData{}, err
@@ -111,7 +120,7 @@ func Decode(data string, v interface{}) (MetaData, error) {
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
- return md, md.unify(p.mapping, rvalue(v))
+ return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
@@ -211,7 +220,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
- return e("Unsupported type '%s'.", rv.Kind())
+ return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
@@ -219,7 +228,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
- return e("Unsupported type '%s'.", rv.Kind())
+ return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -228,7 +237,8 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil {
return nil
}
- return mismatch(rv, "map", mapping)
+ return e("type mismatch for %s: expected table but found %T",
+ rv.Type().String(), mapping)
}
for key, datum := range tmap {
@@ -253,14 +263,13 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
- return e("Type mismatch for '%s.%s': %s",
- rv.Type().String(), f.name, err)
+ return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
- return e("Field '%s.%s' is unexported, and therefore cannot "+
- "be loaded with reflection.", rv.Type().String(), f.name)
+ return e("cannot write unexported field %s.%s",
+ rv.Type().String(), f.name)
}
}
}
@@ -378,15 +387,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
- return e("Value '%d' is out of range for int8.", num)
+ return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
- return e("Value '%d' is out of range for int16.", num)
+ return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
- return e("Value '%d' is out of range for int32.", num)
+ return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
@@ -397,15 +406,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
- return e("Value '%d' is out of range for uint8.", num)
+ return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
- return e("Value '%d' is out of range for uint16.", num)
+ return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
- return e("Value '%d' is out of range for uint32.", num)
+ return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
@@ -471,7 +480,7 @@ func rvalue(v interface{}) reflect.Value {
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
- if v.CanAddr() {
+ if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
@@ -496,10 +505,5 @@ func isUnifiable(rv reflect.Value) bool {
}
func badtype(expected string, data interface{}) error {
- return e("Expected %s but found '%T'.", expected, data)
-}
-
-func mismatch(user reflect.Value, expected string, data interface{}) error {
- return e("Type mismatch for %s. Expected %s but found '%T'.",
- user.Type().String(), expected, data)
+ return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
index ef6f545fa..b9914a679 100644
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -77,9 +77,8 @@ func (k Key) maybeQuoted(i int) string {
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- } else {
- return k[i]
}
+ return k[i]
}
func (k Key) add(piece string) Key {
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index fe2680004..b371f396e 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
-The specification implemented: https://github.com/mojombo/toml
+The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 4e4c97aed..d905c21a2 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -16,17 +16,17 @@ type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
- "can't encode array with mixed element types")
+ "toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
- "can't encode array with nil element")
+ "toml: cannot encode array with nil element")
errNonString = errors.New(
- "can't encode a map with non-string key type")
+ "toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
- "can't encode an anonymous field that is not a struct")
+ "toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
- "TOML array element can't contain a table")
+ "toml: TOML array element cannot contain a table")
errNoKey = errors.New(
- "top-level values must be a Go map or struct")
+ "toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
@@ -148,7 +148,7 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
- panic(e("Unsupported type for key '%s': %s", key, k))
+ panic(e("unsupported type for key '%s': %s", key, k))
}
}
@@ -160,7 +160,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+ enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
@@ -191,7 +191,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.String:
enc.writeQuoted(rv.String())
default:
- panic(e("Unexpected primitive type: %s", rv.Kind()))
+ panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
@@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
- // Output an extra new line between top-level tables.
+ // Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
@@ -315,10 +315,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
t := f.Type
switch t.Kind() {
case reflect.Struct:
- addFields(t, frv, f.Index)
- continue
+ // Treat anonymous struct fields with
+ // tag names as though they are not
+ // anonymous, like encoding/json does.
+ if getOptions(f.Tag).name == "" {
+ addFields(t, frv, f.Index)
+ continue
+ }
case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct {
+ if t.Elem().Kind() == reflect.Struct &&
+ getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
@@ -347,17 +353,18 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
continue
}
- tag := sft.Tag.Get("toml")
- if tag == "-" {
+ opts := getOptions(sft.Tag)
+ if opts.skip {
continue
}
- keyName, opts := getOptions(tag)
- if keyName == "" {
- keyName = sft.Name
+ keyName := sft.Name
+ if opts.name != "" {
+ keyName = opts.name
}
- if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
+ if opts.omitempty && isEmpty(sf) {
continue
- } else if _, ok := opts["omitzero"]; ok && isZero(sf) {
+ }
+ if opts.omitzero && isZero(sf) {
continue
}
@@ -392,9 +399,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
- } else {
- return tomlArray
}
+ return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
@@ -451,17 +457,30 @@ func tomlArrayType(rv reflect.Value) tomlType {
return firstType
}
-func getOptions(keyName string) (string, map[string]struct{}) {
- opts := make(map[string]struct{})
- ss := strings.Split(keyName, ",")
- name := ss[0]
- if len(ss) > 1 {
- for _, opt := range ss {
- opts[opt] = struct{}{}
+type tagOptions struct {
+ skip bool // "-"
+ name string
+ omitempty bool
+ omitzero bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+ t := tag.Get("toml")
+ if t == "-" {
+ return tagOptions{skip: true}
+ }
+ var opts tagOptions
+ parts := strings.Split(t, ",")
+ opts.name = parts[0]
+ for _, s := range parts[1:] {
+ switch s {
+ case "omitempty":
+ opts.omitempty = true
+ case "omitzero":
+ opts.omitzero = true
}
}
-
- return name, opts
+ return opts
}
func isZero(rv reflect.Value) bool {
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index 9b20b3a81..e0a742a88 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -3,6 +3,7 @@ package toml
import (
"fmt"
"strings"
+ "unicode"
"unicode/utf8"
)
@@ -29,24 +30,28 @@ const (
itemArrayTableEnd
itemKeyStart
itemCommentStart
+ itemInlineTableStart
+ itemInlineTableEnd
)
const (
- eof = 0
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- arrayValTerm = ','
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
+ eof = 0
+ comma = ','
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+ inlineTableStart = '{'
+ inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
@@ -55,11 +60,18 @@ type lexer struct {
input string
start int
pos int
- width int
line int
state stateFn
items chan item
+ // Allow for backing up up to three runes.
+ // This is necessary because TOML contains 3-rune tokens (""" and ''').
+ prevWidths [3]int
+ nprev int // how many of prevWidths are in use
+ // If we emit an eof, we can still back up, but it is not OK to call
+ // next again.
+ atEOF bool
+
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
@@ -87,7 +99,7 @@ func (lx *lexer) nextItem() item {
func lex(input string) *lexer {
lx := &lexer{
- input: input + "\n",
+ input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
@@ -102,7 +114,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop.")
+ return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@@ -124,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) {
}
func (lx *lexer) next() (r rune) {
+ if lx.atEOF {
+ panic("next called after EOF")
+ }
if lx.pos >= len(lx.input) {
- lx.width = 0
+ lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
- r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.pos += lx.width
+ lx.prevWidths[2] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[0]
+ if lx.nprev < 3 {
+ lx.nprev++
+ }
+ r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.prevWidths[0] = w
+ lx.pos += w
return r
}
@@ -142,9 +163,20 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
-// backup steps back one rune. Can be called only once per call of next.
+// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
- lx.pos -= lx.width
+ if lx.atEOF {
+ lx.atEOF = false
+ return
+ }
+ if lx.nprev < 1 {
+ panic("backed up too far")
+ }
+ w := lx.prevWidths[0]
+ lx.prevWidths[0] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[2]
+ lx.nprev--
+ lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
@@ -166,9 +198,22 @@ func (lx *lexer) peek() rune {
return r
}
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+ for {
+ r := lx.next()
+ if pred(r) {
+ continue
+ }
+ lx.backup()
+ lx.ignore()
+ return
+ }
+}
+
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
-// character (new lines, tabs, etc.).
+// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
@@ -184,7 +229,6 @@ func lexTop(lx *lexer) stateFn {
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
-
switch r {
case commentStart:
lx.push(lexTop)
@@ -193,7 +237,7 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
- return lx.errorf("Unexpected EOF.")
+ return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
@@ -208,12 +252,12 @@ func lexTop(lx *lexer) stateFn {
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a new line. If it sees EOF, it will quit the lexer successfully.
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
- // a comment will read to a new line for us.
+ // a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
@@ -222,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn {
lx.ignore()
return lexTop
case r == eof:
- lx.ignore()
- return lexTop
+ lx.emit(itemEOF)
+ return nil
}
- return lx.errorf("Expected a top-level item to end with a new line, "+
- "comment or EOF, but got %q instead.", r)
+ return lx.errorf("expected a top-level item to end with a newline, "+
+ "comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -253,21 +297,22 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("Expected end of table array name delimiter %q, "+
- "but got %q instead.", arrayTableEnd, r)
+ return lx.errorf("expected end of table array name delimiter %q, "+
+ "but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
- return lx.errorf("Unexpected end of table name. (Table names cannot " +
- "be empty.)")
+ return lx.errorf("unexpected end of table name " +
+ "(table names cannot be empty)")
case r == tableSep:
- return lx.errorf("Unexpected table separator. (Table names cannot " +
- "be empty.)")
+ return lx.errorf("unexpected table separator " +
+ "(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
@@ -277,24 +322,22 @@ func lexTableNameStart(lx *lexer) stateFn {
}
}
-// lexTableName lexes the name of a table. It assumes that at least one
+// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
+ r := lx.next()
+ if isBareKeyChar(r) {
return lexBareTableName
- case r == tableSep || r == tableEnd:
- lx.backup()
- lx.emitTrim(itemText)
- return lexTableNameEnd
- default:
- return lx.errorf("Bare keys cannot contain %q.", r)
}
+ lx.backup()
+ lx.emit(itemText)
+ return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
@@ -304,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
- return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
- "instead.", r)
+ return lx.errorf("expected '.' or ']' to end table name, "+
+ "but got %q instead", r)
}
}
@@ -315,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
- return lx.errorf("Unexpected key separator %q.", keySep)
+ return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
@@ -338,14 +381,15 @@ func lexBareKey(lx *lexer) stateFn {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
- lx.emitTrim(itemText)
+ lx.backup()
+ lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
- lx.emitTrim(itemText)
+ lx.emit(itemText)
return lexKeyEnd
default:
- return lx.errorf("Bare keys cannot contain %q.", r)
+ return lx.errorf("bare keys cannot contain %q", r)
}
}
@@ -358,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
- return lx.errorf("Expected key separator %q, but got %q instead.",
+ return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
@@ -367,20 +411,26 @@ func lexKeyEnd(lx *lexer) stateFn {
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT new lines.
- // In array syntax, the array states are responsible for ignoring new
- // lines.
+ // We allow whitespace to precede a value, but NOT newlines.
+ // In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
- if isWhitespace(r) {
+ switch {
+ case isWhitespace(r):
return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
}
-
- switch {
- case r == arrayStart:
+ switch r {
+ case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
- case r == stringStart:
+ case inlineTableStart:
+ lx.ignore()
+ lx.emit(itemInlineTableStart)
+ return lexInlineTableValue
+ case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
@@ -390,7 +440,7 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the '"'
return lexString
- case r == rawStringStart:
+ case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore """
@@ -400,23 +450,24 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the "'"
return lexRawString
- case r == 't':
- return lexTrue
- case r == 'f':
- return lexFalse
- case r == '-':
+ case '+', '-':
return lexNumberStart
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- case r == '.': // special error case, be kind to users
- return lx.errorf("Floats must start with a digit, not '.'.")
+ case '.': // special error case, be kind to users
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup()
+ return lexBool
}
- return lx.errorf("Expected value but found %q instead.", r)
+ return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and new lines are ignored.
+// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
@@ -425,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
- case r == arrayValTerm:
- return lx.errorf("Unexpected array value terminator %q.",
- arrayValTerm)
+ case r == comma:
+ return lx.errorf("unexpected comma")
case r == arrayEnd:
+ // NOTE(caleb): The spec isn't clear about whether you can have
+ // a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
@@ -437,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn {
return lexValue
}
-// lexArrayValueEnd consumes the cruft between values of an array. Namely,
-// it ignores whitespace and expects either a ',' or a ']'.
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
@@ -447,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
- case r == arrayValTerm:
+ case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
- return lx.errorf("Expected an array value terminator %q or an array "+
- "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+ return lx.errorf(
+ "expected a comma or array terminator %q, but got %q instead",
+ arrayEnd, r,
+ )
}
-// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
-// just been consumed.
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValue)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ lx.backup()
+ lx.push(lexInlineTableValueEnd)
+ return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValueEnd)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexInlineTableValue
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ return lx.errorf("expected a comma or an inline table terminator %q, "+
+ "but got %q instead", inlineTableEnd, r)
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemInlineTableEnd)
+ return lx.pop()
+}
+
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
case isNL(r):
- return lx.errorf("Strings cannot contain new lines.")
+ return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
@@ -488,11 +598,12 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == '\\':
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case '\\':
return lexMultilineStringEscape
- case r == stringEnd:
+ case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
@@ -516,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
+ case r == eof:
+ return lx.errorf("unexpected EOF")
case isNL(r):
- return lx.errorf("Strings cannot contain new lines.")
+ return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
@@ -529,12 +642,13 @@ func lexRawString(lx *lexer) stateFn {
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'" has already been consumed and
+// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == rawStringEnd:
+ switch lx.next() {
+ case eof:
+ return lx.errorf("unexpected EOF")
+ case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
@@ -559,11 +673,10 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
- } else {
- lx.backup()
- lx.push(lexMultilineString)
- return lexStringEscape(lx)
}
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
@@ -588,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
- return lx.errorf("Invalid escape character %q. Only the following "+
+ return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
- "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
- "\\uXXXX and \\UXXXXXXXX.", r)
+ `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
@@ -599,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf("Expected four hexadecimal digits after '\\u', "+
- "but got '%s' instead.", lx.current())
+ return lx.errorf(`expected four hexadecimal digits after '\u', `+
+ "but got %q instead", lx.current())
}
}
return lx.pop()
@@ -611,40 +723,43 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
- "but got '%s' instead.", lx.current())
+ return lx.errorf(`expected eight hexadecimal digits after '\U', `+
+ "but got %q instead", lx.current())
}
}
return lx.pop()
}
-// lexNumberOrDateStart consumes either a (positive) integer, float or
-// datetime. It assumes that NO negative sign has been consumed.
+// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("Floats must start with a digit, not '.'.")
- } else {
- return lx.errorf("Expected a digit but got %q.", r)
- }
+ if isDigit(r) {
+ return lexNumberOrDate
}
- return lexNumberOrDate
+ switch r {
+ case '_':
+ return lexNumber
+ case 'e', 'E':
+ return lexFloat
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
}
-// lexNumberOrDate consumes either a (positive) integer, float or datetime.
+// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
- switch {
- case r == '-':
- if lx.pos-lx.start != 5 {
- return lx.errorf("All ISO8601 dates must be in full Zulu form.")
- }
- return lexDateAfterYear
- case isDigit(r):
+ if isDigit(r) {
return lexNumberOrDate
- case r == '.':
- return lexFloatStart
+ }
+ switch r {
+ case '-':
+ return lexDatetime
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
}
lx.backup()
@@ -652,46 +767,34 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lx.pop()
}
-// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
-// It assumes that "YYYY-" has already been consumed.
-func lexDateAfterYear(lx *lexer) stateFn {
- formats := []rune{
- // digits are '0'.
- // everything else is direct equality.
- '0', '0', '-', '0', '0',
- 'T',
- '0', '0', ':', '0', '0', ':', '0', '0',
- 'Z',
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDatetime
}
- for _, f := range formats {
- r := lx.next()
- if f == '0' {
- if !isDigit(r) {
- return lx.errorf("Expected digit in ISO8601 datetime, "+
- "but found %q instead.", r)
- }
- } else if f != r {
- return lx.errorf("Expected %q in ISO8601 datetime, "+
- "but found %q instead.", f, r)
- }
+ switch r {
+ case '-', 'T', ':', '.', 'Z', '+':
+ return lexDatetime
}
+
+ lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
-// lexNumberStart consumes either an integer or a float. It assumes that
-// a negative sign has already been read, but that *no* digits have been
-// consumed. lexNumberStart will move to the appropriate integer or float
-// states.
+// lexNumberStart consumes either an integer or a float. It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
- // we MUST see a digit. Even floats have to start with a digit.
+ // We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
- return lx.errorf("Floats must start with a digit, not '.'.")
- } else {
- return lx.errorf("Expected a digit but got %q.", r)
+ return lx.errorf("floats must start with a digit, not '.'")
}
+ return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
@@ -699,11 +802,14 @@ func lexNumberStart(lx *lexer) stateFn {
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
- switch {
- case isDigit(r):
+ if isDigit(r) {
return lexNumber
- case r == '.':
- return lexFloatStart
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
}
lx.backup()
@@ -711,60 +817,42 @@ func lexNumber(lx *lexer) stateFn {
return lx.pop()
}
-// lexFloatStart starts the consumption of digits of a float after a '.'.
-// Namely, at least one digit is required.
-func lexFloatStart(lx *lexer) stateFn {
- r := lx.next()
- if !isDigit(r) {
- return lx.errorf("Floats must have a digit after the '.', but got "+
- "%q instead.", r)
- }
- return lexFloat
-}
-
-// lexFloat consumes the digits of a float after a '.'.
-// Assumes that one digit has been consumed after a '.' already.
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
-// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
-// consumed.
-func lexConst(lx *lexer, s string) stateFn {
- for i := range s[1:] {
- if r := lx.next(); r != rune(s[i+1]) {
- return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
- s[:i]+string(r))
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if !unicode.IsLetter(r) {
+ lx.backup()
+ break
}
+ rs = append(rs, r)
}
- return nil
-}
-
-// lexTrue consumes the "rue" in "true". It assumes that 't' has already
-// been consumed.
-func lexTrue(lx *lexer) stateFn {
- if fn := lexConst(lx, "true"); fn != nil {
- return fn
- }
- lx.emit(itemBool)
- return lx.pop()
-}
-
-// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
-// been consumed.
-func lexFalse(lx *lexer) stateFn {
- if fn := lexConst(lx, "false"); fn != nil {
- return fn
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
}
- lx.emit(itemBool)
- return lx.pop()
+ return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
@@ -776,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn {
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first new line character, and pass control
+// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
@@ -834,13 +922,7 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
- case itemString:
- return "String"
- case itemRawString:
- return "String"
- case itemMultilineString:
- return "String"
- case itemRawMultilineString:
+ case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 6a82e84f6..50869ef92 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,7 +2,6 @@ package toml
import (
"fmt"
- "log"
"strconv"
"strings"
"time"
@@ -81,7 +80,7 @@ func (p *parser) next() item {
}
func (p *parser) bug(format string, v ...interface{}) {
- log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
@@ -179,10 +178,18 @@ func (p *parser) value(it item) (interface{}, tomlType) {
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
- num, err := strconv.ParseInt(it.val, 10, 64)
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits",
+ it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- // See comment below for floats describing why we make a
- // distinction between a bug and a user error.
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
@@ -194,29 +201,57 @@ func (p *parser) value(it item) (interface{}, tomlType) {
}
return num, p.typeOfPrimitive(it)
case itemFloat:
- num, err := strconv.ParseFloat(it.val, 64)
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be "+
+ "surrounded by digits", it.val)
+ }
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed "+
+ "by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseFloat(val, 64)
if err != nil {
- // Distinguish float values. Normally, it'd be a bug if the lexer
- // provides an invalid float, but it's possible that the float is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- //
- // This is also true for integers.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
- p.bug("Expected float value, but got '%s'.", it.val)
+ p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
- t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
- if err != nil {
- p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
+ var t time.Time
+ var ok bool
+ var err error
+ for _, format := range []string{
+ "2006-01-02T15:04:05Z07:00",
+ "2006-01-02T15:04:05",
+ "2006-01-02",
+ } {
+ t, err = time.ParseInLocation(format, it.val, time.Local)
+ if err == nil {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
@@ -234,11 +269,75 @@ func (p *parser) value(it item) (interface{}, tomlType) {
types = append(types, typ)
}
return array, p.typeOfArray(types)
+ case itemInlineTableStart:
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ p.currentKey = ""
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ != itemKeyStart {
+ p.bug("Expected key start but instead found %q, around line %d",
+ it.val, p.approxLine)
+ }
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ // retrieve key
+ k := p.next()
+ p.approxLine = k.line
+ kname := p.keyString(k)
+
+ // retrieve value
+ p.currentKey = kname
+ val, typ := p.value(p.next())
+ // make sure we keep metadata up to date
+ p.setType(kname, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[kname] = val
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ accept = false
+ continue
+ }
+ accept = true
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
+
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
new file mode 100644
index 000000000..562164be0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/session.vim
@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 6da608af4..608997c22 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -95,8 +95,8 @@ func typeFields(t reflect.Type) []field {
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
- name, _ := getOptions(sf.Tag.Get("toml"))
- if name == "-" {
+ opts := getOptions(sf.Tag)
+ if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
@@ -110,8 +110,9 @@ func typeFields(t reflect.Type) []field {
}
// Record found field and index sequence.
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := name != ""
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := opts.name != ""
+ name := opts.name
if name == "" {
name = sf.Name
}
diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml
new file mode 100644
index 000000000..629470cf6
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/.travis.yml
@@ -0,0 +1,31 @@
+dist: xenial
+language: go
+
+go:
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+
+os:
+ - linux
+ - osx
+
+matrix:
+ include:
+    - name: "Go 1.11.x CentOS 32bits"
+      language: go
+      go: 1.11.x
+      os: linux
+      services:
+        - docker
+      script:
+        # Please update Go version in travis_test_32 as needed
+        - "docker run -i -v \"${PWD}:/zstd\" toopher/centos-i386:centos6 /bin/bash -c \"linux32 --32bit i386 /zstd/travis_test_32.sh\""
+
+install:
+ - "wget https://github.com/DataDog/zstd/files/2246767/mr.zip"
+ - "unzip mr.zip"
+script:
+ - "go build"
+ - "PAYLOAD=`pwd`/mr go test -v"
+ - "PAYLOAD=`pwd`/mr go test -bench ."
diff --git a/vendor/github.com/DataDog/zstd/LICENSE b/vendor/github.com/DataDog/zstd/LICENSE
new file mode 100644
index 000000000..345c1eb93
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/LICENSE
@@ -0,0 +1,27 @@
+Simplified BSD License
+
+Copyright (c) 2016, Datadog <info@datadoghq.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md
new file mode 100644
index 000000000..a444e0b2c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/README.md
@@ -0,0 +1,120 @@
+# Zstd Go Wrapper
+
+[C Zstd Homepage](https://github.com/Cyan4973/zstd)
+
+The current headers and C files are from *v1.4.0* (Commit
+[83b51e9](https://github.com/facebook/zstd/releases/tag/v1.4.0)).
+
+## Usage
+
+There are two main APIs:
+
+* simple Compress/Decompress
+* streaming API (io.Reader/io.Writer)
+
+The compress/decompress APIs mirror those of lz4, while the streaming API was
+designed to be a drop-in replacement for zlib.
+
+### Simple `Compress/Decompress`
+
+
+```go
+// Compress compresses the byte array given in src and writes it to dst.
+// If you already have a buffer allocated, you can pass it to prevent allocation
+// If not, you can pass nil as dst.
+// If the buffer is too small, it will be reallocated, resized, and returned by the function
+// If dst is nil, this will allocate the worst case size (CompressBound(src))
+Compress(dst, src []byte) ([]byte, error)
+```
+
+```go
+// CompressLevel is the same as Compress but you can pass another compression level
+CompressLevel(dst, src []byte, level int) ([]byte, error)
+```
+
+```go
+// Decompress will decompress your payload into dst.
+// If you already have a buffer allocated, you can pass it to prevent allocation
+// If not, you can pass nil as dst (allocates a 4*src size as default).
+// If the buffer is too small, it will retry 3 times by doubling the dst size
+// After max retries, it will switch to the slower stream API to be sure to be able
+// to decompress. Currently switches if compression ratio > 4*2**3=32.
+Decompress(dst, src []byte) ([]byte, error)
+```
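+
+For example, a minimal round-trip with the simple API could look like the
+sketch below (error handling is reduced to panics for brevity):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/DataDog/zstd"
+)
+
+func main() {
+	src := []byte("hello, zstd")
+
+	// Passing nil lets Compress allocate a worst-case sized buffer for us.
+	compressed, err := zstd.Compress(nil, src)
+	if err != nil {
+		panic(err)
+	}
+
+	// Passing nil lets Decompress allocate its own buffer as well.
+	decompressed, err := zstd.Decompress(nil, compressed)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(decompressed)) // hello, zstd
+}
+```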
+
+### Stream API
+
+```go
+// NewWriter creates a new object that can optionally be initialized with
+// a precomputed dictionary. If dict is nil, compress without a dictionary.
+// The dictionary array should not be changed during the use of this object.
+// You MUST CALL Close() to write the last bytes of a zstd stream and free C objects.
+NewWriter(w io.Writer) *Writer
+NewWriterLevel(w io.Writer, level int) *Writer
+NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer
+
+// Write compresses the input data and writes it to the underlying writer
+(w *Writer) Write(p []byte) (int, error)
+
+// Close flushes the buffer and frees C zstd objects
+(w *Writer) Close() error
+```
+
+```go
+// NewReader returns a new io.ReadCloser that will decompress data from the
+// underlying reader. If a dictionary is provided to NewReaderDict, it must
+// not be modified until Close is called. It is the caller's responsibility
+// to call Close, which frees up C objects.
+NewReader(r io.Reader) io.ReadCloser
+NewReaderDict(r io.Reader, dict []byte) io.ReadCloser
+```
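+
+A streaming round-trip through an in-memory buffer might look like the sketch
+below (a `bytes.Buffer` stands in for any io.Writer/io.Reader, such as a file
+or a network connection):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/DataDog/zstd"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Compress into the buffer; Close must be called to flush the stream.
+	w := zstd.NewWriter(&buf)
+	if _, err := w.Write([]byte("streaming payload")); err != nil {
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+
+	// Decompress from the buffer; Close frees the underlying C objects.
+	r := zstd.NewReader(&buf)
+	defer r.Close()
+	out, err := ioutil.ReadAll(r)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out)) // streaming payload
+}
+```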
+
+### Benchmarks (benchmarked with v0.5.0)
+
+The author of Zstd also wrote lz4. Zstd is intended to occupy a speed/ratio
+level similar to what zlib currently provides. In our tests, it can always
+be made better than zlib by choosing an appropriate level while still
+keeping compression and decompression time faster than zlib.
+
+You can run the benchmarks against your own payloads using the Go benchmark tool.
+Just export your payload's file path as the `PAYLOAD` environment variable and run the benchmarks:
+
+```
+go test -bench .
+```
+
+Compression of a 7 MB PDF, zstd (this wrapper) vs [czlib](https://github.com/DataDog/czlib):
+```
+BenchmarkCompression 5 221056624 ns/op 67.34 MB/s
+BenchmarkDecompression 100 18370416 ns/op 810.32 MB/s
+
+BenchmarkFzlibCompress 2 610156603 ns/op 24.40 MB/s
+BenchmarkFzlibDecompress 20 81195246 ns/op 183.33 MB/s
+```
+
+The compression ratio is also better by a margin of ~20%.
+Compression speed is always better than zlib on all the payloads we tested.
+However, [czlib](https://github.com/DataDog/czlib) has optimisations that make it
+faster at decompressing small payloads:
+
+```
+Testing with size: 11... czlib: 8.97 MB/s, zstd: 3.26 MB/s
+Testing with size: 27... czlib: 23.3 MB/s, zstd: 8.22 MB/s
+Testing with size: 62... czlib: 31.6 MB/s, zstd: 19.49 MB/s
+Testing with size: 141... czlib: 74.54 MB/s, zstd: 42.55 MB/s
+Testing with size: 323... czlib: 155.14 MB/s, zstd: 99.39 MB/s
+Testing with size: 739... czlib: 235.9 MB/s, zstd: 216.45 MB/s
+Testing with size: 1689... czlib: 116.45 MB/s, zstd: 345.64 MB/s
+Testing with size: 3858... czlib: 176.39 MB/s, zstd: 617.56 MB/s
+Testing with size: 8811... czlib: 254.11 MB/s, zstd: 824.34 MB/s
+Testing with size: 20121... czlib: 197.43 MB/s, zstd: 1339.11 MB/s
+Testing with size: 45951... czlib: 201.62 MB/s, zstd: 1951.57 MB/s
+```
+
+zstd starts to shine with payloads > 1 KB.
+
+### Stability - Current state: STABLE
+
+The C library seems to be pretty stable and, according to the author, has been tested and fuzzed.
+
+For the Go wrapper, the tests cover most common cases and we have successfully tested it on all staging and production data.
diff --git a/vendor/github.com/DataDog/zstd/ZSTD_LICENSE b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
new file mode 100644
index 000000000..a793a8028
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
@@ -0,0 +1,30 @@
+BSD License
+
+For Zstandard software
+
+Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h
new file mode 100644
index 000000000..d955bd677
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/bitstream.h
@@ -0,0 +1,455 @@
+/* ******************************************************************
+ bitstream
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time optimization is not available for all compilers,
+* these functions are defined in a .h file so they can be included.
+*/
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include "mem.h" /* unaligned access routines */
+#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
+#include "error_private.h" /* error codes and messages */
+
+
+/*=========================================
+* Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+# include <immintrin.h> /* support for bextr (experimental) */
+#endif
+
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+* bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct {
+ size_t bitContainer;
+ unsigned bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+* bitStream will never write outside of this buffer.
+* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+* bits are first added to a local register.
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Writing data into memory is an explicit operation, performed by the flushBits function.
+* Hence, keep track of how many bits are potentially stored in the local register to avoid register overflow.
+* After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+* Last operation is to close the bitStream.
+* The function returns the final size of CStream in bytes.
+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+* A chunk of the bitStream is then stored into a local register.
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* You can then retrieve bitFields stored into the local register, **in reverse order**.
+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+* Otherwise, it can be less than that, so proceed accordingly.
+* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+ assert(val != 0);
+ {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
+
+/*===== Local Constants =====*/
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+* bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ * `dstCapacity` must be > sizeof(size_t)
+ * @return : 0 if success,
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ bitC->bitContainer = 0;
+ bitC->bitPos = 0;
+ bitC->startPtr = (char*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! BIT_addBits() :
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ * works only if `value` is _clean_,
+ * meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= value << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ * assumption : bitContainer has not overflowed
+ * unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ assert(bitC->ptr <= bitC->endPtr);
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ * assumption : bitContainer has not overflowed
+ * safe version; checks for buffer overflow, and prevents it.
+ * note : does not signal buffer overflow.
+ * overflow will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ * @return : size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+ BIT_addBitsFast(bitC, 1, 1); /* endMark */
+ BIT_flushBits(bitC);
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ /* fall-through */
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ /* fall-through */
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ /* fall-through */
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ /* fall-through */
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ /* fall-through */
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ /* fall-through */
+
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+ return bitContainer >> start;
+}
+
+MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+ U32 const regMask = sizeof(bitContainer)*8 - 1;
+ /* if start > regMask, bitstream is corrupted, and result is undefined */
+ assert(nbBits < BIT_MASK_SIZE);
+ return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
+}
+
+MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ * Provides next n bits from local register.
+ * local register is not modified.
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ /* arbitrate between double-shift and shift+mask */
+#if 1
+ /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
+ * bitstream is likely corrupted, and result is undefined */
+ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+ /* this code path is slower on my os-x laptop */
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ * Read (consume) next n bits from local register and update.
+ * Pay attention not to read more bits than are contained in the local register.
+ * @return : extracted value. */
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_readBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ assert(nbBits >= 1);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_reloadDStream() :
+ * Refill `bitD` from buffer previously set in BIT_initDStream() .
+ * This function is safe, it guarantees it will not read beyond src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->limitPtr) {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ /* start < ptr < limitPtr */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h
new file mode 100644
index 000000000..0836e3ed2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/compiler.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
+
+#if !defined(ZSTD_NO_INLINE)
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+# define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define FORCE_INLINE_ATTR __forceinline
+#else
+# define FORCE_INLINE_ATTR
+#endif
+
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
+
+/**
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+/**
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compiler's
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* force no inlining */
+#ifdef _MSC_VER
+# define FORCE_NOINLINE static __declspec(noinline)
+#else
+# ifdef __GNUC__
+# define FORCE_NOINLINE static __attribute__((__noinline__))
+# else
+# define FORCE_NOINLINE static
+# endif
+#endif
+
+/* target attribute */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
+#endif
+#if defined(__GNUC__)
+# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+#else
+# define TARGET_ATTRIBUTE(target)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+ #if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__x86_64__) || defined(_M_X86)) \
+ && !defined(__BMI2__)
+ # define DYNAMIC_BMI2 1
+ #else
+ # define DYNAMIC_BMI2 0
+ #endif
+#endif
+
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH build macro */
+#if defined(NO_PREFETCH)
+# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+#else
+# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+# else
+# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s) { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+}
+
+/* disable warnings */
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c
new file mode 100644
index 000000000..21464ad03
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cover.c
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* *****************************************************************************
+ * Constructs a dictionary using a heuristic based on the following paper:
+ *
+ * Liao, Petri, Moffat, Wirth
+ * Effective Construction of Relative Lempel-Ziv Dictionaries
+ * Published in WWW 2016.
+ *
+ * Adapted from code originally written by @ot (Giuseppe Ottaviano).
+ ******************************************************************************/
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdio.h> /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h> /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "cover.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/*-*************************************
+* Constants
+***************************************/
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
+#define DEFAULT_SPLITPOINT 1.0
+
+/*-*************************************
+* Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...) \
+ { \
+ fprintf(stderr, __VA_ARGS__); \
+ fflush(stderr); \
+ }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
+ DISPLAY(__VA_ARGS__); \
+ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
+ if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \
+ g_time = clock(); \
+ DISPLAY(__VA_ARGS__); \
+ } \
+ }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+/*-*************************************
+* Hash table
+***************************************
+* A small specialized hash map for storing activeDmers.
+* The map does not resize, so if it becomes full it will loop forever.
+* Thus, the map must be large enough to store every value.
+* The map implements linear probing and keeps its load less than 0.5.
+*/
+
+#define MAP_EMPTY_VALUE ((U32)-1)
+typedef struct COVER_map_pair_t_s {
+ U32 key;
+ U32 value;
+} COVER_map_pair_t;
+
+typedef struct COVER_map_s {
+ COVER_map_pair_t *data;
+ U32 sizeLog;
+ U32 size;
+ U32 sizeMask;
+} COVER_map_t;
+
+/**
+ * Clear the map.
+ */
+static void COVER_map_clear(COVER_map_t *map) {
+ memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
+}
+
+/**
+ * Initializes a map of the given size.
+ * Returns 1 on success and 0 on failure.
+ * The map must be destroyed with COVER_map_destroy().
+ * The map is only guaranteed to be large enough to hold size elements.
+ */
+static int COVER_map_init(COVER_map_t *map, U32 size) {
+ map->sizeLog = ZSTD_highbit32(size) + 2;
+ map->size = (U32)1 << map->sizeLog;
+ map->sizeMask = map->size - 1;
+ map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
+ if (!map->data) {
+ map->sizeLog = 0;
+ map->size = 0;
+ return 0;
+ }
+ COVER_map_clear(map);
+ return 1;
+}
+
+/**
+ * Internal hash function
+ */
+static const U32 prime4bytes = 2654435761U;
+static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
+ return (key * prime4bytes) >> (32 - map->sizeLog);
+}
+
+/**
+ * Helper function that returns the index that a key should be placed into.
+ */
+static U32 COVER_map_index(COVER_map_t *map, U32 key) {
+ const U32 hash = COVER_map_hash(map, key);
+ U32 i;
+ for (i = hash;; i = (i + 1) & map->sizeMask) {
+ COVER_map_pair_t *pos = &map->data[i];
+ if (pos->value == MAP_EMPTY_VALUE) {
+ return i;
+ }
+ if (pos->key == key) {
+ return i;
+ }
+ }
+}
+
+/**
+ * Returns the pointer to the value for key.
+ * If key is not in the map, it is inserted and the value is set to 0.
+ * The map must not be full.
+ */
+static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
+ COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
+ if (pos->value == MAP_EMPTY_VALUE) {
+ pos->key = key;
+ pos->value = 0;
+ }
+ return &pos->value;
+}
+
+/**
+ * Deletes key from the map if present.
+ */
+static void COVER_map_remove(COVER_map_t *map, U32 key) {
+ U32 i = COVER_map_index(map, key);
+ COVER_map_pair_t *del = &map->data[i];
+ U32 shift = 1;
+ if (del->value == MAP_EMPTY_VALUE) {
+ return;
+ }
+ for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
+ COVER_map_pair_t *const pos = &map->data[i];
+ /* If the position is empty we are done */
+ if (pos->value == MAP_EMPTY_VALUE) {
+ del->value = MAP_EMPTY_VALUE;
+ return;
+ }
+ /* If pos can be moved to del do so */
+ if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
+ del->key = pos->key;
+ del->value = pos->value;
+ del = pos;
+ shift = 1;
+ } else {
+ ++shift;
+ }
+ }
+}
+
+/**
+ * Destroys a map that is inited with COVER_map_init().
+ */
+static void COVER_map_destroy(COVER_map_t *map) {
+ if (map->data) {
+ free(map->data);
+ }
+ map->data = NULL;
+ map->size = 0;
+}
+
+/*-*************************************
+* Context
+***************************************/
+
+typedef struct {
+ const BYTE *samples;
+ size_t *offsets;
+ const size_t *samplesSizes;
+ size_t nbSamples;
+ size_t nbTrainSamples;
+ size_t nbTestSamples;
+ U32 *suffix;
+ size_t suffixSize;
+ U32 *freqs;
+ U32 *dmerAt;
+ unsigned d;
+} COVER_ctx_t;
+
+/* We need a global context for qsort... */
+static COVER_ctx_t *g_ctx = NULL;
+
+/*-*************************************
+* Helper functions
+***************************************/
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+ size_t sum = 0;
+ unsigned i;
+ for (i = 0; i < nbSamples; ++i) {
+ sum += samplesSizes[i];
+ }
+ return sum;
+}
+
+/**
+ * Returns -1 if the dmer at lp is less than the dmer at rp.
+ * Returns 0 if the dmers at lp and rp are equal.
+ * Returns 1 if the dmer at lp is greater than the dmer at rp.
+ */
+static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+ U32 const lhs = *(U32 const *)lp;
+ U32 const rhs = *(U32 const *)rp;
+ return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
+}
+/**
+ * Faster version for d <= 8.
+ */
+static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+ U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
+ U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
+ U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
+ if (lhs < rhs) {
+ return -1;
+ }
+ return (lhs > rhs);
+}
+
+/**
+ * Same as COVER_cmp() except ties are broken by pointer value
+ * NOTE: g_ctx must be set to call this function. A global is required because
+ * qsort doesn't take an opaque pointer.
+ */
+static int COVER_strict_cmp(const void *lp, const void *rp) {
+ int result = COVER_cmp(g_ctx, lp, rp);
+ if (result == 0) {
+ result = lp < rp ? -1 : 1;
+ }
+ return result;
+}
+/**
+ * Faster version for d <= 8.
+ */
+static int COVER_strict_cmp8(const void *lp, const void *rp) {
+ int result = COVER_cmp8(g_ctx, lp, rp);
+ if (result == 0) {
+ result = lp < rp ? -1 : 1;
+ }
+ return result;
+}
+
+/**
+ * Returns the first pointer in [first, last) whose element does not compare
+ * less than value. If no such element exists it returns last.
+ */
+static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
+ size_t value) {
+ size_t count = last - first;
+ while (count != 0) {
+ size_t step = count / 2;
+ const size_t *ptr = first;
+ ptr += step;
+ if (*ptr < value) {
+ first = ++ptr;
+ count -= step + 1;
+ } else {
+ count = step;
+ }
+ }
+ return first;
+}
+
+/**
+ * Generic groupBy function.
+ * Groups an array sorted by cmp into groups with equivalent values.
+ * Calls grp for each group.
+ */
+static void
+COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
+ int (*cmp)(COVER_ctx_t *, const void *, const void *),
+ void (*grp)(COVER_ctx_t *, const void *, const void *)) {
+ const BYTE *ptr = (const BYTE *)data;
+ size_t num = 0;
+ while (num < count) {
+ const BYTE *grpEnd = ptr + size;
+ ++num;
+ while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
+ grpEnd += size;
+ ++num;
+ }
+ grp(ctx, ptr, grpEnd);
+ ptr = grpEnd;
+ }
+}
+
+/*-*************************************
+* Cover functions
+***************************************/
+
+/**
+ * Called on each group of positions with the same dmer.
+ * Counts the frequency of each dmer and saves it in the suffix array.
+ * Fills `ctx->dmerAt`.
+ */
+static void COVER_group(COVER_ctx_t *ctx, const void *group,
+ const void *groupEnd) {
+ /* The group consists of all the positions with the same first d bytes. */
+ const U32 *grpPtr = (const U32 *)group;
+ const U32 *grpEnd = (const U32 *)groupEnd;
+ /* The dmerId is how we will reference this dmer.
+ * This allows us to map the whole dmer space to a much smaller space, the
+ * size of the suffix array.
+ */
+ const U32 dmerId = (U32)(grpPtr - ctx->suffix);
+ /* Count the number of samples this dmer shows up in */
+ U32 freq = 0;
+ /* Details */
+ const size_t *curOffsetPtr = ctx->offsets;
+ const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
+ /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
+ * different sample than the last.
+ */
+ size_t curSampleEnd = ctx->offsets[0];
+ for (; grpPtr != grpEnd; ++grpPtr) {
+ /* Save the dmerId for this position so we can get back to it. */
+ ctx->dmerAt[*grpPtr] = dmerId;
+ /* Dictionaries only help for the first reference to the dmer.
+ * After that zstd can reference the match from the previous reference.
+ * So only count each dmer once for each sample it is in.
+ */
+ if (*grpPtr < curSampleEnd) {
+ continue;
+ }
+ freq += 1;
+ /* Binary search to find the end of the sample *grpPtr is in.
+ * In the common case that grpPtr + 1 == grpEnd we can skip the binary
+ * search because the loop is over.
+ */
+ if (grpPtr + 1 != grpEnd) {
+ const size_t *sampleEndPtr =
+ COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
+ curSampleEnd = *sampleEndPtr;
+ curOffsetPtr = sampleEndPtr + 1;
+ }
+ }
+ /* At this point we are never going to look at this segment of the suffix
+ * array again. We take advantage of this fact to save memory.
+ * We store the frequency of the dmer in the first position of the group,
+ * which is dmerId.
+ */
+ ctx->suffix[dmerId] = freq;
+}
+
+
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the function:
+ *
+ * Let F(d) be the frequency of dmer d.
+ * Let S_i be the dmer at position i of segment S which has length k.
+ *
+ * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
+ COVER_map_t *activeDmers, U32 begin,
+ U32 end,
+ ZDICT_cover_params_t parameters) {
+ /* Constants */
+ const U32 k = parameters.k;
+ const U32 d = parameters.d;
+ const U32 dmersInK = k - d + 1;
+ /* Try each segment (activeSegment) and save the best (bestSegment) */
+ COVER_segment_t bestSegment = {0, 0, 0};
+ COVER_segment_t activeSegment;
+ /* Reset the activeDmers in the segment */
+ COVER_map_clear(activeDmers);
+ /* The activeSegment starts at the beginning of the epoch. */
+ activeSegment.begin = begin;
+ activeSegment.end = begin;
+ activeSegment.score = 0;
+ /* Slide the activeSegment through the whole epoch.
+ * Save the best segment in bestSegment.
+ */
+ while (activeSegment.end < end) {
+ /* The dmerId for the dmer at the next position */
+ U32 newDmer = ctx->dmerAt[activeSegment.end];
+ /* The entry in activeDmers for this dmerId */
+ U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
+ /* If the dmer isn't already present in the segment add its score. */
+ if (*newDmerOcc == 0) {
+      /* The paper suggests using the L-0.5 norm, but experiments show that it
+ * doesn't help.
+ */
+ activeSegment.score += freqs[newDmer];
+ }
+ /* Add the dmer to the segment */
+ activeSegment.end += 1;
+ *newDmerOcc += 1;
+
+ /* If the window is now too large, drop the first position */
+ if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+ U32 delDmer = ctx->dmerAt[activeSegment.begin];
+ U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
+ activeSegment.begin += 1;
+ *delDmerOcc -= 1;
+ /* If this is the last occurrence of the dmer, subtract its score */
+ if (*delDmerOcc == 0) {
+ COVER_map_remove(activeDmers, delDmer);
+ activeSegment.score -= freqs[delDmer];
+ }
+ }
+
+ /* If this segment is the best so far save it */
+ if (activeSegment.score > bestSegment.score) {
+ bestSegment = activeSegment;
+ }
+ }
+ {
+ /* Trim off the zero frequency head and tail from the segment. */
+ U32 newBegin = bestSegment.end;
+ U32 newEnd = bestSegment.begin;
+ U32 pos;
+ for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+ U32 freq = freqs[ctx->dmerAt[pos]];
+ if (freq != 0) {
+ newBegin = MIN(newBegin, pos);
+ newEnd = pos + 1;
+ }
+ }
+ bestSegment.begin = newBegin;
+ bestSegment.end = newEnd;
+ }
+ {
+ /* Zero out the frequency of each dmer covered by the chosen segment. */
+ U32 pos;
+ for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+ freqs[ctx->dmerAt[pos]] = 0;
+ }
+ }
+ return bestSegment;
+}
+
+/**
+ * Check the validity of the parameters.
+ * Returns non-zero if the parameters are valid and 0 otherwise.
+ */
+static int COVER_checkParameters(ZDICT_cover_params_t parameters,
+ size_t maxDictSize) {
+ /* k and d are required parameters */
+ if (parameters.d == 0 || parameters.k == 0) {
+ return 0;
+ }
+ /* k <= maxDictSize */
+ if (parameters.k > maxDictSize) {
+ return 0;
+ }
+ /* d <= k */
+ if (parameters.d > parameters.k) {
+ return 0;
+ }
+ /* 0 < splitPoint <= 1 */
+ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Clean up a context initialized with `COVER_ctx_init()`.
+ */
+static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
+ if (!ctx) {
+ return;
+ }
+ if (ctx->suffix) {
+ free(ctx->suffix);
+ ctx->suffix = NULL;
+ }
+ if (ctx->freqs) {
+ free(ctx->freqs);
+ ctx->freqs = NULL;
+ }
+ if (ctx->dmerAt) {
+ free(ctx->dmerAt);
+ ctx->dmerAt = NULL;
+ }
+ if (ctx->offsets) {
+ free(ctx->offsets);
+ ctx->offsets = NULL;
+ }
+}
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `COVER_ctx_destroy()`.
+ */
+static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+ const size_t *samplesSizes, unsigned nbSamples,
+ unsigned d, double splitPoint) {
+ const BYTE *const samples = (const BYTE *)samplesBuffer;
+ const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+ /* Split samples into testing and training sets */
+ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+ const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+ const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+ const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
+ /* Checks */
+ if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+ totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
+ DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+ (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
+ return 0;
+ }
+ /* Check if there are at least 5 training samples */
+ if (nbTrainSamples < 5) {
+ DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
+ return 0;
+ }
+  /* Check if there's a testing sample */
+ if (nbTestSamples < 1) {
+ DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
+ return 0;
+ }
+ /* Zero the context */
+ memset(ctx, 0, sizeof(*ctx));
+ DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+ (unsigned)trainingSamplesSize);
+ DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+ (unsigned)testSamplesSize);
+ ctx->samples = samples;
+ ctx->samplesSizes = samplesSizes;
+ ctx->nbSamples = nbSamples;
+ ctx->nbTrainSamples = nbTrainSamples;
+ ctx->nbTestSamples = nbTestSamples;
+ /* Partial suffix array */
+ ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
+ ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ /* Maps index to the dmerID */
+ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ /* The offsets of each file */
+ ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
+ if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
+ DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
+ COVER_ctx_destroy(ctx);
+ return 0;
+ }
+ ctx->freqs = NULL;
+ ctx->d = d;
+
+ /* Fill offsets from the samplesSizes */
+ {
+ U32 i;
+ ctx->offsets[0] = 0;
+ for (i = 1; i <= nbSamples; ++i) {
+ ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+ }
+ }
+ DISPLAYLEVEL(2, "Constructing partial suffix array\n");
+ {
+ /* suffix is a partial suffix array.
+ * It only sorts suffixes by their first parameters.d bytes.
+ * The sort is stable, so each dmer group is sorted by position in input.
+ */
+ U32 i;
+ for (i = 0; i < ctx->suffixSize; ++i) {
+ ctx->suffix[i] = i;
+ }
+ /* qsort doesn't take an opaque pointer, so pass as a global.
+ * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
+ */
+ g_ctx = ctx;
+#if defined(__OpenBSD__)
+ mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#else
+ qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#endif
+ }
+ DISPLAYLEVEL(2, "Computing frequencies\n");
+ /* For each dmer group (group of positions with the same first d bytes):
+ * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
+ * (groupBeginPtr - suffix). This allows us to go from position to
+ * dmerID so we can look up values in freq.
+ * 2. We calculate how many samples the dmer occurs in and save it in
+ * freqs[dmerId].
+ */
+ COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
+ (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
+ ctx->freqs = ctx->suffix;
+ ctx->suffix = NULL;
+ return 1;
+}
+
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
+{
+ const double ratio = (double)nbDmers / maxDictSize;
+ if (ratio >= 10) {
+ return;
+ }
+ LOCALDISPLAYLEVEL(displayLevel, 1,
+ "WARNING: The maximum dictionary size %u is too large "
+ "compared to the source size %u! "
+ "size(source)/size(dictionary) = %f, but it should be >= "
+ "10! This may lead to a subpar dictionary! We recommend "
+ "training on sources at least 10x, and up to 100x the "
+ "size of the dictionary!\n", (U32)maxDictSize,
+ (U32)nbDmers, ratio);
+}
+
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
+ U32 nbDmers, U32 k, U32 passes)
+{
+ const U32 minEpochSize = k * 10;
+ COVER_epoch_info_t epochs;
+ epochs.num = MAX(1, maxDictSize / k / passes);
+ epochs.size = nbDmers / epochs.num;
+ if (epochs.size >= minEpochSize) {
+ assert(epochs.size * epochs.num <= nbDmers);
+ return epochs;
+ }
+ epochs.size = MIN(minEpochSize, nbDmers);
+ epochs.num = nbDmers / epochs.size;
+ assert(epochs.size * epochs.num <= nbDmers);
+ return epochs;
+}
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
+ COVER_map_t *activeDmers, void *dictBuffer,
+ size_t dictBufferCapacity,
+ ZDICT_cover_params_t parameters) {
+ BYTE *const dict = (BYTE *)dictBuffer;
+ size_t tail = dictBufferCapacity;
+ /* Divide the data into epochs. We will select one segment from each epoch. */
+ const COVER_epoch_info_t epochs = COVER_computeEpochs(
+ (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
+ const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
+ size_t zeroScoreRun = 0;
+ size_t epoch;
+ DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+ (U32)epochs.num, (U32)epochs.size);
+ /* Loop through the epochs until there are no more segments or the dictionary
+ * is full.
+ */
+ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+ const U32 epochBegin = (U32)(epoch * epochs.size);
+ const U32 epochEnd = epochBegin + epochs.size;
+ size_t segmentSize;
+ /* Select a segment */
+ COVER_segment_t segment = COVER_selectSegment(
+ ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
+ /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+ */
+ if (segment.score == 0) {
+ if (++zeroScoreRun >= maxZeroScoreRun) {
+ break;
+ }
+ continue;
+ }
+ zeroScoreRun = 0;
+ /* Trim the segment if necessary and if it is too small then we are done */
+ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+ if (segmentSize < parameters.d) {
+ break;
+ }
+ /* We fill the dictionary from the back to allow the best segments to be
+ * referenced with the smallest offsets.
+ */
+ tail -= segmentSize;
+ memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+ DISPLAYUPDATE(
+ 2, "\r%u%% ",
+ (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+ }
+ DISPLAYLEVEL(2, "\r%79s\r", "");
+ return tail;
+}
+
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+ void *dictBuffer, size_t dictBufferCapacity,
+ const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t parameters)
+{
+ BYTE* const dict = (BYTE*)dictBuffer;
+ COVER_ctx_t ctx;
+ COVER_map_t activeDmers;
+ parameters.splitPoint = 1.0;
+ /* Initialize global data */
+ g_displayLevel = parameters.zParams.notificationLevel;
+ /* Checks */
+ if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+ return ERROR(GENERIC);
+ }
+ if (nbSamples == 0) {
+ DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+ return ERROR(GENERIC);
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ /* Initialize context and activeDmers */
+ if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+ parameters.d, parameters.splitPoint)) {
+ return ERROR(GENERIC);
+ }
+ COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
+ if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+ DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+ COVER_ctx_destroy(&ctx);
+ return ERROR(GENERIC);
+ }
+
+ DISPLAYLEVEL(2, "Building dictionary\n");
+ {
+ const size_t tail =
+ COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+ dictBufferCapacity, parameters);
+ const size_t dictionarySize = ZDICT_finalizeDictionary(
+ dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+ samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
+ if (!ZSTD_isError(dictionarySize)) {
+ DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+ (unsigned)dictionarySize);
+ }
+ COVER_ctx_destroy(&ctx);
+ COVER_map_destroy(&activeDmers);
+ return dictionarySize;
+ }
+}
+
+
+
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+ const size_t *samplesSizes, const BYTE *samples,
+ size_t *offsets,
+ size_t nbTrainSamples, size_t nbSamples,
+ BYTE *const dict, size_t dictBufferCapacity) {
+ size_t totalCompressedSize = ERROR(GENERIC);
+ /* Pointers */
+ ZSTD_CCtx *cctx;
+ ZSTD_CDict *cdict;
+ void *dst;
+ /* Local variables */
+ size_t dstCapacity;
+ size_t i;
+ /* Allocate dst with enough space to compress the maximum sized sample */
+ {
+ size_t maxSampleSize = 0;
+ i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+ for (; i < nbSamples; ++i) {
+ maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
+ }
+ dstCapacity = ZSTD_compressBound(maxSampleSize);
+ dst = malloc(dstCapacity);
+ }
+ /* Create the cctx and cdict */
+ cctx = ZSTD_createCCtx();
+ cdict = ZSTD_createCDict(dict, dictBufferCapacity,
+ parameters.zParams.compressionLevel);
+ if (!dst || !cctx || !cdict) {
+ goto _compressCleanup;
+ }
+ /* Compress each sample and sum their sizes (or error) */
+ totalCompressedSize = dictBufferCapacity;
+ i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+ for (; i < nbSamples; ++i) {
+ const size_t size = ZSTD_compress_usingCDict(
+ cctx, dst, dstCapacity, samples + offsets[i],
+ samplesSizes[i], cdict);
+ if (ZSTD_isError(size)) {
+ totalCompressedSize = ERROR(GENERIC);
+ goto _compressCleanup;
+ }
+ totalCompressedSize += size;
+ }
+_compressCleanup:
+ ZSTD_freeCCtx(cctx);
+ ZSTD_freeCDict(cdict);
+ if (dst) {
+ free(dst);
+ }
+ return totalCompressedSize;
+}
+
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+void COVER_best_init(COVER_best_t *best) {
+ if (best==NULL) return; /* compatible with init on NULL */
+ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
+ (void)ZSTD_pthread_cond_init(&best->cond, NULL);
+ best->liveJobs = 0;
+ best->dict = NULL;
+ best->dictSize = 0;
+ best->compressedSize = (size_t)-1;
+ memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
+void COVER_best_wait(COVER_best_t *best) {
+ if (!best) {
+ return;
+ }
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ while (best->liveJobs != 0) {
+ ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+void COVER_best_destroy(COVER_best_t *best) {
+ if (!best) {
+ return;
+ }
+ COVER_best_wait(best);
+ if (best->dict) {
+ free(best->dict);
+ }
+ ZSTD_pthread_mutex_destroy(&best->mutex);
+ ZSTD_pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+void COVER_best_start(COVER_best_t *best) {
+ if (!best) {
+ return;
+ }
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ ++best->liveJobs;
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, whether on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+ ZDICT_cover_params_t parameters, void *dict,
+ size_t dictSize) {
+ if (!best) {
+ return;
+ }
+ {
+ size_t liveJobs;
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ --best->liveJobs;
+ liveJobs = best->liveJobs;
+ /* If the new dictionary is better */
+ if (compressedSize < best->compressedSize) {
+ /* Allocate space if necessary */
+ if (!best->dict || best->dictSize < dictSize) {
+ if (best->dict) {
+ free(best->dict);
+ }
+ best->dict = malloc(dictSize);
+ if (!best->dict) {
+ best->compressedSize = ERROR(GENERIC);
+ best->dictSize = 0;
+ ZSTD_pthread_cond_signal(&best->cond);
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+ return;
+ }
+ }
+ /* Save the dictionary, parameters, and size */
+ memcpy(best->dict, dict, dictSize);
+ best->dictSize = dictSize;
+ best->parameters = parameters;
+ best->compressedSize = compressedSize;
+ }
+ if (liveJobs == 0) {
+ ZSTD_pthread_cond_broadcast(&best->cond);
+ }
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+ }
+}
+
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+ const COVER_ctx_t *ctx;
+ COVER_best_t *best;
+ size_t dictBufferCapacity;
+ ZDICT_cover_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void COVER_tryParameters(void *opaque) {
+ /* Save parameters as local variables */
+ COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+ const COVER_ctx_t *const ctx = data->ctx;
+ const ZDICT_cover_params_t parameters = data->parameters;
+ size_t dictBufferCapacity = data->dictBufferCapacity;
+ size_t totalCompressedSize = ERROR(GENERIC);
+ /* Allocate space for hash table, dict, and freqs */
+ COVER_map_t activeDmers;
+ BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+ U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+ DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+ goto _cleanup;
+ }
+ if (!dict || !freqs) {
+ DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+ goto _cleanup;
+ }
+ /* Copy the frequencies because we need to modify them */
+ memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+ /* Build the dictionary */
+ {
+ const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+ dictBufferCapacity, parameters);
+ dictBufferCapacity = ZDICT_finalizeDictionary(
+ dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+ ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples,
+ parameters.zParams);
+ if (ZDICT_isError(dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+ goto _cleanup;
+ }
+ }
+ /* Check total compressed size */
+ totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
+ ctx->samples, ctx->offsets,
+ ctx->nbTrainSamples, ctx->nbSamples,
+ dict, dictBufferCapacity);
+
+_cleanup:
+ COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+ dictBufferCapacity);
+ free(data);
+ COVER_map_destroy(&activeDmers);
+ if (dict) {
+ free(dict);
+ }
+ if (freqs) {
+ free(freqs);
+ }
+}
+
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+ void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
+ const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t *parameters) {
+ /* constants */
+ const unsigned nbThreads = parameters->nbThreads;
+ const double splitPoint =
+ parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+ const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+ const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+ const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+ const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+ const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+ const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+ const unsigned kIterations =
+ (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+ /* Local variables */
+ const int displayLevel = parameters->zParams.notificationLevel;
+ unsigned iteration = 1;
+ unsigned d;
+ unsigned k;
+ COVER_best_t best;
+ POOL_ctx *pool = NULL;
+ int warned = 0;
+
+ /* Checks */
+ if (splitPoint <= 0 || splitPoint > 1) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+ return ERROR(GENERIC);
+ }
+ if (kMinK < kMaxD || kMaxK < kMinK) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+ return ERROR(GENERIC);
+ }
+ if (nbSamples == 0) {
+ DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+ return ERROR(GENERIC);
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ if (nbThreads > 1) {
+ pool = POOL_create(nbThreads, 1);
+ if (!pool) {
+ return ERROR(memory_allocation);
+ }
+ }
+ /* Initialization */
+ COVER_best_init(&best);
+ /* Turn down global display level to clean up display at level 2 and below */
+ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+ /* Loop through d first because each new value needs a new context */
+ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+ kIterations);
+ for (d = kMinD; d <= kMaxD; d += 2) {
+ /* Initialize the context for this value of d */
+ COVER_ctx_t ctx;
+ LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+ if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint)) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return ERROR(GENERIC);
+ }
+ if (!warned) {
+ COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
+ warned = 1;
+ }
+ /* Loop through k reusing the same context */
+ for (k = kMinK; k <= kMaxK; k += kStepSize) {
+ /* Prepare the arguments */
+ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
+ sizeof(COVER_tryParameters_data_t));
+ LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+ if (!data) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+ COVER_best_destroy(&best);
+ COVER_ctx_destroy(&ctx);
+ POOL_free(pool);
+ return ERROR(GENERIC);
+ }
+ data->ctx = &ctx;
+ data->best = &best;
+ data->dictBufferCapacity = dictBufferCapacity;
+ data->parameters = *parameters;
+ data->parameters.k = k;
+ data->parameters.d = d;
+ data->parameters.splitPoint = splitPoint;
+ data->parameters.steps = kSteps;
+ data->parameters.zParams.notificationLevel = g_displayLevel;
+ /* Check the parameters */
+ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+ free(data);
+ continue;
+ }
+ /* Call the function and pass ownership of data to it */
+ COVER_best_start(&best);
+ if (pool) {
+ POOL_add(pool, &COVER_tryParameters, data);
+ } else {
+ COVER_tryParameters(data);
+ }
+ /* Print status */
+ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
+ (unsigned)((iteration * 100) / kIterations));
+ ++iteration;
+ }
+ COVER_best_wait(&best);
+ COVER_ctx_destroy(&ctx);
+ }
+ LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+ /* Fill the output buffer and parameters with the best dictionary and its parameters */
+ {
+ const size_t dictSize = best.dictSize;
+ if (ZSTD_isError(best.compressedSize)) {
+ const size_t compressedSize = best.compressedSize;
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return compressedSize;
+ }
+ *parameters = best.parameters;
+ memcpy(dictBuffer, best.dict, dictSize);
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return dictSize;
+ }
+}
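
Editor's note (illustrative sketch, not part of the vendored sources): ZDICT_optimizeTrainFromBuffer_cover() above is exposed through zdict.h when ZDICT_STATIC_LINKING_ONLY is defined. A minimal caller looks roughly like the following; the sample buffers are assumed to be prepared elsewhere and only basic error handling is shown.

    /* Editor's sketch: dictBuffer/samples/sampleSizes are assumed to be set up
     * by the caller; only the call into the cover optimizer is shown. */
    #define ZDICT_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <string.h>
    #include "zdict.h"

    size_t train_cover_sketch(void *dictBuffer, size_t dictCapacity,
                              const void *samples, const size_t *sampleSizes,
                              unsigned nbSamples) {
        ZDICT_cover_params_t params;
        size_t dictSize;
        memset(&params, 0, sizeof(params));   /* k==0 && d==0 => optimize over both    */
        params.steps = 40;                    /* candidate k values per d (40 if 0)    */
        params.nbThreads = 2;                 /* >1 => candidates dispatched via POOL_add */
        params.zParams.notificationLevel = 2; /* progress display, see displayLevel    */
        dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                                       samples, sampleSizes,
                                                       nbSamples, &params);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "cover training failed: %s\n",
                    ZDICT_getErrorName(dictSize));
            return 0;
        }
        /* On success the winning k/d are written back into params,
         * matching the tail of the function above. */
        return dictSize;
    }
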
diff --git a/vendor/github.com/DataDog/zstd/cover.h b/vendor/github.com/DataDog/zstd/cover.h
new file mode 100644
index 000000000..efb46807c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cover.h
@@ -0,0 +1,112 @@
+#include <stdio.h> /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h> /* clock */
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+ ZSTD_pthread_mutex_t mutex;
+ ZSTD_pthread_cond_t cond;
+ size_t liveJobs;
+ void *dict;
+ size_t dictSize;
+ ZDICT_cover_params_t parameters;
+ size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+ U32 begin;
+ U32 end;
+ U32 score;
+} COVER_segment_t;
+
+/**
+ * Number of epochs and size of each epoch.
+ */
+typedef struct {
+ U32 num;
+ U32 size;
+} COVER_epoch_info_t;
+
+/**
+ * Computes the number of epochs and the size of each epoch.
+ * We will make sure that each epoch gets at least 10 * k bytes.
+ *
+ * The COVER algorithms divide the data up into epochs of equal size and
+ * select one segment from each epoch.
+ *
+ * @param maxDictSize The maximum allowed dictionary size.
+ * @param nbDmers The number of dmers we are training on.
+ * @param k The parameter k (segment size).
+ * @param passes The target number of passes over the dmer corpus.
+ * More passes means a better dictionary.
+ */
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
+ U32 k, U32 passes);
+
+/**
+ * Warns the user when their corpus is too small.
+ */
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);
+
+/**
+ * Checks total compressed size of a dictionary
+ */
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+ const size_t *samplesSizes, const BYTE *samples,
+ size_t *offsets,
+ size_t nbTrainSamples, size_t nbSamples,
+ BYTE *const dict, size_t dictBufferCapacity);
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples);
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+void COVER_best_init(COVER_best_t *best);
+
+/**
+ * Wait until liveJobs == 0.
+ */
+void COVER_best_wait(COVER_best_t *best);
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+void COVER_best_destroy(COVER_best_t *best);
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+void COVER_best_start(COVER_best_t *best);
+
+/**
+ * Called when a thread finishes executing, on both error and success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far, save it and its parameters.
+ */
+void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+ ZDICT_cover_params_t parameters, void *dict,
+ size_t dictSize);
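
Editor's note (not part of the vendored header): the COVER_best_* functions declared above implement a small start/finish/wait latch around the best-candidate state. A hypothetical single-job lifecycle would use them roughly as follows, with dummy values standing in for a real candidate dictionary.

    /* Editor's sketch of the COVER_best_t lifecycle; the dictionary and its
     * compressed size are dummies, a real caller gets them from a worker job. */
    #include <string.h>
    #include "cover.h"

    static void cover_best_lifecycle_sketch(void) {
        COVER_best_t best;
        ZDICT_cover_params_t params;
        char dummyDict[8] = {0};
        memset(&params, 0, sizeof(params));
        COVER_best_init(&best);
        COVER_best_start(&best);            /* ++liveJobs before launching a job       */
        /* ... a worker would build a candidate dictionary and measure its size ...    */
        COVER_best_finish(&best, /* compressedSize */ 100, params,
                          dummyDict, sizeof(dummyDict)); /* --liveJobs, keep if best   */
        COVER_best_wait(&best);             /* blocks until liveJobs == 0              */
        COVER_best_destroy(&best);          /* waits, then frees best.dict             */
    }
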
diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h
new file mode 100644
index 000000000..5f0923fc9
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cpu.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/**
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include <string.h>
+
+#include "mem.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+ int reg[4];
+ __cpuid((int*)reg, 0);
+ {
+ int const n = reg[0];
+ if (n >= 1) {
+ __cpuid((int*)reg, 1);
+ f1c = (U32)reg[2];
+ f1d = (U32)reg[3];
+ }
+ if (n >= 7) {
+ __cpuidex((int*)reg, 7, 0);
+ f7b = (U32)reg[1];
+ f7c = (U32)reg[2];
+ }
+ }
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+ /* The following block works like the normal cpuid branch below, but gcc
+ * reserves ebx for use as its PIC register, so we must specially
+ * handle the save and restore to avoid clobbering the register.
+ */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ : "a"(1));
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %%eax\n\t"
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
+
+#endif /* ZSTD_COMMON_CPU_H */
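
Editor's note (illustration only, not part of the vendored header): the X/C/D/B macros above expand to one inline predicate per feature bit, so a runtime capability check reads like this; inside the library such checks feed e.g. the BMI2 fast paths.

    #include <stdio.h>
    #include "cpu.h"

    int main(void) {
        ZSTD_cpuid_t const cpuid = ZSTD_cpuid();       /* one CPUID sweep, cached in a struct */
        printf("sse2: %d\n", ZSTD_cpuid_sse2(cpuid));  /* from D(sse2, 26) */
        printf("avx2: %d\n", ZSTD_cpuid_avx2(cpuid));  /* from B(avx2, 5)  */
        printf("bmi2: %d\n", ZSTD_cpuid_bmi2(cpuid));  /* from B(bmi2, 8)  */
        return 0;
    }
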
diff --git a/vendor/github.com/DataDog/zstd/debug.c b/vendor/github.com/DataDog/zstd/debug.c
new file mode 100644
index 000000000..3ebdd1cb1
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/debug.c
@@ -0,0 +1,44 @@
+/* ******************************************************************
+ debug
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/vendor/github.com/DataDog/zstd/debug.h b/vendor/github.com/DataDog/zstd/debug.h
new file mode 100644
index 000000000..b4fc89d49
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/debug.h
@@ -0,0 +1,134 @@
+/* ******************************************************************
+ debug
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2.
+ * It is a global variable and is not multi-thread protected (use with care).
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact.
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+
+/* DEBUGFILE can be defined externally,
+ * typically through compiler command line.
+ * note : currently useless.
+ * Value must be stderr or stdout */
+#ifndef DEBUGFILE
+# define DEBUGFILE stderr
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In that case, it's possible to selectively trigger high verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+# include <assert.h>
+#else
+# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
+# define assert(condition) ((void)0) /* disable assert (default) */
+# endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+# include <stdio.h>
+extern int g_debuglevel; /* the variable is only declared,
+ it actually lives in debug.c,
+ and is shared by the whole process.
+ It's not thread-safe.
+ It's useful when enabling very verbose levels
+ on selective conditions (such as position in src) */
+
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ fprintf(stderr, __VA_ARGS__); \
+ } }
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define RAWLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* DEBUG_H_12987983217 */
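
Editor's note (illustrative sketch, not vendored code): the macros above compile to no-ops unless DEBUGLEVEL is raised at build time (e.g. -DDEBUGLEVEL=5); typical use looks like this.

    #include "debug.h"

    static size_t copy_len_checked(size_t len) {
        DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4);        /* compile-time check              */
        assert(len != 0);                                /* active when DEBUGLEVEL >= 1     */
        DEBUGLOG(5, "copying %u bytes", (unsigned)len);  /* prefixed with __FILE__, adds \n */
        RAWLOG(6, "raw trace, caller formats everything\n");
        return len;
    }
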
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c
new file mode 100644
index 000000000..ead922044
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/divsufsort.c
@@ -0,0 +1,1913 @@
+/*
+ * divsufsort.c for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*- Compiler specifics -*/
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wshorten-64-to-32"
+#endif
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4244)
+# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */
+#endif
+
+
+/*- Dependencies -*/
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "divsufsort.h"
+
+/*- Constants -*/
+#if defined(INLINE)
+# undef INLINE
+#endif
+#if !defined(INLINE)
+# define INLINE __inline
+#endif
+#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
+# undef ALPHABET_SIZE
+#endif
+#if !defined(ALPHABET_SIZE)
+# define ALPHABET_SIZE (256)
+#endif
+#define BUCKET_A_SIZE (ALPHABET_SIZE)
+#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
+#if defined(SS_INSERTIONSORT_THRESHOLD)
+# if SS_INSERTIONSORT_THRESHOLD < 1
+# undef SS_INSERTIONSORT_THRESHOLD
+# define SS_INSERTIONSORT_THRESHOLD (1)
+# endif
+#else
+# define SS_INSERTIONSORT_THRESHOLD (8)
+#endif
+#if defined(SS_BLOCKSIZE)
+# if SS_BLOCKSIZE < 0
+# undef SS_BLOCKSIZE
+# define SS_BLOCKSIZE (0)
+# elif 32768 <= SS_BLOCKSIZE
+# undef SS_BLOCKSIZE
+# define SS_BLOCKSIZE (32767)
+# endif
+#else
+# define SS_BLOCKSIZE (1024)
+#endif
+/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
+#if SS_BLOCKSIZE == 0
+# define SS_MISORT_STACKSIZE (96)
+#elif SS_BLOCKSIZE <= 4096
+# define SS_MISORT_STACKSIZE (16)
+#else
+# define SS_MISORT_STACKSIZE (24)
+#endif
+#define SS_SMERGE_STACKSIZE (32)
+#define TR_INSERTIONSORT_THRESHOLD (8)
+#define TR_STACKSIZE (64)
+
+
+/*- Macros -*/
+#ifndef SWAP
+# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
+#endif /* SWAP */
+#ifndef MIN
+# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
+#endif /* MIN */
+#ifndef MAX
+# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
+#endif /* MAX */
+#define STACK_PUSH(_a, _b, _c, _d)\
+ do {\
+ assert(ssize < STACK_SIZE);\
+ stack[ssize].a = (_a), stack[ssize].b = (_b),\
+ stack[ssize].c = (_c), stack[ssize++].d = (_d);\
+ } while(0)
+#define STACK_PUSH5(_a, _b, _c, _d, _e)\
+ do {\
+ assert(ssize < STACK_SIZE);\
+ stack[ssize].a = (_a), stack[ssize].b = (_b),\
+ stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
+ } while(0)
+#define STACK_POP(_a, _b, _c, _d)\
+ do {\
+ assert(0 <= ssize);\
+ if(ssize == 0) { return; }\
+ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+ (_c) = stack[ssize].c, (_d) = stack[ssize].d;\
+ } while(0)
+#define STACK_POP5(_a, _b, _c, _d, _e)\
+ do {\
+ assert(0 <= ssize);\
+ if(ssize == 0) { return; }\
+ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+ (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
+ } while(0)
+#define BUCKET_A(_c0) bucket_A[(_c0)]
+#if ALPHABET_SIZE == 256
+#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
+#else
+#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
+#endif
+
+
+/*- Private Functions -*/
+
+static const int lg_table[256]= {
+ -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+int
+ss_ilg(int n) {
+#if SS_BLOCKSIZE == 0
+ return (n & 0xffff0000) ?
+ ((n & 0xff000000) ?
+ 24 + lg_table[(n >> 24) & 0xff] :
+ 16 + lg_table[(n >> 16) & 0xff]) :
+ ((n & 0x0000ff00) ?
+ 8 + lg_table[(n >> 8) & 0xff] :
+ 0 + lg_table[(n >> 0) & 0xff]);
+#elif SS_BLOCKSIZE < 256
+ return lg_table[n];
+#else
+ return (n & 0xff00) ?
+ 8 + lg_table[(n >> 8) & 0xff] :
+ 0 + lg_table[(n >> 0) & 0xff];
+#endif
+}
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+#if SS_BLOCKSIZE != 0
+
+static const int sqq_table[256] = {
+ 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61,
+ 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89,
+ 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109,
+110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
+156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
+169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
+181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
+192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
+202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
+212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
+221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
+230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
+239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
+247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
+};
+
+static INLINE
+int
+ss_isqrt(int x) {
+ int y, e;
+
+ if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
+ e = (x & 0xffff0000) ?
+ ((x & 0xff000000) ?
+ 24 + lg_table[(x >> 24) & 0xff] :
+ 16 + lg_table[(x >> 16) & 0xff]) :
+ ((x & 0x0000ff00) ?
+ 8 + lg_table[(x >> 8) & 0xff] :
+ 0 + lg_table[(x >> 0) & 0xff]);
+
+ if(e >= 16) {
+ y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
+ if(e >= 24) { y = (y + 1 + x / y) >> 1; }
+ y = (y + 1 + x / y) >> 1;
+ } else if(e >= 8) {
+ y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
+ } else {
+ return sqq_table[x] >> 4;
+ }
+
+ return (x < (y * y)) ? y - 1 : y;
+}
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Compares two suffixes. */
+static INLINE
+int
+ss_compare(const unsigned char *T,
+ const int *p1, const int *p2,
+ int depth) {
+ const unsigned char *U1, *U2, *U1n, *U2n;
+
+ for(U1 = T + depth + *p1,
+ U2 = T + depth + *p2,
+ U1n = T + *(p1 + 1) + 2,
+ U2n = T + *(p2 + 1) + 2;
+ (U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
+ ++U1, ++U2) {
+ }
+
+ return U1 < U1n ?
+ (U2 < U2n ? *U1 - *U2 : 1) :
+ (U2 < U2n ? -1 : 0);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
+
+/* Insertionsort for small size groups */
+static
+void
+ss_insertionsort(const unsigned char *T, const int *PA,
+ int *first, int *last, int depth) {
+ int *i, *j;
+ int t;
+ int r;
+
+ for(i = last - 2; first <= i; --i) {
+ for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
+ do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
+ if(last <= j) { break; }
+ }
+ if(r == 0) { *j = ~*j; }
+ *(j - 1) = t;
+ }
+}
+
+#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+void
+ss_fixdown(const unsigned char *Td, const int *PA,
+ int *SA, int i, int size) {
+ int j, k;
+ int v;
+ int c, d, e;
+
+ for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+ d = Td[PA[SA[k = j++]]];
+ if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
+ if(d <= c) { break; }
+ }
+ SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
+ int i, m;
+ int t;
+
+ m = size;
+ if((size % 2) == 0) {
+ m--;
+ if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
+ }
+
+ for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
+ if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
+ for(i = m - 1; 0 < i; --i) {
+ t = SA[0], SA[0] = SA[i];
+ ss_fixdown(Td, PA, SA, 0, i);
+ SA[i] = t;
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+ss_median3(const unsigned char *Td, const int *PA,
+ int *v1, int *v2, int *v3) {
+ int *t;
+ if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
+ if(Td[PA[*v2]] > Td[PA[*v3]]) {
+ if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
+ else { return v3; }
+ }
+ return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+ss_median5(const unsigned char *Td, const int *PA,
+ int *v1, int *v2, int *v3, int *v4, int *v5) {
+ int *t;
+ if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
+ if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
+ if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
+ if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
+ if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
+ if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
+ return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
+ int *middle;
+ int t;
+
+ t = last - first;
+ middle = first + t / 2;
+
+ if(t <= 512) {
+ if(t <= 32) {
+ return ss_median3(Td, PA, first, middle, last - 1);
+ } else {
+ t >>= 2;
+ return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
+ }
+ }
+ t >>= 3;
+ first = ss_median3(Td, PA, first, first + t, first + (t << 1));
+ middle = ss_median3(Td, PA, middle - t, middle, middle + t);
+ last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
+ return ss_median3(Td, PA, first, middle, last);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Binary partition for substrings. */
+static INLINE
+int *
+ss_partition(const int *PA,
+ int *first, int *last, int depth) {
+ int *a, *b;
+ int t;
+ for(a = first - 1, b = last;;) {
+ for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
+ for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { }
+ if(b <= a) { break; }
+ t = ~*b;
+ *b = *a;
+ *a = t;
+ }
+ if(first < a) { *first = ~*first; }
+ return a;
+}
+
+/* Multikey introsort for medium size groups. */
+static
+void
+ss_mintrosort(const unsigned char *T, const int *PA,
+ int *first, int *last,
+ int depth) {
+#define STACK_SIZE SS_MISORT_STACKSIZE
+ struct { int *a, *b, c; int d; } stack[STACK_SIZE];
+ const unsigned char *Td;
+ int *a, *b, *c, *d, *e, *f;
+ int s, t;
+ int ssize;
+ int limit;
+ int v, x = 0;
+
+ for(ssize = 0, limit = ss_ilg(last - first);;) {
+
+ if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
+#if 1 < SS_INSERTIONSORT_THRESHOLD
+ if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
+#endif
+ STACK_POP(first, last, depth, limit);
+ continue;
+ }
+
+ Td = T + depth;
+ if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
+ if(limit < 0) {
+ for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
+ if((x = Td[PA[*a]]) != v) {
+ if(1 < (a - first)) { break; }
+ v = x;
+ first = a;
+ }
+ }
+ if(Td[PA[*first] - 1] < v) {
+ first = ss_partition(PA, first, a, depth);
+ }
+ if((a - first) <= (last - a)) {
+ if(1 < (a - first)) {
+ STACK_PUSH(a, last, depth, -1);
+ last = a, depth += 1, limit = ss_ilg(a - first);
+ } else {
+ first = a, limit = -1;
+ }
+ } else {
+ if(1 < (last - a)) {
+ STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
+ first = a, limit = -1;
+ } else {
+ last = a, depth += 1, limit = ss_ilg(a - first);
+ }
+ }
+ continue;
+ }
+
+ /* choose pivot */
+ a = ss_pivot(Td, PA, first, last);
+ v = Td[PA[*a]];
+ SWAP(*first, *a);
+
+ /* partition */
+ for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
+ if(((a = b) < last) && (x < v)) {
+ for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
+ if(x == v) { SWAP(*b, *a); ++a; }
+ }
+ }
+ for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
+ if((b < (d = c)) && (x > v)) {
+ for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+ if(x == v) { SWAP(*c, *d); --d; }
+ }
+ }
+ for(; b < c;) {
+ SWAP(*b, *c);
+ for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
+ if(x == v) { SWAP(*b, *a); ++a; }
+ }
+ for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+ if(x == v) { SWAP(*c, *d); --d; }
+ }
+ }
+
+ if(a <= d) {
+ c = b - 1;
+
+ if((s = a - first) > (t = b - a)) { s = t; }
+ for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+ if((s = d - c) > (t = last - d - 1)) { s = t; }
+ for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+
+ a = first + (b - a), c = last - (d - c);
+ b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
+
+ if((a - first) <= (last - c)) {
+ if((last - c) <= (c - b)) {
+ STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+ STACK_PUSH(c, last, depth, limit);
+ last = a;
+ } else if((a - first) <= (c - b)) {
+ STACK_PUSH(c, last, depth, limit);
+ STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+ last = a;
+ } else {
+ STACK_PUSH(c, last, depth, limit);
+ STACK_PUSH(first, a, depth, limit);
+ first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+ }
+ } else {
+ if((a - first) <= (c - b)) {
+ STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+ STACK_PUSH(first, a, depth, limit);
+ first = c;
+ } else if((last - c) <= (c - b)) {
+ STACK_PUSH(first, a, depth, limit);
+ STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+ first = c;
+ } else {
+ STACK_PUSH(first, a, depth, limit);
+ STACK_PUSH(c, last, depth, limit);
+ first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+ }
+ }
+ } else {
+ limit += 1;
+ if(Td[PA[*first] - 1] < v) {
+ first = ss_partition(PA, first, last, depth);
+ limit = ss_ilg(last - first);
+ }
+ depth += 1;
+ }
+ }
+#undef STACK_SIZE
+}
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if SS_BLOCKSIZE != 0
+
+static INLINE
+void
+ss_blockswap(int *a, int *b, int n) {
+ int t;
+ for(; 0 < n; --n, ++a, ++b) {
+ t = *a, *a = *b, *b = t;
+ }
+}
+
+static INLINE
+void
+ss_rotate(int *first, int *middle, int *last) {
+ int *a, *b, t;
+ int l, r;
+ l = middle - first, r = last - middle;
+ for(; (0 < l) && (0 < r);) {
+ if(l == r) { ss_blockswap(first, middle, l); break; }
+ if(l < r) {
+ a = last - 1, b = middle - 1;
+ t = *a;
+ do {
+ *a-- = *b, *b-- = *a;
+ if(b < first) {
+ *a = t;
+ last = a;
+ if((r -= l + 1) <= l) { break; }
+ a -= 1, b = middle - 1;
+ t = *a;
+ }
+ } while(1);
+ } else {
+ a = first, b = middle;
+ t = *a;
+ do {
+ *a++ = *b, *b++ = *a;
+ if(last <= b) {
+ *a = t;
+ first = a + 1;
+ if((l -= r + 1) <= r) { break; }
+ a += 1, b = middle;
+ t = *a;
+ }
+ } while(1);
+ }
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static
+void
+ss_inplacemerge(const unsigned char *T, const int *PA,
+ int *first, int *middle, int *last,
+ int depth) {
+ const int *p;
+ int *a, *b;
+ int len, half;
+ int q, r;
+ int x;
+
+ for(;;) {
+ if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
+ else { x = 0; p = PA + *(last - 1); }
+ for(a = first, len = middle - first, half = len >> 1, r = -1;
+ 0 < len;
+ len = half, half >>= 1) {
+ b = a + half;
+ q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
+ if(q < 0) {
+ a = b + 1;
+ half -= (len & 1) ^ 1;
+ } else {
+ r = q;
+ }
+ }
+ if(a < middle) {
+ if(r == 0) { *a = ~*a; }
+ ss_rotate(a, middle, last);
+ last -= middle - a;
+ middle = a;
+ if(first == middle) { break; }
+ }
+ --last;
+ if(x != 0) { while(*--last < 0) { } }
+ if(middle == last) { break; }
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Merge-forward with internal buffer. */
+static
+void
+ss_mergeforward(const unsigned char *T, const int *PA,
+ int *first, int *middle, int *last,
+ int *buf, int depth) {
+ int *a, *b, *c, *bufend;
+ int t;
+ int r;
+
+ bufend = buf + (middle - first) - 1;
+ ss_blockswap(buf, first, middle - first);
+
+ for(t = *(a = first), b = buf, c = middle;;) {
+ r = ss_compare(T, PA + *b, PA + *c, depth);
+ if(r < 0) {
+ do {
+ *a++ = *b;
+ if(bufend <= b) { *bufend = t; return; }
+ *b++ = *a;
+ } while(*b < 0);
+ } else if(r > 0) {
+ do {
+ *a++ = *c, *c++ = *a;
+ if(last <= c) {
+ while(b < bufend) { *a++ = *b, *b++ = *a; }
+ *a = *b, *b = t;
+ return;
+ }
+ } while(*c < 0);
+ } else {
+ *c = ~*c;
+ do {
+ *a++ = *b;
+ if(bufend <= b) { *bufend = t; return; }
+ *b++ = *a;
+ } while(*b < 0);
+
+ do {
+ *a++ = *c, *c++ = *a;
+ if(last <= c) {
+ while(b < bufend) { *a++ = *b, *b++ = *a; }
+ *a = *b, *b = t;
+ return;
+ }
+ } while(*c < 0);
+ }
+ }
+}
+
+/* Merge-backward with internal buffer. */
+static
+void
+ss_mergebackward(const unsigned char *T, const int *PA,
+ int *first, int *middle, int *last,
+ int *buf, int depth) {
+ const int *p1, *p2;
+ int *a, *b, *c, *bufend;
+ int t;
+ int r;
+ int x;
+
+ bufend = buf + (last - middle) - 1;
+ ss_blockswap(buf, middle, last - middle);
+
+ x = 0;
+ if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; }
+ else { p1 = PA + *bufend; }
+ if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
+ else { p2 = PA + *(middle - 1); }
+ for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
+ r = ss_compare(T, p1, p2, depth);
+ if(0 < r) {
+ if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+ *a-- = *b;
+ if(b <= buf) { *buf = t; break; }
+ *b-- = *a;
+ if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+ else { p1 = PA + *b; }
+ } else if(r < 0) {
+ if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+ *a-- = *c, *c-- = *a;
+ if(c < first) {
+ while(buf < b) { *a-- = *b, *b-- = *a; }
+ *a = *b, *b = t;
+ break;
+ }
+ if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+ else { p2 = PA + *c; }
+ } else {
+ if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+ *a-- = ~*b;
+ if(b <= buf) { *buf = t; break; }
+ *b-- = *a;
+ if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+ *a-- = *c, *c-- = *a;
+ if(c < first) {
+ while(buf < b) { *a-- = *b, *b-- = *a; }
+ *a = *b, *b = t;
+ break;
+ }
+ if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+ else { p1 = PA + *b; }
+ if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+ else { p2 = PA + *c; }
+ }
+ }
+}
+
+/* D&C based merge. */
+static
+void
+ss_swapmerge(const unsigned char *T, const int *PA,
+ int *first, int *middle, int *last,
+ int *buf, int bufsize, int depth) {
+#define STACK_SIZE SS_SMERGE_STACKSIZE
+#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
+#define MERGE_CHECK(a, b, c)\
+ do {\
+ if(((c) & 1) ||\
+ (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
+ *(a) = ~*(a);\
+ }\
+ if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
+ *(b) = ~*(b);\
+ }\
+ } while(0)
+ struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
+ int *l, *r, *lm, *rm;
+ int m, len, half;
+ int ssize;
+ int check, next;
+
+ for(check = 0, ssize = 0;;) {
+ if((last - middle) <= bufsize) {
+ if((first < middle) && (middle < last)) {
+ ss_mergebackward(T, PA, first, middle, last, buf, depth);
+ }
+ MERGE_CHECK(first, last, check);
+ STACK_POP(first, middle, last, check);
+ continue;
+ }
+
+ if((middle - first) <= bufsize) {
+ if(first < middle) {
+ ss_mergeforward(T, PA, first, middle, last, buf, depth);
+ }
+ MERGE_CHECK(first, last, check);
+ STACK_POP(first, middle, last, check);
+ continue;
+ }
+
+ for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
+ 0 < len;
+ len = half, half >>= 1) {
+ if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
+ PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
+ m += half + 1;
+ half -= (len & 1) ^ 1;
+ }
+ }
+
+ if(0 < m) {
+ lm = middle - m, rm = middle + m;
+ ss_blockswap(lm, middle, m);
+ l = r = middle, next = 0;
+ if(rm < last) {
+ if(*rm < 0) {
+ *rm = ~*rm;
+ if(first < lm) { for(; *--l < 0;) { } next |= 4; }
+ next |= 1;
+ } else if(first < lm) {
+ for(; *r < 0; ++r) { }
+ next |= 2;
+ }
+ }
+
+ if((l - first) <= (last - r)) {
+ STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
+ middle = lm, last = l, check = (check & 3) | (next & 4);
+ } else {
+ if((next & 2) && (r == middle)) { next ^= 6; }
+ STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
+ first = r, middle = rm, check = (next & 3) | (check & 4);
+ }
+ } else {
+ if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
+ *middle = ~*middle;
+ }
+ MERGE_CHECK(first, last, check);
+ STACK_POP(first, middle, last, check);
+ }
+ }
+#undef STACK_SIZE
+}
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Substring sort */
+static
+void
+sssort(const unsigned char *T, const int *PA,
+ int *first, int *last,
+ int *buf, int bufsize,
+ int depth, int n, int lastsuffix) {
+ int *a;
+#if SS_BLOCKSIZE != 0
+ int *b, *middle, *curbuf;
+ int j, k, curbufsize, limit;
+#endif
+ int i;
+
+ if(lastsuffix != 0) { ++first; }
+
+#if SS_BLOCKSIZE == 0
+ ss_mintrosort(T, PA, first, last, depth);
+#else
+ if((bufsize < SS_BLOCKSIZE) &&
+ (bufsize < (last - first)) &&
+ (bufsize < (limit = ss_isqrt(last - first)))) {
+ if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
+ buf = middle = last - limit, bufsize = limit;
+ } else {
+ middle = last, limit = 0;
+ }
+ for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+ ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#elif 1 < SS_BLOCKSIZE
+ ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#endif
+ curbufsize = last - (a + SS_BLOCKSIZE);
+ curbuf = a + SS_BLOCKSIZE;
+ if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
+ for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
+ ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
+ }
+ }
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+ ss_mintrosort(T, PA, a, middle, depth);
+#elif 1 < SS_BLOCKSIZE
+ ss_insertionsort(T, PA, a, middle, depth);
+#endif
+ for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
+ if(i & 1) {
+ ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
+ a -= k;
+ }
+ }
+ if(limit != 0) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+ ss_mintrosort(T, PA, middle, last, depth);
+#elif 1 < SS_BLOCKSIZE
+ ss_insertionsort(T, PA, middle, last, depth);
+#endif
+ ss_inplacemerge(T, PA, first, middle, last, depth);
+ }
+#endif
+
+ if(lastsuffix != 0) {
+ /* Insert last type B* suffix. */
+ int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
+ for(a = first, i = *(first - 1);
+ (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
+ ++a) {
+ *(a - 1) = *a;
+ }
+ *(a - 1) = i;
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+int
+tr_ilg(int n) {
+ return (n & 0xffff0000) ?
+ ((n & 0xff000000) ?
+ 24 + lg_table[(n >> 24) & 0xff] :
+ 16 + lg_table[(n >> 16) & 0xff]) :
+ ((n & 0x0000ff00) ?
+ 8 + lg_table[(n >> 8) & 0xff] :
+ 0 + lg_table[(n >> 0) & 0xff]);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Simple insertionsort for small size groups. */
+static
+void
+tr_insertionsort(const int *ISAd, int *first, int *last) {
+ int *a, *b;
+ int t, r;
+
+ for(a = first + 1; a < last; ++a) {
+ for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
+ do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
+ if(b < first) { break; }
+ }
+ if(r == 0) { *b = ~*b; }
+ *(b + 1) = t;
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_fixdown(const int *ISAd, int *SA, int i, int size) {
+ int j, k;
+ int v;
+ int c, d, e;
+
+ for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+ d = ISAd[SA[k = j++]];
+ if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
+ if(d <= c) { break; }
+ }
+ SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+tr_heapsort(const int *ISAd, int *SA, int size) {
+ int i, m;
+ int t;
+
+ m = size;
+ if((size % 2) == 0) {
+ m--;
+ if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
+ }
+
+ for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
+ if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
+ for(i = m - 1; 0 < i; --i) {
+ t = SA[0], SA[0] = SA[i];
+ tr_fixdown(ISAd, SA, 0, i);
+ SA[i] = t;
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
+ int *t;
+ if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
+ if(ISAd[*v2] > ISAd[*v3]) {
+ if(ISAd[*v1] > ISAd[*v3]) { return v1; }
+ else { return v3; }
+ }
+ return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+tr_median5(const int *ISAd,
+ int *v1, int *v2, int *v3, int *v4, int *v5) {
+ int *t;
+ if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
+ if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
+ if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
+ if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
+ if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
+ if(ISAd[*v3] > ISAd[*v4]) { return v4; }
+ return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+tr_pivot(const int *ISAd, int *first, int *last) {
+ int *middle;
+ int t;
+
+ t = last - first;
+ middle = first + t / 2;
+
+ if(t <= 512) {
+ if(t <= 32) {
+ return tr_median3(ISAd, first, middle, last - 1);
+ } else {
+ t >>= 2;
+ return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
+ }
+ }
+ t >>= 3;
+ first = tr_median3(ISAd, first, first + t, first + (t << 1));
+ middle = tr_median3(ISAd, middle - t, middle, middle + t);
+ last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
+ return tr_median3(ISAd, first, middle, last);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+typedef struct _trbudget_t trbudget_t;
+struct _trbudget_t {
+ int chance;
+ int remain;
+ int incval;
+ int count;
+};
+
+static INLINE
+void
+trbudget_init(trbudget_t *budget, int chance, int incval) {
+ budget->chance = chance;
+ budget->remain = budget->incval = incval;
+}
+
+static INLINE
+int
+trbudget_check(trbudget_t *budget, int size) {
+ if(size <= budget->remain) { budget->remain -= size; return 1; }
+ if(budget->chance == 0) { budget->count += size; return 0; }
+ budget->remain += budget->incval - size;
+ budget->chance -= 1;
+ return 1;
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_partition(const int *ISAd,
+ int *first, int *middle, int *last,
+ int **pa, int **pb, int v) {
+ int *a, *b, *c, *d, *e, *f;
+ int t, s;
+ int x = 0;
+
+ for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
+ if(((a = b) < last) && (x < v)) {
+ for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
+ if(x == v) { SWAP(*b, *a); ++a; }
+ }
+ }
+ for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
+ if((b < (d = c)) && (x > v)) {
+ for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+ if(x == v) { SWAP(*c, *d); --d; }
+ }
+ }
+ for(; b < c;) {
+ SWAP(*b, *c);
+ for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
+ if(x == v) { SWAP(*b, *a); ++a; }
+ }
+ for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+ if(x == v) { SWAP(*c, *d); --d; }
+ }
+ }
+
+ if(a <= d) {
+ c = b - 1;
+ if((s = a - first) > (t = b - a)) { s = t; }
+ for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+ if((s = d - c) > (t = last - d - 1)) { s = t; }
+ for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+ first += (b - a), last -= (d - c);
+ }
+ *pa = first, *pb = last;
+}
+
+static
+void
+tr_copy(int *ISA, const int *SA,
+ int *first, int *a, int *b, int *last,
+ int depth) {
+ /* Sort the suffixes of the middle partition
+ using the sorted order of the suffixes of the left and right partitions. */
+ int *c, *d, *e;
+ int s, v;
+
+ v = b - SA - 1;
+ for(c = first, d = a - 1; c <= d; ++c) {
+ if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+ *++d = s;
+ ISA[s] = d - SA;
+ }
+ }
+ for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+ if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+ *--d = s;
+ ISA[s] = d - SA;
+ }
+ }
+}
+
+static
+void
+tr_partialcopy(int *ISA, const int *SA,
+ int *first, int *a, int *b, int *last,
+ int depth) {
+ int *c, *d, *e;
+ int s, v;
+ int rank, lastrank, newrank = -1;
+
+ v = b - SA - 1;
+ lastrank = -1;
+ for(c = first, d = a - 1; c <= d; ++c) {
+ if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+ *++d = s;
+ rank = ISA[s + depth];
+ if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+ ISA[s] = newrank;
+ }
+ }
+
+ lastrank = -1;
+ for(e = d; first <= e; --e) {
+ rank = ISA[*e];
+ if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
+ if(newrank != rank) { ISA[*e] = newrank; }
+ }
+
+ lastrank = -1;
+ for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+ if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+ *--d = s;
+ rank = ISA[s + depth];
+ if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+ ISA[s] = newrank;
+ }
+ }
+}
+
+static
+void
+tr_introsort(int *ISA, const int *ISAd,
+ int *SA, int *first, int *last,
+ trbudget_t *budget) {
+#define STACK_SIZE TR_STACKSIZE
+ struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
+ int *a, *b, *c;
+ int t;
+ int v, x = 0;
+ int incr = ISAd - ISA;
+ int limit, next;
+ int ssize, trlink = -1;
+
+ for(ssize = 0, limit = tr_ilg(last - first);;) {
+
+ if(limit < 0) {
+ if(limit == -1) {
+ /* tandem repeat partition */
+ tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
+
+ /* update ranks */
+ if(a < last) {
+ for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+ }
+ if(b < last) {
+ for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
+ }
+
+ /* push */
+ if(1 < (b - a)) {
+ STACK_PUSH5(NULL, a, b, 0, 0);
+ STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
+ trlink = ssize - 2;
+ }
+ if((a - first) <= (last - b)) {
+ if(1 < (a - first)) {
+ STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
+ last = a, limit = tr_ilg(a - first);
+ } else if(1 < (last - b)) {
+ first = b, limit = tr_ilg(last - b);
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ } else {
+ if(1 < (last - b)) {
+ STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
+ first = b, limit = tr_ilg(last - b);
+ } else if(1 < (a - first)) {
+ last = a, limit = tr_ilg(a - first);
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ }
+ } else if(limit == -2) {
+ /* tandem repeat copy */
+ a = stack[--ssize].b, b = stack[ssize].c;
+ if(stack[ssize].d == 0) {
+ tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
+ } else {
+ if(0 <= trlink) { stack[trlink].d = -1; }
+ tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
+ }
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ } else {
+ /* sorted partition */
+ if(0 <= *first) {
+ a = first;
+ do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
+ first = a;
+ }
+ if(first < last) {
+ a = first; do { *a = ~*a; } while(*++a < 0);
+ next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
+ if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
+
+ /* push */
+ if(trbudget_check(budget, a - first)) {
+ if((a - first) <= (last - a)) {
+ STACK_PUSH5(ISAd, a, last, -3, trlink);
+ ISAd += incr, last = a, limit = next;
+ } else {
+ if(1 < (last - a)) {
+ STACK_PUSH5(ISAd + incr, first, a, next, trlink);
+ first = a, limit = -3;
+ } else {
+ ISAd += incr, last = a, limit = next;
+ }
+ }
+ } else {
+ if(0 <= trlink) { stack[trlink].d = -1; }
+ if(1 < (last - a)) {
+ first = a, limit = -3;
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ }
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ }
+ continue;
+ }
+
+ if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
+ tr_insertionsort(ISAd, first, last);
+ limit = -3;
+ continue;
+ }
+
+ if(limit-- == 0) {
+ tr_heapsort(ISAd, first, last - first);
+ for(a = last - 1; first < a; a = b) {
+ for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
+ }
+ limit = -3;
+ continue;
+ }
+
+ /* choose pivot */
+ a = tr_pivot(ISAd, first, last);
+ SWAP(*first, *a);
+ v = ISAd[*first];
+
+ /* partition */
+ tr_partition(ISAd, first, first + 1, last, &a, &b, v);
+ if((last - first) != (b - a)) {
+ next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
+
+ /* update ranks */
+ for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+ if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
+
+ /* push */
+ if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
+ if((a - first) <= (last - b)) {
+ if((last - b) <= (b - a)) {
+ if(1 < (a - first)) {
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ last = a;
+ } else if(1 < (last - b)) {
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ first = b;
+ } else {
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ } else if((a - first) <= (b - a)) {
+ if(1 < (a - first)) {
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ last = a;
+ } else {
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ } else {
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ } else {
+ if((a - first) <= (b - a)) {
+ if(1 < (last - b)) {
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ first = b;
+ } else if(1 < (a - first)) {
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ last = a;
+ } else {
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ } else if((last - b) <= (b - a)) {
+ if(1 < (last - b)) {
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+ first = b;
+ } else {
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ } else {
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ ISAd += incr, first = a, last = b, limit = next;
+ }
+ }
+ } else {
+ if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
+ if((a - first) <= (last - b)) {
+ if(1 < (a - first)) {
+ STACK_PUSH5(ISAd, b, last, limit, trlink);
+ last = a;
+ } else if(1 < (last - b)) {
+ first = b;
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ } else {
+ if(1 < (last - b)) {
+ STACK_PUSH5(ISAd, first, a, limit, trlink);
+ first = b;
+ } else if(1 < (a - first)) {
+ last = a;
+ } else {
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ }
+ }
+ } else {
+ if(trbudget_check(budget, last - first)) {
+ limit = tr_ilg(last - first), ISAd += incr;
+ } else {
+ if(0 <= trlink) { stack[trlink].d = -1; }
+ STACK_POP5(ISAd, first, last, limit, trlink);
+ }
+ }
+ }
+#undef STACK_SIZE
+}
+
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Tandem repeat sort */
+static
+void
+trsort(int *ISA, int *SA, int n, int depth) {
+ int *ISAd;
+ int *first, *last;
+ trbudget_t budget;
+ int t, skip, unsorted;
+
+ trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
+/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
+ for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
+ first = SA;
+ skip = 0;
+ unsorted = 0;
+ do {
+ if((t = *first) < 0) { first -= t; skip += t; }
+ else {
+ if(skip != 0) { *(first + skip) = skip; skip = 0; }
+ last = SA + ISA[t] + 1;
+ if(1 < (last - first)) {
+ budget.count = 0;
+ tr_introsort(ISA, ISAd, SA, first, last, &budget);
+ if(budget.count != 0) { unsorted += budget.count; }
+ else { skip = first - last; }
+ } else if((last - first) == 1) {
+ skip = -1;
+ }
+ first = last;
+ }
+ } while(first < (SA + n));
+ if(skip != 0) { *(first + skip) = skip; }
+ if(unsorted == 0) { break; }
+ }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Sorts suffixes of type B*. */
+static
+int
+sort_typeBstar(const unsigned char *T, int *SA,
+ int *bucket_A, int *bucket_B,
+ int n, int openMP) {
+ int *PAb, *ISAb, *buf;
+#ifdef LIBBSC_OPENMP
+ int *curbuf;
+ int l;
+#endif
+ int i, j, k, t, m, bufsize;
+ int c0, c1;
+#ifdef LIBBSC_OPENMP
+ int d0, d1;
+#endif
+ (void)openMP;
+
+ /* Initialize bucket arrays. */
+ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
+ for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
+
+ /* Count the number of occurrences of the first one or two characters of each
+ type A, B and B* suffix. Moreover, store the beginning position of all
+ type B* suffixes into the array SA. */
+ for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
+ /* type A suffix. */
+ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
+ if(0 <= i) {
+ /* type B* suffix. */
+ ++BUCKET_BSTAR(c0, c1);
+ SA[--m] = i;
+ /* type B suffix. */
+ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
+ ++BUCKET_B(c0, c1);
+ }
+ }
+ }
+ m = n - m;
+/*
+note:
+ A type B* suffix is lexicographically smaller than a type B suffix that
+ begins with the same first two characters.
+*/
+
+ /* Calculate the index of start/end point of each bucket. */
+ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
+ t = i + BUCKET_A(c0);
+ BUCKET_A(c0) = i + j; /* start point */
+ i = t + BUCKET_B(c0, c0);
+ for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
+ j += BUCKET_BSTAR(c0, c1);
+ BUCKET_BSTAR(c0, c1) = j; /* end point */
+ i += BUCKET_B(c0, c1);
+ }
+ }
+
+ if(0 < m) {
+ /* Sort the type B* suffixes by their first two characters. */
+ PAb = SA + n - m; ISAb = SA + m;
+ for(i = m - 2; 0 <= i; --i) {
+ t = PAb[i], c0 = T[t], c1 = T[t + 1];
+ SA[--BUCKET_BSTAR(c0, c1)] = i;
+ }
+ t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
+ SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
+
+ /* Sort the type B* substrings using sssort. */
+#ifdef LIBBSC_OPENMP
+ if (openMP)
+ {
+ buf = SA + m;
+ c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
+#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
+ {
+ bufsize = (n - (2 * m)) / omp_get_num_threads();
+ curbuf = buf + omp_get_thread_num() * bufsize;
+ k = 0;
+ for(;;) {
+ #pragma omp critical(sssort_lock)
+ {
+ if(0 < (l = j)) {
+ d0 = c0, d1 = c1;
+ do {
+ k = BUCKET_BSTAR(d0, d1);
+ if(--d1 <= d0) {
+ d1 = ALPHABET_SIZE - 1;
+ if(--d0 < 0) { break; }
+ }
+ } while(((l - k) <= 1) && (0 < (l = k)));
+ c0 = d0, c1 = d1, j = k;
+ }
+ }
+ if(l == 0) { break; }
+ sssort(T, PAb, SA + k, SA + l,
+ curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
+ }
+ }
+ }
+ else
+ {
+ buf = SA + m, bufsize = n - (2 * m);
+ for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+ for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+ i = BUCKET_BSTAR(c0, c1);
+ if(1 < (j - i)) {
+ sssort(T, PAb, SA + i, SA + j,
+ buf, bufsize, 2, n, *(SA + i) == (m - 1));
+ }
+ }
+ }
+ }
+#else
+ buf = SA + m, bufsize = n - (2 * m);
+ for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+ for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+ i = BUCKET_BSTAR(c0, c1);
+ if(1 < (j - i)) {
+ sssort(T, PAb, SA + i, SA + j,
+ buf, bufsize, 2, n, *(SA + i) == (m - 1));
+ }
+ }
+ }
+#endif
+
+ /* Compute ranks of type B* substrings. */
+ for(i = m - 1; 0 <= i; --i) {
+ if(0 <= SA[i]) {
+ j = i;
+ do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
+ SA[i + 1] = i - j;
+ if(i <= 0) { break; }
+ }
+ j = i;
+ do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
+ ISAb[SA[i]] = j;
+ }
+
+ /* Construct the inverse suffix array of type B* suffixes using trsort. */
+ trsort(ISAb, SA, m, 1);
+
+  /* Set the sorted order of type B* suffixes. */
+ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
+ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
+ if(0 <= i) {
+ t = i;
+ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
+ SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
+ }
+ }
+
+ /* Calculate the index of start/end point of each bucket. */
+ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
+ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
+ i = BUCKET_A(c0 + 1) - 1;
+ for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
+ t = i - BUCKET_B(c0, c1);
+ BUCKET_B(c0, c1) = i; /* end point */
+
+ /* Move all type B* suffixes to the correct position. */
+ for(i = t, j = BUCKET_BSTAR(c0, c1);
+ j <= k;
+ --i, --k) { SA[i] = SA[k]; }
+ }
+ BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
+ BUCKET_B(c0, c0) = i; /* end point */
+ }
+ }
+
+ return m;
+}
+
+/* Constructs the suffix array by using the sorted order of type B* suffixes. */
+static
+void
+construct_SA(const unsigned char *T, int *SA,
+ int *bucket_A, int *bucket_B,
+ int n, int m) {
+ int *i, *j, *k;
+ int s;
+ int c0, c1, c2;
+
+ if(0 < m) {
+ /* Construct the sorted order of type B suffixes by using
+ the sorted order of type B* suffixes. */
+ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+ /* Scan the suffix array from right to left. */
+ for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+ j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+ i <= j;
+ --j) {
+ if(0 < (s = *j)) {
+ assert(T[s] == c1);
+ assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+ assert(T[s - 1] <= T[s]);
+ *j = ~s;
+ c0 = T[--s];
+ if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+ if(c0 != c2) {
+ if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+ k = SA + BUCKET_B(c2 = c0, c1);
+ }
+ assert(k < j); assert(k != NULL);
+ *k-- = s;
+ } else {
+ assert(((s == 0) && (T[s] == c1)) || (s < 0));
+ *j = ~s;
+ }
+ }
+ }
+ }
+
+ /* Construct the suffix array by using
+ the sorted order of type B suffixes. */
+ k = SA + BUCKET_A(c2 = T[n - 1]);
+ *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
+ /* Scan the suffix array from left to right. */
+ for(i = SA, j = SA + n; i < j; ++i) {
+ if(0 < (s = *i)) {
+ assert(T[s - 1] >= T[s]);
+ c0 = T[--s];
+ if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
+ if(c0 != c2) {
+ BUCKET_A(c2) = k - SA;
+ k = SA + BUCKET_A(c2 = c0);
+ }
+ assert(i < k);
+ *k++ = s;
+ } else {
+ assert(s < 0);
+ *i = ~s;
+ }
+ }
+}
+
+/* Constructs the Burrows-Wheeler transformed string directly
+ by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT(const unsigned char *T, int *SA,
+ int *bucket_A, int *bucket_B,
+ int n, int m) {
+ int *i, *j, *k, *orig;
+ int s;
+ int c0, c1, c2;
+
+ if(0 < m) {
+ /* Construct the sorted order of type B suffixes by using
+ the sorted order of type B* suffixes. */
+ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+ /* Scan the suffix array from right to left. */
+ for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+ j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+ i <= j;
+ --j) {
+ if(0 < (s = *j)) {
+ assert(T[s] == c1);
+ assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+ assert(T[s - 1] <= T[s]);
+ c0 = T[--s];
+ *j = ~((int)c0);
+ if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+ if(c0 != c2) {
+ if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+ k = SA + BUCKET_B(c2 = c0, c1);
+ }
+ assert(k < j); assert(k != NULL);
+ *k-- = s;
+ } else if(s != 0) {
+ *j = ~s;
+#ifndef NDEBUG
+ } else {
+ assert(T[s] == c1);
+#endif
+ }
+ }
+ }
+ }
+
+ /* Construct the BWTed string by using
+ the sorted order of type B suffixes. */
+ k = SA + BUCKET_A(c2 = T[n - 1]);
+ *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
+ /* Scan the suffix array from left to right. */
+ for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+ if(0 < (s = *i)) {
+ assert(T[s - 1] >= T[s]);
+ c0 = T[--s];
+ *i = c0;
+ if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
+ if(c0 != c2) {
+ BUCKET_A(c2) = k - SA;
+ k = SA + BUCKET_A(c2 = c0);
+ }
+ assert(i < k);
+ *k++ = s;
+ } else if(s != 0) {
+ *i = ~s;
+ } else {
+ orig = i;
+ }
+ }
+
+ return orig - SA;
+}
+
+/* Constructs the Burrows-Wheeler transformed string directly
+ by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT_indexes(const unsigned char *T, int *SA,
+ int *bucket_A, int *bucket_B,
+ int n, int m,
+ unsigned char * num_indexes, int * indexes) {
+ int *i, *j, *k, *orig;
+ int s;
+ int c0, c1, c2;
+
+ int mod = n / 8;
+ {
+ mod |= mod >> 1; mod |= mod >> 2;
+ mod |= mod >> 4; mod |= mod >> 8;
+ mod |= mod >> 16; mod >>= 1;
+
+ *num_indexes = (unsigned char)((n - 1) / (mod + 1));
+ }
+
+ if(0 < m) {
+ /* Construct the sorted order of type B suffixes by using
+ the sorted order of type B* suffixes. */
+ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+ /* Scan the suffix array from right to left. */
+ for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+ j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+ i <= j;
+ --j) {
+ if(0 < (s = *j)) {
+ assert(T[s] == c1);
+ assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+ assert(T[s - 1] <= T[s]);
+
+ if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
+
+ c0 = T[--s];
+ *j = ~((int)c0);
+ if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+ if(c0 != c2) {
+ if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+ k = SA + BUCKET_B(c2 = c0, c1);
+ }
+ assert(k < j); assert(k != NULL);
+ *k-- = s;
+ } else if(s != 0) {
+ *j = ~s;
+#ifndef NDEBUG
+ } else {
+ assert(T[s] == c1);
+#endif
+ }
+ }
+ }
+ }
+
+ /* Construct the BWTed string by using
+ the sorted order of type B suffixes. */
+ k = SA + BUCKET_A(c2 = T[n - 1]);
+ if (T[n - 2] < c2) {
+ if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
+ *k++ = ~((int)T[n - 2]);
+ }
+ else {
+ *k++ = n - 1;
+ }
+
+ /* Scan the suffix array from left to right. */
+ for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+ if(0 < (s = *i)) {
+ assert(T[s - 1] >= T[s]);
+
+ if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
+
+ c0 = T[--s];
+ *i = c0;
+ if(c0 != c2) {
+ BUCKET_A(c2) = k - SA;
+ k = SA + BUCKET_A(c2 = c0);
+ }
+ assert(i < k);
+ if((0 < s) && (T[s - 1] < c0)) {
+ if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
+ *k++ = ~((int)T[s - 1]);
+ } else
+ *k++ = s;
+ } else if(s != 0) {
+ *i = ~s;
+ } else {
+ orig = i;
+ }
+ }
+
+ return orig - SA;
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/*- Function -*/
+
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
+ int *bucket_A, *bucket_B;
+ int m;
+ int err = 0;
+
+ /* Check arguments. */
+ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
+ else if(n == 0) { return 0; }
+ else if(n == 1) { SA[0] = 0; return 0; }
+ else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
+
+ bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+ bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+ /* Suffixsort. */
+ if((bucket_A != NULL) && (bucket_B != NULL)) {
+ m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
+ construct_SA(T, SA, bucket_A, bucket_B, n, m);
+ } else {
+ err = -2;
+ }
+
+ free(bucket_B);
+ free(bucket_A);
+
+ return err;
+}
+
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
+ int *B;
+ int *bucket_A, *bucket_B;
+ int m, pidx, i;
+
+ /* Check arguments. */
+ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
+ else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
+
+ if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
+ bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+ bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+ /* Burrows-Wheeler Transform. */
+ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
+ m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
+
+ if (num_indexes == NULL || indexes == NULL) {
+ pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
+ } else {
+ pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
+ }
+
+ /* Copy to output string. */
+ U[0] = T[n - 1];
+ for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
+ for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
+ pidx += 1;
+ } else {
+ pidx = -2;
+ }
+
+ free(bucket_B);
+ free(bucket_A);
+ if(A == NULL) { free(B); }
+
+ return pidx;
+}
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.h b/vendor/github.com/DataDog/zstd/divsufsort.h
new file mode 100644
index 000000000..5440994af
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/divsufsort.h
@@ -0,0 +1,67 @@
+/*
+ * divsufsort.h for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DIVSUFSORT_H
+#define _DIVSUFSORT_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/*- Prototypes -*/
+
+/**
+ * Constructs the suffix array of a given string.
+ * @param T [0..n-1] The input string.
+ * @param SA [0..n-1] The output array of suffixes.
+ * @param n The length of the given string.
+ * @param openMP enables OpenMP optimization.
+ * @return 0 if no error occurred, -1 or -2 otherwise.
+ */
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP);
+
+/**
+ * Constructs the Burrows-Wheeler transformed string of a given string.
+ * @param T [0..n-1] The input string.
+ * @param U [0..n-1] The output string. (can be T)
+ * @param A [0..n-1] The temporary array. (can be NULL)
+ * @param n The length of the given string.
+ * @param num_indexes The length of secondary indexes array. (can be NULL)
+ * @param indexes The secondary indexes array. (can be NULL)
+ * @param openMP enables OpenMP optimization.
+ * @return The primary index if no error occurred, -1 or -2 otherwise.
+ */
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif /* _DIVSUFSORT_H */
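A minimal usage sketch for the two prototypes above; the sample text, buffer sizes and error handling are illustrative only and not part of the vendored sources.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "divsufsort.h"

/* Minimal sketch: build the suffix array and the BWT of a short sample string. */
int main(void) {
    const unsigned char T[] = "abracadabra";
    int n = (int)strlen((const char *)T);
    int *SA = (int *)malloc((size_t)n * sizeof(int));
    unsigned char *U = (unsigned char *)malloc((size_t)n);
    int i, pidx;

    if (SA == NULL || U == NULL) { free(SA); free(U); return 1; }

    if (divsufsort(T, SA, n, 0) == 0) {              /* 0 means success */
        for (i = 0; i < n; ++i) printf("SA[%d] = %d\n", i, SA[i]);
    }

    pidx = divbwt(T, U, NULL, n, NULL, NULL, 0);     /* >= 0 means success */
    if (pidx >= 0) printf("primary index = %d\n", pidx);

    free(U);
    free(SA);
    return 0;
}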
diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c
new file mode 100644
index 000000000..b12944e1d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/entropy_common.c
@@ -0,0 +1,236 @@
+/*
+ Common functions of New Generation Entropy library
+ Copyright (C) 2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+/* *************************************
+* Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h" /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) {
+ /* This function only works when hbSize >= 4 */
+ char buffer[4];
+ memset(buffer, 0, sizeof(buffer));
+ memcpy(buffer, headerBuffer, hbSize);
+ { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+ buffer, sizeof(buffer));
+ if (FSE_isError(countSize)) return countSize;
+ if (countSize > hbSize) return ERROR(corruption_detected);
+ return countSize;
+ } }
+ assert(hbSize >= 4);
+
+ /* init */
+ memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) & (charnum<=*maxSVPtr)) {
+ if (previous0) {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0 += 24;
+ if (ip < iend-5) {
+ ip += 2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 16;
+ bitCount += 16;
+ } }
+ while ((bitStream & 3) == 3) {
+ n0 += 3;
+ bitStream >>= 2;
+ bitCount += 2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ assert((bitCount >> 3) <= 3); /* For first condition to work */
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 2;
+ } }
+ { int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
+ } else {
+ count = bitStream & (2*threshold-1);
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= count < 0 ? -count : count; /* -1 means +1 */
+ normalizedCounter[charnum++] = (short)count;
+ previous0 = !count;
+ while (remaining < threshold) {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+ if (remaining != 1) return ERROR(corruption_detected);
+ if (bitCount > 32) return ERROR(corruption_detected);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ return ip-istart;
+}
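A minimal sketch of how a caller consumes this header format; `hdr` and `hdrSize` are assumed to describe a normalized-count header previously written with FSE_writeNCount().

/* Sketch only: `hdr` / `hdrSize` are assumed inputs. */
short    counts[256];          /* room for maxSymbolValue = 255, i.e. 256 entries */
unsigned maxSymbol = 255;      /* in: capacity of counts[]; out: last symbol present */
unsigned tableLog;
size_t const used = FSE_readNCount(counts, &maxSymbol, &tableLog, hdr, hdrSize);
if (FSE_isError(used)) {
    /* e.g. corruption_detected or tableLog_tooLarge */
} else {
    /* `used` header bytes were consumed; counts[0..maxSymbol] now hold the distribution */
}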
+
+
+/*! HUF_readStats() :
+ Read compact Huffman tree, saved by HUF_writeCTable().
+ `huffWeight` is destination buffer.
+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src`, or an error code.
+ Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+    /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSE (normal case) */
+ FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
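A hedged usage sketch for HUF_readStats(); it assumes huf.h is included with HUF_STATIC_LINKING_ONLY defined (where this prototype and the HUF_* constants live) and that `src`/`srcSize` describe a header written by HUF_writeCTable().

/* Sketch only: `src` / `srcSize` are assumed inputs. */
BYTE weights[HUF_SYMBOLVALUE_MAX + 1];     /* destination for per-symbol weights */
U32  rankStats[HUF_TABLELOG_MAX + 1];      /* filled with the count of each weight */
U32  nbSymbols, tableLog;
size_t const hSize = HUF_readStats(weights, sizeof(weights), rankStats,
                                   &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(hSize)) {
    /* corrupted header or srcSize too small */
} else {
    /* hSize bytes consumed; weights[0..nbSymbols-1] and tableLog are usable */
}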
diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c
new file mode 100644
index 000000000..7c1bb67a2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/error_private.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+#ifdef ZSTD_STRIP_ERROR_STRINGS
+ (void)code;
+ return "Error strings stripped";
+#else
+ static const char* const notErrorCode = "Unspecified error code";
+ switch( code )
+ {
+ case PREFIX(no_error): return "No error detected";
+ case PREFIX(GENERIC): return "Error (generic)";
+ case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+ case PREFIX(version_unsupported): return "Version not supported";
+ case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+ case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(init_missing): return "Context should be init first";
+ case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+ case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+ case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+ case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ case PREFIX(maxCode):
+ default: return notErrorCode;
+ }
+#endif
+}
diff --git a/vendor/github.com/DataDog/zstd/error_private.h b/vendor/github.com/DataDog/zstd/error_private.h
new file mode 100644
index 000000000..0d2fa7e34
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/error_private.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t */
+#include "zstd_errors.h" /* enum list */
+
+
+/* ****************************************
+* Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+# define ERR_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+# define ERR_STATIC static __inline
+#else
+# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-****************************************
+* Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+* Error codes handling
+******************************************/
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+
+/*-****************************************
+* Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code); /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ return ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
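A small illustration of the convention above (a sketch, assuming <assert.h> and <stdio.h> are available): error codes are negated ZSTD_ErrorCode values packed into a size_t, so any return value above ERROR(maxCode) doubles as an error.

size_t const err = ERROR(srcSize_wrong);              /* == (size_t)-ZSTD_error_srcSize_wrong */
assert(ERR_isError(err));                             /* very large size_t values signal errors */
assert(ERR_getErrorCode(err) == PREFIX(srcSize_wrong));
printf("%s\n", ERR_getErrorName(err));                /* prints "Src size is incorrect" */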
diff --git a/vendor/github.com/DataDog/zstd/errors.go b/vendor/github.com/DataDog/zstd/errors.go
new file mode 100644
index 000000000..38db0d51c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/errors.go
@@ -0,0 +1,35 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+*/
+import "C"
+
+// ErrorCode is an error returned by the zstd library.
+type ErrorCode int
+
+// Error returns the error string given by zstd
+func (e ErrorCode) Error() string {
+ return C.GoString(C.ZSTD_getErrorName(C.size_t(e)))
+}
+
+func cIsError(code int) bool {
+ return int(C.ZSTD_isError(C.size_t(code))) != 0
+}
+
+// getError returns an error for the return code, or nil if it's not an error
+func getError(code int) error {
+ if code < 0 && cIsError(code) {
+ return ErrorCode(code)
+ }
+ return nil
+}
+
+// IsDstSizeTooSmallError returns whether the error corresponds to the zstd standard dstSize_tooSmall error
+func IsDstSizeTooSmallError(e error) bool {
+ if e != nil && e.Error() == "Destination buffer is too small" {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/DataDog/zstd/fastcover.c b/vendor/github.com/DataDog/zstd/fastcover.c
new file mode 100644
index 000000000..5b6b941a9
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fastcover.c
@@ -0,0 +1,740 @@
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdio.h> /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h> /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "cover.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
+#define FASTCOVER_MAX_F 31
+#define FASTCOVER_MAX_ACCEL 10
+#define DEFAULT_SPLITPOINT 0.75
+#define DEFAULT_F 20
+#define DEFAULT_ACCEL 1
+
+
+/*-*************************************
+* Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...) \
+ { \
+ fprintf(stderr, __VA_ARGS__); \
+ fflush(stderr); \
+ }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
+ DISPLAY(__VA_ARGS__); \
+ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
+ if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \
+ g_time = clock(); \
+ DISPLAY(__VA_ARGS__); \
+ } \
+ }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+
+/*-*************************************
+* Hash Functions
+***************************************/
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+
+/**
+ * Hash the d-byte value pointed to by p, modulo 2^f
+ */
+static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
+ if (d == 6) {
+ return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);
+ }
+ return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);
+}
+
+
+/*-*************************************
+* Acceleration
+***************************************/
+typedef struct {
+ unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */
+ unsigned skip; /* Number of dmer skipped between each dmer counted in computeFrequency */
+} FASTCOVER_accel_t;
+
+
+static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
+ { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
+ { 100, 0 }, /* accel = 1 */
+ { 50, 1 }, /* accel = 2 */
+ { 34, 2 }, /* accel = 3 */
+ { 25, 3 }, /* accel = 4 */
+ { 20, 4 }, /* accel = 5 */
+ { 17, 5 }, /* accel = 6 */
+ { 14, 6 }, /* accel = 7 */
+ { 13, 7 }, /* accel = 8 */
+ { 11, 8 }, /* accel = 9 */
+ { 10, 9 }, /* accel = 10 */
+};
+
+
+/*-*************************************
+* Context
+***************************************/
+typedef struct {
+ const BYTE *samples;
+ size_t *offsets;
+ const size_t *samplesSizes;
+ size_t nbSamples;
+ size_t nbTrainSamples;
+ size_t nbTestSamples;
+ size_t nbDmers;
+ U32 *freqs;
+ unsigned d;
+ unsigned f;
+ FASTCOVER_accel_t accelParams;
+} FASTCOVER_ctx_t;
+
+
+/*-*************************************
+* Helper functions
+***************************************/
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the following function:
+ *
+ * Let F(d) be the frequency of all dmers with hash value d.
+ * Let S_i be the hash value of the dmer at position i of segment S, which has length k.
+ *
+ * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
+ U32 *freqs, U32 begin, U32 end,
+ ZDICT_cover_params_t parameters,
+ U16* segmentFreqs) {
+ /* Constants */
+ const U32 k = parameters.k;
+ const U32 d = parameters.d;
+ const U32 f = ctx->f;
+ const U32 dmersInK = k - d + 1;
+
+ /* Try each segment (activeSegment) and save the best (bestSegment) */
+ COVER_segment_t bestSegment = {0, 0, 0};
+ COVER_segment_t activeSegment;
+
+ /* Reset the activeDmers in the segment */
+ /* The activeSegment starts at the beginning of the epoch. */
+ activeSegment.begin = begin;
+ activeSegment.end = begin;
+ activeSegment.score = 0;
+
+ /* Slide the activeSegment through the whole epoch.
+ * Save the best segment in bestSegment.
+ */
+ while (activeSegment.end < end) {
+ /* Get hash value of current dmer */
+ const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
+
+ /* Add frequency of this index to score if this is the first occurrence of index in active segment */
+ if (segmentFreqs[idx] == 0) {
+ activeSegment.score += freqs[idx];
+ }
+    /* Increment end of segment and segmentFreqs */
+ activeSegment.end += 1;
+ segmentFreqs[idx] += 1;
+ /* If the window is now too large, drop the first position */
+ if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+ /* Get hash value of the dmer to be eliminated from active segment */
+ const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+ segmentFreqs[delIndex] -= 1;
+ /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
+ if (segmentFreqs[delIndex] == 0) {
+ activeSegment.score -= freqs[delIndex];
+ }
+ /* Increment start of segment */
+ activeSegment.begin += 1;
+ }
+
+ /* If this segment is the best so far save it */
+ if (activeSegment.score > bestSegment.score) {
+ bestSegment = activeSegment;
+ }
+ }
+
+ /* Zero out rest of segmentFreqs array */
+ while (activeSegment.begin < end) {
+ const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+ segmentFreqs[delIndex] -= 1;
+ activeSegment.begin += 1;
+ }
+
+ {
+ /* Zero the frequency of hash value of each dmer covered by the chosen segment. */
+ U32 pos;
+ for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+ const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
+ freqs[i] = 0;
+ }
+ }
+
+ return bestSegment;
+}
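A toy illustration (not library code) of the scoring rule documented above: repeated hash values inside the active segment are counted only once, which is what `segmentFreqs` tracks.

/* Toy example: 8-slot frequency table (as if f == 3), one segment of three dmers. */
U32 F[8] = { 0, 0, 0, 5, 0, 0, 0, 2 };   /* F[3] = 5, F[7] = 2 */
U16 seen[8] = { 0 };                      /* plays the role of segmentFreqs */
size_t hashes[3] = { 3, 7, 3 };           /* hash of each dmer in the segment */
U32 score = 0;
size_t i;
for (i = 0; i < 3; ++i) {
    if (seen[hashes[i]]++ == 0) score += F[hashes[i]];   /* count first occurrence only */
}
/* score == 7 (F[3] + F[7]); the duplicate hash 3 contributes nothing the second time */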
+
+
+static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
+ size_t maxDictSize, unsigned f,
+ unsigned accel) {
+ /* k, d, and f are required parameters */
+ if (parameters.d == 0 || parameters.k == 0) {
+ return 0;
+ }
+ /* d has to be 6 or 8 */
+ if (parameters.d != 6 && parameters.d != 8) {
+ return 0;
+ }
+ /* k <= maxDictSize */
+ if (parameters.k > maxDictSize) {
+ return 0;
+ }
+ /* d <= k */
+ if (parameters.d > parameters.k) {
+ return 0;
+ }
+ /* 0 < f <= FASTCOVER_MAX_F*/
+ if (f > FASTCOVER_MAX_F || f == 0) {
+ return 0;
+ }
+ /* 0 < splitPoint <= 1 */
+ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
+ return 0;
+ }
+ /* 0 < accel <= 10 */
+ if (accel > 10 || accel == 0) {
+ return 0;
+ }
+ return 1;
+}
+
+
+/**
+ * Clean up a context initialized with `FASTCOVER_ctx_init()`.
+ */
+static void
+FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
+{
+ if (!ctx) return;
+
+ free(ctx->freqs);
+ ctx->freqs = NULL;
+
+ free(ctx->offsets);
+ ctx->offsets = NULL;
+}
+
+
+/**
+ * Calculate the frequency of the hash value of each dmer in ctx->samples
+ */
+static void
+FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
+{
+ const unsigned f = ctx->f;
+ const unsigned d = ctx->d;
+ const unsigned skip = ctx->accelParams.skip;
+ const unsigned readLength = MAX(d, 8);
+ size_t i;
+ assert(ctx->nbTrainSamples >= 5);
+ assert(ctx->nbTrainSamples <= ctx->nbSamples);
+ for (i = 0; i < ctx->nbTrainSamples; i++) {
+ size_t start = ctx->offsets[i]; /* start of current dmer */
+ size_t const currSampleEnd = ctx->offsets[i+1];
+ while (start + readLength <= currSampleEnd) {
+ const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
+ freqs[dmerIndex]++;
+ start = start + skip + 1;
+ }
+ }
+}
+
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
+ */
+static int
+FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
+ const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples,
+ unsigned d, double splitPoint, unsigned f,
+ FASTCOVER_accel_t accelParams)
+{
+ const BYTE* const samples = (const BYTE*)samplesBuffer;
+ const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+ /* Split samples into testing and training sets */
+ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+ const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+ const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+ const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
+
+ /* Checks */
+ if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+ totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
+ DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+ (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
+ return 0;
+ }
+
+ /* Check if there are at least 5 training samples */
+ if (nbTrainSamples < 5) {
+ DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
+ return 0;
+ }
+
+  /* Check that there is at least one testing sample */
+ if (nbTestSamples < 1) {
+ DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
+ return 0;
+ }
+
+ /* Zero the context */
+ memset(ctx, 0, sizeof(*ctx));
+ DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+ (unsigned)trainingSamplesSize);
+ DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+ (unsigned)testSamplesSize);
+
+ ctx->samples = samples;
+ ctx->samplesSizes = samplesSizes;
+ ctx->nbSamples = nbSamples;
+ ctx->nbTrainSamples = nbTrainSamples;
+ ctx->nbTestSamples = nbTestSamples;
+ ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
+ ctx->d = d;
+ ctx->f = f;
+ ctx->accelParams = accelParams;
+
+ /* The offsets of each file */
+ ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
+ if (ctx->offsets == NULL) {
+ DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
+ FASTCOVER_ctx_destroy(ctx);
+ return 0;
+ }
+
+ /* Fill offsets from the samplesSizes */
+ { U32 i;
+ ctx->offsets[0] = 0;
+ assert(nbSamples >= 5);
+ for (i = 1; i <= nbSamples; ++i) {
+ ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+ }
+ }
+
+ /* Initialize frequency array of size 2^f */
+ ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
+ if (ctx->freqs == NULL) {
+ DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
+ FASTCOVER_ctx_destroy(ctx);
+ return 0;
+ }
+
+ DISPLAYLEVEL(2, "Computing frequencies\n");
+ FASTCOVER_computeFrequency(ctx->freqs, ctx);
+
+ return 1;
+}
+
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t
+FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
+ U32* freqs,
+ void* dictBuffer, size_t dictBufferCapacity,
+ ZDICT_cover_params_t parameters,
+ U16* segmentFreqs)
+{
+ BYTE *const dict = (BYTE *)dictBuffer;
+ size_t tail = dictBufferCapacity;
+ /* Divide the data into epochs. We will select one segment from each epoch. */
+ const COVER_epoch_info_t epochs = COVER_computeEpochs(
+ (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
+ const size_t maxZeroScoreRun = 10;
+ size_t zeroScoreRun = 0;
+ size_t epoch;
+ DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+ (U32)epochs.num, (U32)epochs.size);
+ /* Loop through the epochs until there are no more segments or the dictionary
+ * is full.
+ */
+ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+ const U32 epochBegin = (U32)(epoch * epochs.size);
+ const U32 epochEnd = epochBegin + epochs.size;
+ size_t segmentSize;
+ /* Select a segment */
+ COVER_segment_t segment = FASTCOVER_selectSegment(
+ ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
+
+ /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+ */
+ if (segment.score == 0) {
+ if (++zeroScoreRun >= maxZeroScoreRun) {
+ break;
+ }
+ continue;
+ }
+ zeroScoreRun = 0;
+
+ /* Trim the segment if necessary and if it is too small then we are done */
+ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+ if (segmentSize < parameters.d) {
+ break;
+ }
+
+ /* We fill the dictionary from the back to allow the best segments to be
+ * referenced with the smallest offsets.
+ */
+ tail -= segmentSize;
+ memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+ DISPLAYUPDATE(
+ 2, "\r%u%% ",
+ (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+ }
+ DISPLAYLEVEL(2, "\r%79s\r", "");
+ return tail;
+}
+
+
+/**
+ * Parameters for FASTCOVER_tryParameters().
+ */
+typedef struct FASTCOVER_tryParameters_data_s {
+ const FASTCOVER_ctx_t* ctx;
+ COVER_best_t* best;
+ size_t dictBufferCapacity;
+ ZDICT_cover_params_t parameters;
+} FASTCOVER_tryParameters_data_t;
+
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void FASTCOVER_tryParameters(void *opaque)
+{
+ /* Save parameters as local variables */
+ FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
+ const FASTCOVER_ctx_t *const ctx = data->ctx;
+ const ZDICT_cover_params_t parameters = data->parameters;
+ size_t dictBufferCapacity = data->dictBufferCapacity;
+ size_t totalCompressedSize = ERROR(GENERIC);
+ /* Initialize array to keep track of frequency of dmer within activeSegment */
+ U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
+ /* Allocate space for hash table, dict, and freqs */
+ BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+ U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
+ if (!segmentFreqs || !dict || !freqs) {
+ DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+ goto _cleanup;
+ }
+ /* Copy the frequencies because we need to modify them */
+ memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
+ /* Build the dictionary */
+ { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
+ parameters, segmentFreqs);
+ const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
+ dictBufferCapacity = ZDICT_finalizeDictionary(
+ dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+ ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams);
+ if (ZDICT_isError(dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+ goto _cleanup;
+ }
+ }
+ /* Check total compressed size */
+ totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
+ ctx->samples, ctx->offsets,
+ ctx->nbTrainSamples, ctx->nbSamples,
+ dict, dictBufferCapacity);
+_cleanup:
+ COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+ dictBufferCapacity);
+ free(data);
+ free(segmentFreqs);
+ free(dict);
+ free(freqs);
+}
+
+
+static void
+FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
+ ZDICT_cover_params_t* coverParams)
+{
+ coverParams->k = fastCoverParams.k;
+ coverParams->d = fastCoverParams.d;
+ coverParams->steps = fastCoverParams.steps;
+ coverParams->nbThreads = fastCoverParams.nbThreads;
+ coverParams->splitPoint = fastCoverParams.splitPoint;
+ coverParams->zParams = fastCoverParams.zParams;
+}
+
+
+static void
+FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
+ ZDICT_fastCover_params_t* fastCoverParams,
+ unsigned f, unsigned accel)
+{
+ fastCoverParams->k = coverParams.k;
+ fastCoverParams->d = coverParams.d;
+ fastCoverParams->steps = coverParams.steps;
+ fastCoverParams->nbThreads = coverParams.nbThreads;
+ fastCoverParams->splitPoint = coverParams.splitPoint;
+ fastCoverParams->f = f;
+ fastCoverParams->accel = accel;
+ fastCoverParams->zParams = coverParams.zParams;
+}
+
+
+ZDICTLIB_API size_t
+ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_fastCover_params_t parameters)
+{
+ BYTE* const dict = (BYTE*)dictBuffer;
+ FASTCOVER_ctx_t ctx;
+ ZDICT_cover_params_t coverParams;
+ FASTCOVER_accel_t accelParams;
+ /* Initialize global data */
+ g_displayLevel = parameters.zParams.notificationLevel;
+ /* Assign splitPoint and f if not provided */
+ parameters.splitPoint = 1.0;
+ parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
+ parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
+ /* Convert to cover parameter */
+ memset(&coverParams, 0 , sizeof(coverParams));
+ FASTCOVER_convertToCoverParams(parameters, &coverParams);
+ /* Checks */
+ if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
+ parameters.accel)) {
+ DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+ return ERROR(GENERIC);
+ }
+ if (nbSamples == 0) {
+ DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
+ return ERROR(GENERIC);
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ /* Assign corresponding FASTCOVER_accel_t to accelParams*/
+ accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
+ /* Initialize context */
+ if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+ coverParams.d, parameters.splitPoint, parameters.f,
+ accelParams)) {
+ DISPLAYLEVEL(1, "Failed to initialize context\n");
+ return ERROR(GENERIC);
+ }
+ COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
+ /* Build the dictionary */
+ DISPLAYLEVEL(2, "Building dictionary\n");
+ {
+ /* Initialize array to keep track of frequency of dmer within activeSegment */
+ U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
+ const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
+ dictBufferCapacity, coverParams, segmentFreqs);
+ const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
+ const size_t dictionarySize = ZDICT_finalizeDictionary(
+ dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+ samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
+ if (!ZSTD_isError(dictionarySize)) {
+ DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+ (unsigned)dictionarySize);
+ }
+ FASTCOVER_ctx_destroy(&ctx);
+ free(segmentFreqs);
+ return dictionarySize;
+ }
+}
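A hedged usage sketch for the entry point above; `samples`, `sampleSizes` and `nbSamples` are assumed to describe a concatenated training corpus, and the 110 KB dictionary buffer is just a common choice. The fastCover entry points are declared in the experimental section of zdict.h (behind ZDICT_STATIC_LINKING_ONLY).

/* Sketch only: samples / sampleSizes / nbSamples are assumed inputs. */
ZDICT_fastCover_params_t params;
unsigned char dict[110 * 1024];
size_t dictSize;

memset(&params, 0, sizeof(params));
params.d = 8;        /* dmer size, must be 6 or 8 */
params.k = 200;      /* segment size */
params.f = 20;       /* log2 of the frequency table size */
params.accel = 1;    /* 1 = slowest / most thorough */

dictSize = ZDICT_trainFromBuffer_fastCover(dict, sizeof(dict),
                                           samples, sampleSizes, nbSamples, params);
if (ZDICT_isError(dictSize)) {
    /* e.g. not enough samples, or dict buffer below ZDICT_DICTSIZE_MIN */
}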
+
+
+ZDICTLIB_API size_t
+ZDICT_optimizeTrainFromBuffer_fastCover(
+ void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_fastCover_params_t* parameters)
+{
+ ZDICT_cover_params_t coverParams;
+ FASTCOVER_accel_t accelParams;
+ /* constants */
+ const unsigned nbThreads = parameters->nbThreads;
+ const double splitPoint =
+ parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+ const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+ const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+ const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+ const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+ const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+ const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+ const unsigned kIterations =
+ (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+ const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
+ const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
+ /* Local variables */
+ const int displayLevel = parameters->zParams.notificationLevel;
+ unsigned iteration = 1;
+ unsigned d;
+ unsigned k;
+ COVER_best_t best;
+ POOL_ctx *pool = NULL;
+ int warned = 0;
+ /* Checks */
+ if (splitPoint <= 0 || splitPoint > 1) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
+ return ERROR(GENERIC);
+ }
+ if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
+ return ERROR(GENERIC);
+ }
+ if (kMinK < kMaxD || kMaxK < kMinK) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
+ return ERROR(GENERIC);
+ }
+ if (nbSamples == 0) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
+ return ERROR(GENERIC);
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ if (nbThreads > 1) {
+ pool = POOL_create(nbThreads, 1);
+ if (!pool) {
+ return ERROR(memory_allocation);
+ }
+ }
+ /* Initialization */
+ COVER_best_init(&best);
+ memset(&coverParams, 0 , sizeof(coverParams));
+ FASTCOVER_convertToCoverParams(*parameters, &coverParams);
+ accelParams = FASTCOVER_defaultAccelParameters[accel];
+ /* Turn down global display level to clean up display at level 2 and below */
+ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+ /* Loop through d first because each new value needs a new context */
+ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+ kIterations);
+ for (d = kMinD; d <= kMaxD; d += 2) {
+ /* Initialize the context for this value of d */
+ FASTCOVER_ctx_t ctx;
+ LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+ if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams)) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return ERROR(GENERIC);
+ }
+ if (!warned) {
+ COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
+ warned = 1;
+ }
+ /* Loop through k reusing the same context */
+ for (k = kMinK; k <= kMaxK; k += kStepSize) {
+ /* Prepare the arguments */
+ FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(
+ sizeof(FASTCOVER_tryParameters_data_t));
+ LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+ if (!data) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+ COVER_best_destroy(&best);
+ FASTCOVER_ctx_destroy(&ctx);
+ POOL_free(pool);
+ return ERROR(GENERIC);
+ }
+ data->ctx = &ctx;
+ data->best = &best;
+ data->dictBufferCapacity = dictBufferCapacity;
+ data->parameters = coverParams;
+ data->parameters.k = k;
+ data->parameters.d = d;
+ data->parameters.splitPoint = splitPoint;
+ data->parameters.steps = kSteps;
+ data->parameters.zParams.notificationLevel = g_displayLevel;
+ /* Check the parameters */
+ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
+ data->ctx->f, accel)) {
+ DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+ free(data);
+ continue;
+ }
+ /* Call the function and pass ownership of data to it */
+ COVER_best_start(&best);
+ if (pool) {
+ POOL_add(pool, &FASTCOVER_tryParameters, data);
+ } else {
+ FASTCOVER_tryParameters(data);
+ }
+ /* Print status */
+ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
+ (unsigned)((iteration * 100) / kIterations));
+ ++iteration;
+ }
+ COVER_best_wait(&best);
+ FASTCOVER_ctx_destroy(&ctx);
+ }
+ LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+ /* Fill the output buffer and parameters with output of the best parameters */
+ {
+ const size_t dictSize = best.dictSize;
+ if (ZSTD_isError(best.compressedSize)) {
+ const size_t compressedSize = best.compressedSize;
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return compressedSize;
+ }
+ FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
+ memcpy(dictBuffer, best.dict, dictSize);
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return dictSize;
+ }
+
+}
diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h
new file mode 100644
index 000000000..811c670bd
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse.h
@@ -0,0 +1,708 @@
+/* ******************************************************************
+ FSE : Finite State Entropy codec
+ Public Prototypes declaration
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSE_compress() :
+ Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+ 'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+ @return : size of compressed data (<= dstCapacity).
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData consists of a single byte value repeated srcSize times; use RLE compression instead.
+ if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= dstCapacity),
+ or an error code, which can be tested using FSE_isError() .
+
+ ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
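+/*! Example (illustrative sketch, not part of the upstream sources) :
+    a minimal compress/decompress round-trip with the simple API above.
+    'roundTrip' and its buffer names are hypothetical; it assumes <stdlib.h>
+    for malloc()/free(), and it skips the RLE (return==1) case for brevity.
+
+    size_t roundTrip(void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize)
+    {
+        size_t const cBound = FSE_compressBound(srcSize);
+        void* const cBuff = malloc(cBound);
+        size_t dSize = 0;
+        if (cBuff != NULL) {
+            size_t const cSize = FSE_compress(cBuff, cBound, src, srcSize);
+            if (!FSE_isError(cSize) && cSize > 1) {
+                // cSize==0 (not compressible) and cSize==1 (RLE) need another path
+                dSize = FSE_decompress(dst, dstCapacity, cBuff, cSize);
+                if (FSE_isError(dSize)) dSize = 0;
+            }
+            free(cBuff);
+        }
+        return dSize;   // number of regenerated bytes, or 0 on any failure
+    }
+*/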
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+* FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+ Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+ Both parameters can be defined as '0' to mean : use default value
+ @return : size of compressed data
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
+ if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[] (see hist.h)
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide the normalized distribution using an external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_optimalTableLog():
+ dynamically downsize 'tableLog' when conditions are met.
+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ @return : tableLog,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_NCountWriteBound():
+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+ Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+ Compactly save 'normalizedCounter' into 'buffer'.
+ @return : size of the compressed table,
+ or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+ Builds `ct`, which must be already allocated, using FSE_createCTable().
+ @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+ Compress `src` using `ct` into `dst` which must be already allocated.
+ @return : size of compressed data (<= `dstCapacity`),
+ or 0 if compressed data could not fit into `dst`,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. HIST_count(), declared in hist.h, does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+HIST_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to HIST_count(), the convention is that 'src' is assumed to be a table of bytes of size 'srcSize'.
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
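+/*! Example (illustrative sketch, not part of the upstream sources) :
+    the full pipeline described above, wrapped into one hypothetical helper.
+    HIST_count() comes from hist.h; error handling is reduced to early returns.
+
+    size_t compressWithCTable(void* dst, size_t dstCapacity,
+                              const void* src, size_t srcSize)
+    {
+        unsigned count[256];
+        short norm[256];
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        unsigned char* op = (unsigned char*)dst;
+        FSE_CTable* ct;
+        size_t headerSize, cSize, errorCode;
+
+        errorCode = HIST_count(count, &maxSymbolValue, src, srcSize);                     // 1. count
+        if (FSE_isError(errorCode)) return errorCode;
+
+        tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+        errorCode = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);   // 2. normalize
+        if (FSE_isError(errorCode)) return errorCode;
+
+        headerSize = FSE_writeNCount(op, dstCapacity, norm, maxSymbolValue, tableLog);    // 3. save NCount
+        if (FSE_isError(headerSize)) return headerSize;
+
+        ct = FSE_createCTable(maxSymbolValue, tableLog);                                  // 4. build CTable
+        if (ct == NULL) return 0;   // allocation failure (handling simplified for the sketch)
+        errorCode = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
+        if (FSE_isError(errorCode)) { FSE_freeCTable(ct); return errorCode; }
+
+        cSize = FSE_compress_usingCTable(op + headerSize, dstCapacity - headerSize,       // 5. encode
+                                         src, srcSize, ct);
+        FSE_freeCTable(ct);
+        if (FSE_isError(cSize) || cSize == 0) return cSize;   // 0 : could not fit into dst
+        return headerSize + cSize;
+    }
+*/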
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSE_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSE_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+ Builds 'dt', which must be already allocated, using FSE_createDTable().
+ return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
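+/*! Example (illustrative sketch, not part of the upstream sources) :
+    decoding a block produced by the compression sketch above. 'cSrc' is
+    expected to start with the table saved by FSE_writeNCount(); the helper
+    name and buffers are hypothetical.
+
+    size_t decompressWithDTable(void* dst, size_t dstCapacity,
+                                const void* cSrc, size_t cSrcSize)
+    {
+        short norm[256];
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        FSE_DTable* dt;
+        size_t dSize;
+        size_t const headerSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog,
+                                                 cSrc, cSrcSize);
+        if (FSE_isError(headerSize)) return headerSize;
+
+        dt = FSE_createDTable(tableLog);
+        if (dt == NULL) return 0;   // allocation failure (handling simplified for the sketch)
+        dSize = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
+        if (!FSE_isError(dSize))
+            dSize = FSE_decompress_usingDTable(dst, dstCapacity,
+                                               (const char*)cSrc + headerSize,
+                                               cSrcSize - headerSize, dt);
+        FSE_freeDTable(dt);
+        return dSize;   // regenerated size, or an error code
+    }
+*/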
+
+#endif /* FSE_H */
+
+#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate an FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using the macros below */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
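+/* Example (illustrative) : a DTable for tableLog==12, statically allocated with the macro above.
+ * The value 12 is arbitrary, chosen for the sketch only :
+ * FSE_DTable myDTable[FSE_DTABLE_SIZE_U32(12)];
+ */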
+
+
+/* *****************************************
+ * FSE advanced API
+ ***************************************** */
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
+
+typedef enum {
+ FSE_repeat_none, /**< Cannot use the previous table */
+ FSE_repeat_check, /**< Can use the previous table but it must be checked */
+ FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+* FSE symbol compression API
+*******************************************/
+/*!
+ This API consists of small unitary functions, which highly benefit from being inlined.
+ Hence their bodies are included in the next section.
+*/
+typedef struct {
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/**<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable ct; // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream; // bitStream tracking structure
+FSE_CState_t state; // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+ size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+ FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
+ BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory, they just store it into local register, for speed.
+Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+ BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+ FSE_flushState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+ size_t size = BIT_closeCStream(&bitStream);
+*/
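+/*! Example (illustrative sketch, not part of the upstream sources) :
+    the steps above combined into a single-state encoding loop.
+    'encodeBackwards' is a hypothetical helper; 'ct' comes from FSE_buildCTable().
+
+    size_t encodeBackwards(void* dst, size_t dstCapacity,
+                           const unsigned char* src, size_t srcSize,
+                           const FSE_CTable* ct)
+    {
+        BIT_CStream_t bitStream;
+        FSE_CState_t state;
+        size_t const initError = BIT_initCStream(&bitStream, dst, dstCapacity);
+        if (FSE_isError(initError)) return 0;        // dst too small
+        FSE_initCState(&state, ct);
+        while (srcSize > 0) {                        // encode in reverse order
+            FSE_encodeSymbol(&bitStream, &state, src[--srcSize]);
+            BIT_flushBits(&bitStream);               // commit the local register to memory
+        }
+        FSE_flushCState(&bitStream, &state);
+        return BIT_closeCStream(&bitStream);         // 0 means "could not fit into dst"
+    }
+*/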
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct {
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/**<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream; // Stream context
+FSE_DState_t DState; // State context. Multiple ones are possible
+FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+ errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+ FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+ unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you may have stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bits compatibility
+ size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from local register (which size depends on size_t).
+Refueling the register from memory is manually performed by the reload method.
+ endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells if there is still some more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left into the DStream.
+BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
+BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+ BIT_endOfDStream(&DStream);
+Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
+ FSE_endOfDState(&DState);
+*/
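+/*! Example (illustrative sketch, not part of the upstream sources) :
+    the matching single-state decoding loop for the encoding sketch above.
+    'decodeBackwards' is a hypothetical helper; 'dt' comes from FSE_buildDTable(),
+    and 'originalSize' must be known from an out-of-band header.
+
+    size_t decodeBackwards(unsigned char* dst, size_t originalSize,
+                           const void* cSrc, size_t cSrcSize,
+                           const FSE_DTable* dt)
+    {
+        BIT_DStream_t bitStream;
+        FSE_DState_t state;
+        size_t n;
+        size_t const initError = BIT_initDStream(&bitStream, cSrc, cSrcSize);
+        if (FSE_isError(initError)) return initError;
+        FSE_initDState(&state, &bitStream, dt);
+        for (n = 0; n < originalSize; n++) {
+            dst[n] = FSE_decodeSymbol(&state, &bitStream);   // symbols come back in forward order,
+            BIT_reloadDStream(&bitStream);                   // since encoding was done in reverse
+        }
+        if (!BIT_endOfDStream(&bitStream) || !FSE_endOfDState(&state))
+            return 0;   // stream not fully consumed : corrupted input (simplified handling)
+        return originalSize;
+    }
+*/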
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+typedef struct {
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+ const void* ptr = ct;
+ const U16* u16ptr = (const U16*) ptr;
+ const U32 tableLog = MEM_read16(ptr);
+ statePtr->value = (ptrdiff_t)1<<tableLog;
+ statePtr->stateTable = u16ptr+2;
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
+ statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+* uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
+{
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+}
+
+
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional values are rounded up (e.g. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+ U32 const threshold = (minNbBits+1) << 16;
+ assert(tableLog < 16);
+ assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */
+ { U32 const tableSize = 1 << tableLog;
+ U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+ U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */
+ U32 const bitMultiplier = 1 << accuracyLog;
+ assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+ assert(normalizedDeltaFromThreshold <= bitMultiplier);
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+ }
+}
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+# define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+# define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+# define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c
new file mode 100644
index 000000000..68b47e109
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse_compress.c
@@ -0,0 +1,721 @@
+/* ******************************************************************
+ FSE : Finite State Entropy encoder
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include "compiler.h"
+#include "mem.h" /* U32, U16, etc. */
+#include "debug.h" /* assert, DEBUGLOG */
+#include "hist.h" /* HIST_count_wksp */
+#include "bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle the worst case situation, which is `(1<<max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize)
+{
+ U32 const tableSize = 1 << tableLog;
+ U32 const tableMask = tableSize - 1;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
+
+ FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
+ U32 highThreshold = tableSize-1;
+
+ /* CTable header */
+ if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
+ tableU16[-2] = (U16) tableLog;
+ tableU16[-1] = (U16) maxSymbolValue;
+ assert(tableLog < 16); /* required for threshold strategy to work */
+
+ /* For explanations on how to distribute symbol values over the table :
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ #ifdef __clang_analyzer__
+ memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ #endif
+
+ /* symbol start positions */
+ { U32 u;
+ cumul[0] = 0;
+ for (u=1; u <= maxSymbolValue+1; u++) {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ cumul[u] = cumul[u-1] + 1;
+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+ } else {
+ cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+ } }
+ cumul[maxSymbolValue+1] = tableSize+1;
+ }
+
+ /* Spread symbols */
+ { U32 position = 0;
+ U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ int nbOccurrences;
+ int const freq = normalizedCounter[symbol];
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+ position = (position + step) & tableMask;
+ while (position > highThreshold)
+ position = (position + step) & tableMask; /* Low proba area */
+ } }
+
+ assert(position==0); /* Must have initialized all positions */
+ }
+
+ /* Build table */
+ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
+ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ {
+ case 0:
+ /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+ symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+ break;
+
+ case -1:
+ case 1:
+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+ symbolTT[s].deltaFindState = total - 1;
+ total ++;
+ break;
+ default :
+ {
+ U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
+ U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+ symbolTT[s].deltaFindState = total - normalizedCounter[s];
+ total += normalizedCounter[s];
+ } } } }
+
+#if 0 /* debug : symbol costs */
+ DEBUGLOG(5, "\n --- table statistics : ");
+ { U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
+ symbol, normalizedCounter[symbol],
+ FSE_getMaxNbBits(symbolTT, symbol),
+ (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+ }
+ }
+#endif
+
+ return 0;
+}
+
+
+size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if a static analyzer complains about it */
+ return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+
+/*-**************************************************************
+* FSE NCount encoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
+
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ unsigned writeIsSafe)
+{
+ BYTE* const ostart = (BYTE*) header;
+ BYTE* out = ostart;
+ BYTE* const oend = ostart + headerBufferSize;
+ int nbBits;
+ const int tableSize = 1 << tableLog;
+ int remaining;
+ int threshold;
+ U32 bitStream = 0;
+ int bitCount = 0;
+ unsigned symbol = 0;
+ unsigned const alphabetSize = maxSymbolValue + 1;
+ int previousIs0 = 0;
+
+ /* Table Size */
+ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+ bitCount += 4;
+
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+ nbBits = tableLog+1;
+
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+ unsigned start = symbol;
+ while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+ if (symbol == alphabetSize) break; /* incorrect distribution */
+ while (symbol >= start+24) {
+ start+=24;
+ bitStream += 0xFFFFU << bitCount;
+ if ((!writeIsSafe) && (out > oend-2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE) bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+=2;
+ bitStream>>=16;
+ }
+ while (symbol >= start+3) {
+ start+=3;
+ bitStream += 3 << bitCount;
+ bitCount += 2;
+ }
+ bitStream += (symbol-start) << bitCount;
+ bitCount += 2;
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+ { int count = normalizedCounter[symbol++];
+ int const max = (2*threshold-1) - remaining;
+ remaining -= count < 0 ? -count : count;
+ count++; /* +1 for extra accuracy */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+ bitStream += count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
+ previousIs0 = (count==1);
+ if (remaining<1) return ERROR(GENERIC);
+ while (remaining<threshold) { nbBits--; threshold>>=1; }
+ }
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+
+ if (remaining != 1)
+ return ERROR(GENERIC); /* incorrect normalized distribution */
+ assert(symbol <= alphabetSize);
+
+ /* flush remaining bitStream */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+ return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
+
+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
+}
+
+
+/*-**************************************************************
+* FSE Compression Code
+****************************************************************/
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t size;
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+ return (FSE_CTable*)malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 tableLog = maxTableLog;
+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
+ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+ return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
+
+
+/* Secondary normalization method.
+ To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
+{
+ short const NOT_YET_ASSIGNED = -2;
+ U32 s;
+ U32 distributed = 0;
+ U32 ToDistribute;
+
+ /* Init */
+ U32 const lowThreshold = (U32)(total >> tableLog);
+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == 0) {
+ norm[s]=0;
+ continue;
+ }
+ if (count[s] <= lowThreshold) {
+ norm[s] = -1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+ if (count[s] <= lowOne) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+
+ norm[s]=NOT_YET_ASSIGNED;
+ }
+ ToDistribute = (1 << tableLog) - distributed;
+
+ if (ToDistribute == 0)
+ return 0;
+
+ if ((total / ToDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = (U32)((total * 3) / (ToDistribute * 2));
+ for (s=0; s<=maxSymbolValue; s++) {
+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ } }
+ ToDistribute = (1 << tableLog) - distributed;
+ }
+
+ if (distributed == maxSymbolValue+1) {
+ /* all values are pretty poor;
+ probably incompressible data (should have already been detected);
+ find max, then give all remaining points to max */
+ U32 maxV = 0, maxC = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+ norm[maxV] += (short)ToDistribute;
+ return 0;
+ }
+
+ if (total == 0) {
+ /* all of the symbols were low enough for the lowOne or lowThreshold */
+ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+ if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+ return 0;
+ }
+
+ { U64 const vStepLog = 62 - tableLog;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
+ U64 tmpTotal = mid;
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (norm[s]==NOT_YET_ASSIGNED) {
+ U64 const end = tmpTotal + (count[s] * rStep);
+ U32 const sStart = (U32)(tmpTotal >> vStepLog);
+ U32 const sEnd = (U32)(end >> vStepLog);
+ U32 const weight = sEnd - sStart;
+ if (weight < 1)
+ return ERROR(GENERIC);
+ norm[s] = (short)weight;
+ tmpTotal = end;
+ } } }
+
+ return 0;
+}
+
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t total,
+ unsigned maxSymbolValue)
+{
+ /* Sanity checks */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ U64 const scale = 62 - tableLog;
+ U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
+ U64 const vStep = 1ULL<<(scale-20);
+ int stillToDistribute = 1<<tableLog;
+ unsigned s;
+ unsigned largest=0;
+ short largestP=0;
+ U32 lowThreshold = (U32)(total >> tableLog);
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == total) return 0; /* rle special case */
+ if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+ if (count[s] <= lowThreshold) {
+ normalizedCounter[s] = -1;
+ stillToDistribute--;
+ } else {
+ short proba = (short)((count[s]*step) >> scale);
+ if (proba<8) {
+ U64 restToBeat = vStep * rtbTable[proba];
+ proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+ }
+ if (proba > largestP) { largestP=proba; largest=s; }
+ normalizedCounter[s] = proba;
+ stillToDistribute -= proba;
+ } }
+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+ /* corner case, need another normalization method */
+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+ if (FSE_isError(errorCode)) return errorCode;
+ }
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
+#if 0
+ { /* Print Table (debug) */
+ U32 s;
+ U32 nTotal = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
+ for (s=0; s<=maxSymbolValue; s++)
+ nTotal += abs(normalizedCounter[s]);
+ if (nTotal != (1U<<tableLog))
+ RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+ getchar();
+ }
+#endif
+
+ return tableLog;
+}
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* header */
+ tableU16[-2] = (U16) nbBits;
+ tableU16[-1] = (U16) maxSymbolValue;
+
+ /* Build table */
+ for (s=0; s<tableSize; s++)
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ } }
+
+ return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+ void* ptr = ct;
+ U16* tableU16 = ( (U16*) ptr) + 2;
+ void* FSCTptr = (U32*)ptr + 2;
+ FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+ /* header */
+ tableU16[-2] = (U16) 0;
+ tableU16[-1] = (U16) symbolValue;
+
+ /* Build table */
+ tableU16[0] = 0;
+ tableU16[1] = 0; /* just in case */
+
+ /* Build Symbol Transformation Table */
+ symbolTT[symbolValue].deltaNbBits = 0;
+ symbolTT[symbolValue].deltaFindState = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct, const unsigned fast)
+{
+ const BYTE* const istart = (const BYTE*) src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip=iend;
+
+ BIT_CStream_t bitC;
+ FSE_CState_t CState1, CState2;
+
+ /* init */
+ if (srcSize <= 2) return 0;
+ { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+ if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+ if (srcSize & 1) {
+ FSE_initCState2(&CState1, ct, *--ip);
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ } else {
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_initCState2(&CState1, ct, *--ip);
+ }
+
+ /* join to mod 4 */
+ srcSize -= 2;
+ if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ /* 2 or 4 encoding per loop */
+ while ( ip>istart ) {
+
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
+ FSE_FLUSHBITS(&bitC);
+
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ }
+
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ FSE_flushCState(&bitC, &CState2);
+ FSE_flushCState(&bitC, &CState1);
+ return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct)
+{
+ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+ if (fast)
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+ else
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be at least `FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)`.
+ */
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
+ unsigned count[FSE_MAX_SYMBOL_VALUE+1];
+ S16 norm[FSE_MAX_SYMBOL_VALUE+1];
+ FSE_CTable* CTable = (FSE_CTable*)workSpace;
+ size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
+ void* scratchBuffer = (void*)(CTable + CTableSize);
+ size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
+
+ /* init conditions */
+ if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
+ if (srcSize <= 1) return 0; /* Not compressible */
+ if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
+
+ /* Scan input and build symbol stats */
+ { CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
+ if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
+ }
+
+ tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+
+ /* Write table description header */
+ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+ op += nc_err;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+
+ /* check compressibility */
+ if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
+
+ return op-ostart;
+}
+
+typedef struct {
+ FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+ BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+} fseWkspMax_t;
+
+size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
+{
+ fseWkspMax_t scratchBuffer;
+ DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+ return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
+}
+
+size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
+}
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c
new file mode 100644
index 000000000..72bbead5b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse_decompress.c
@@ -0,0 +1,309 @@
+/* ******************************************************************
+ FSE : Finite State Entropy decoder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+/* check and forward error code */
+#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ The objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+ free(dt);
+}
+
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+
+ /* normal FSE decoding mode */
+ size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+
+ CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+
+ return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
+{
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+}
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
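Editorial note, not part of the vendored file: the matching decode-side sketch. FSE does not record the regenerated size itself, so the surrounding format is assumed to supply a sufficiently large destination; the helper name is illustrative.

#include "fse.h"   /* FSE_decompress, FSE_isError */

/* Regenerate a block produced by FSE_compress(); dstCapacity must cover the original size. */
static size_t toyDecompressBlock(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
    size_t const rSize = FSE_decompress(dst, dstCapacity, cSrc, cSrcSize);
    if (FSE_isError(rSize)) return rSize;   /* corrupt input, tableLog too large, or dst too small */
    return rSize;                           /* number of regenerated bytes */
}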
diff --git a/vendor/github.com/DataDog/zstd/hist.c b/vendor/github.com/DataDog/zstd/hist.c
new file mode 100644
index 000000000..45b7babc1
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/hist.c
@@ -0,0 +1,203 @@
+/* ******************************************************************
+ hist : Histogram functions
+ part of Finite State Entropy project
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "mem.h" /* U32, BYTE, etc. */
+#include "debug.h" /* assert, DEBUGLOG */
+#include "error_private.h" /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ * Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned largestCount=0;
+
+ memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+ while (ip<end) {
+ assert(*ip <= maxSymbolValue);
+ count[*ip++]++;
+ }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+
+ { U32 s;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > largestCount) largestCount = count[s];
+ }
+
+ return largestCount;
+}
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned.
+ * @return : largest histogram frequency,
+ * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
+static size_t HIST_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ HIST_checkInput_e check,
+ U32* const workSpace)
+{
+ const BYTE* ip = (const BYTE*)source;
+ const BYTE* const iend = ip+sourceSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned max=0;
+ U32* const Counting1 = workSpace;
+ U32* const Counting2 = Counting1 + 256;
+ U32* const Counting3 = Counting2 + 256;
+ U32* const Counting4 = Counting3 + 256;
+
+ memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+ /* safety checks */
+ if (!sourceSize) {
+ memset(count, 0, maxSymbolValue + 1);
+ *maxSymbolValuePtr = 0;
+ return 0;
+ }
+ if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
+
+ /* by stripes of 16 bytes */
+ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ }
+ ip-=4;
+ }
+
+ /* finish last symbols */
+ while (ip<iend) Counting1[*ip++]++;
+
+ if (check) { /* verify stats will fit into destination table */
+ U32 s; for (s=255; s>maxSymbolValue; s--) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+ } }
+
+ { U32 s;
+ if (maxSymbolValue > 255) maxSymbolValue = 255;
+ for (s=0; s<=maxSymbolValue; s++) {
+ count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+ if (count[s] > max) max = count[s];
+ } }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+ return (size_t)max;
+}
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if (sourceSize < 1500) /* heuristic threshold */
+ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize)
+{
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ if (*maxSymbolValuePtr < 255)
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+ *maxSymbolValuePtr = 255;
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
+
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
+}
diff --git a/vendor/github.com/DataDog/zstd/hist.h b/vendor/github.com/DataDog/zstd/hist.h
new file mode 100644
index 000000000..8b389358d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/hist.h
@@ -0,0 +1,95 @@
+/* ******************************************************************
+ hist : Histogram functions
+ part of Finite State Entropy project
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include <stddef.h> /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ * Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ * Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified).
+ * or an error code, which can be tested using HIST_isError().
+ * note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/** HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * Benefit is this function will use very little stack space.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/** HIST_countFast() :
+ * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+/** HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ * Like HIST_countFast(), this function is unsafe,
+ * and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ * It is also a bit slower for large inputs.
+ * However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ * Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
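Editorial sketch, not part of the vendored header: a typical HIST_count() call as documented above, counting byte frequencies into a 256-entry table. The helper name is illustrative.

#include "hist.h"

/* Fill count[0..255] with the byte frequencies of src.
 * Returns the frequency of the most common byte, or 0 on error. */
static unsigned byteHistogram(unsigned count[256], const void* src, size_t srcSize)
{
    unsigned maxSymbolValue = 255;   /* in : largest expected value ; out : largest value actually present */
    size_t const largest = HIST_count(count, &maxSymbolValue, src, srcSize);
    if (HIST_isError(largest)) return 0;   /* e.g. maxSymbolValue_tooSmall */
    return (unsigned)largest;
}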
diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h
new file mode 100644
index 000000000..6b572c448
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf.h
@@ -0,0 +1,358 @@
+/* ******************************************************************
+ huff0 huffman codec,
+ part of Finite State Entropy library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include <stddef.h> /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/** HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/** HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'originalSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ * because it knows size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
+
+
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
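Editorial sketch, not part of the vendored header: a round trip through the one-call API declared above. It assumes the caller keeps the original size, since HUF_decompress() needs it back as originalSize, and that the compressed buffer was sized with HUF_compressBound(); the helper name is illustrative.

#include <string.h>   /* memcpy */
#include "huf.h"

/* Compress then regenerate one block (srcSize <= HUF_BLOCKSIZE_MAX);
 * cBuf should hold at least HUF_compressBound(srcSize) bytes. */
static size_t hufRoundTrip(void* regen, void* cBuf, size_t cBufSize,
                           const void* src, size_t srcSize)
{
    size_t const cSize = HUF_compress(cBuf, cBufSize, src, srcSize);
    if (HUF_isError(cSize)) return cSize;
    if (cSize == 0) {                 /* not compressible : nothing was stored in cBuf */
        memcpy(regen, src, srcSize);
        return srcSize;
    }
    return HUF_decompress(regen, srcSize, cBuf, cSize);   /* originalSize == srcSize */
}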
+
+
+/* *** Advanced function *** */
+
+/** HUF_compress2() :
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/** HUF_compress4X_wksp() :
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE (6 << 10)
+#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
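Editorial sketch: the same compression routed through the workspace variant so the library performs no internal allocation. HUF_SYMBOLVALUE_MAX and HUF_TABLELOG_DEFAULT live in the static-linking section further below, hence the HUF_STATIC_LINKING_ONLY define; how a caller wires this up is an assumption, not upstream usage.

#define HUF_STATIC_LINKING_ONLY   /* U32 (via mem.h), HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT */
#include "huf.h"

static size_t hufCompressNoAlloc(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    U32 wksp[HUF_WORKSPACE_SIZE_U32];   /* 4-byte aligned, HUF_WORKSPACE_SIZE bytes */
    return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                               HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                               wksp, sizeof(wksp));
}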
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+# error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible data is pre-filtered with a fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
+ void* name##hv = &(name##hb); \
+ HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+#endif
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
+#endif
+
+
+/* ****************************************
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
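Editorial sketch, not upstream code: the five-step pipeline described in the comment above, spelled out with the declarations just listed and HIST_count() from hist.h standing in for the counting step. Error handling and corner cases (RLE, incompressible input) are reduced to the minimum, and the helper name is illustrative.

#define HUF_STATIC_LINKING_ONLY   /* HUF_CREATE_STATIC_CTABLE, HUF_TABLELOG_DEFAULT, BYTE/U32 via mem.h */
#include "huf.h"
#include "hist.h"

static size_t hufCompressStepByStep(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);   /* HUF_CElt is an incomplete type, so use the helper macro */
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    unsigned tableLog;

    /* 1. histogram the source */
    size_t const largest = HIST_count(count, &maxSymbolValue, src, srcSize);
    if (HIST_isError(largest)) return largest;

    /* 2. refine tableLog, 3. build the Huffman tree */
    tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
    {   size_t const maxNbBits = HUF_buildCTable(ct, count, maxSymbolValue, tableLog);
        if (HUF_isError(maxNbBits)) return maxNbBits;

        /* 4. serialize the table in front of the payload */
        {   size_t const hSize = HUF_writeCTable(op, dstCapacity, ct, maxSymbolValue, (unsigned)maxNbBits);
            if (HUF_isError(hSize)) return hSize;
            op += hSize;
        }
    }

    /* 5. encode the data itself as 4 interleaved streams */
    {   size_t const cSize = HUF_compress4X_usingCTable(op, dstCapacity - (size_t)(op - ostart), src, srcSize, ct);
        if (HUF_isError(cSize)) return cSize;
        if (cSize == 0) return 0;   /* payload did not fit, or was not compressible */
        op += cSize;
    }
    return (size_t)(op - ostart);   /* serialized table + compressed payload */
}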
+
+typedef enum {
+ HUF_repeat_none, /**< Cannot use the previous table */
+ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/** HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+/** HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src` , or an error Code .
+ * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+/** HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/** HUF_getNbBits() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : is not inlined, as HUF_CElt definition is private
+ * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build Huffman table from save, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+/** HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+/**
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
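Editorial sketch, not upstream code: the decode path outlined above, i.e. pick a decoder with HUF_selectDecoder(), rebuild the DTable from the serialized header, then decode the four interleaved streams. It assumes a default build where neither HUF_FORCE_DECOMPRESS_X1 nor HUF_FORCE_DECOMPRESS_X2 is defined; the helper name is illustrative.

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

static size_t hufDecompressStepByStep(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(dt, HUF_TABLELOG_MAX);          /* sized for the larger (double-symbol) decoder */
    U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);   /* 0 -> X1, 1 -> X2 */
    size_t const hSize = algoNb ?
            HUF_readDTableX2_wksp(dt, cSrc, cSrcSize, wksp, sizeof(wksp)) :
            HUF_readDTableX1_wksp(dt, cSrc, cSrcSize, wksp, sizeof(wksp));
    if (HUF_isError(hSize)) return hSize;
    /* hSize bytes of cSrc held the serialized table; the remainder is the 4-stream payload.
       Production code must also reject hSize >= cSrcSize before the subtraction below. */
    return HUF_decompress4X_usingDTable(dst, dstSize, (const BYTE*)cSrc + hSize, cSrcSize - hSize, dt);
}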
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+/** HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+#endif
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
+#endif
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single- or double-symbol decoder, based on DTable */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c
new file mode 100644
index 000000000..f074f1e0a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf_compress.c
@@ -0,0 +1,798 @@
+/* ******************************************************************
+ Huffman encoder, part of New Generation Entropy library
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+#include "compiler.h"
+#include "bitstream.h"
+#include "hist.h"
+#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
+#include "fse.h" /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+
+/* **************************************************************
+* Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+* HUF : Huffman block compression
+*********************************************************/
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
+ U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+
+ FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+ BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
+
+ unsigned count[HUF_TABLELOG_MAX+1];
+ S16 norm[HUF_TABLELOG_MAX+1];
+
+ /* init conditions */
+ if (wtSize <= 1) return 0; /* Not compressible */
+
+ /* Scan input and build symbol stats */
+ { unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize); /* never fails */
+ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ }
+
+ tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+ op += hSize;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+
+ return op-ostart;
+}
+
+
+struct HUF_CElt_s {
+ U16 val;
+ BYTE nbBits;
+}; /* typedef'd to HUF_CElt within "huf.h" */
+
+/*! HUF_writeCTable() :
+ `CTable` : Huffman tree to save, using huf representation.
+ @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
+{
+ BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+ BYTE* op = (BYTE*)dst;
+ U32 n;
+
+ /* check conditions */
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+ /* convert to weight */
+ bitsToWeight[0] = 0;
+ for (n=1; n<huffLog+1; n++)
+ bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+ for (n=0; n<maxSymbolValue; n++)
+ huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+
+ /* attempt weights compression by FSE */
+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
+ if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
+ op[0] = (BYTE)hSize;
+ return hSize+1;
+ } }
+
+ /* write raw values as 4-bits (max : 15) */
+ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
+ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+ huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+ for (n=0; n<maxSymbolValue; n+=2)
+ op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
+ return ((maxSymbolValue+1)/2) + 1;
+}
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+
+ /* get symbol weights */
+ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+
+ /* check result */
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ } }
+
+ /* fill nbBits */
+ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
+ } }
+
+ /* fill val */
+ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
+ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+ { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
+ /* determine starting value per rank */
+ valPerRank[tableLog+1] = 0; /* for w==0 */
+ { U16 min = 0;
+ U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ /* assign value within rank, symbol order */
+ { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
+ }
+
+ *maxSymbolValuePtr = nbSymbols - 1;
+ return readSize;
+}
+
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
+{
+ const HUF_CElt* table = (const HUF_CElt*)symbolTable;
+ assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ return table[symbolValue].nbBits;
+}
+
+
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+ const U32 largestBits = huffNode[lastNonNull].nbBits;
+ if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
+
+ /* there are several too large elements (at least >= 2) */
+ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
+ U32 n = lastNonNull;
+
+ while (huffNode[n].nbBits > maxNbBits) {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+ huffNode[n].nbBits = (BYTE)maxNbBits;
+ n --;
+ } /* n stops at huffNode[n].nbBits <= maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) n--; /* n ends at index of smallest symbol using < maxNbBits */
+
+ /* renorm totalCost */
+ totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
+
+ /* repay normalized cost */
+ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+ int pos;
+
+ /* Get pos of last (smallest) symbol per rank */
+ memset(rankLast, 0xF0, sizeof(rankLast));
+ { U32 currentNbBits = maxNbBits;
+ for (pos=n ; pos >= 0; pos--) {
+ if (huffNode[pos].nbBits >= currentNbBits) continue;
+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+ rankLast[maxNbBits-currentNbBits] = pos;
+ } }
+
+ while (totalCost > 0) {
+ U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
+ for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+ U32 highPos = rankLast[nBitsToDecrease];
+ U32 lowPos = rankLast[nBitsToDecrease-1];
+ if (highPos == noSymbol) continue;
+ if (lowPos == noSymbol) break;
+ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+ /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+ /* HUF_TABLELOG_MAX test just to please gcc 5+; but it should not be necessary */
+ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+ nBitsToDecrease ++;
+ totalCost -= 1 << (nBitsToDecrease-1);
+ if (rankLast[nBitsToDecrease-1] == noSymbol)
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
+ huffNode[rankLast[nBitsToDecrease]].nbBits ++;
+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol;
+ else {
+ rankLast[nBitsToDecrease]--;
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+ } } /* while (totalCost > 0) */
+
+ while (totalCost < 0) { /* Sometimes, cost correction overshoots */
+ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+ while (huffNode[n].nbBits == maxNbBits) n--;
+ huffNode[n+1].nbBits--;
+ rankLast[1] = n+1;
+ totalCost++;
+ continue;
+ }
+ huffNode[ rankLast[1] + 1 ].nbBits--;
+ rankLast[1]++;
+ totalCost ++;
+ } } } /* there are several too large elements (at least >= 2) */
+
+ return maxNbBits;
+}
+
+
+typedef struct {
+ U32 base;
+ U32 current;
+} rankPos;
+
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
+{
+ rankPos rank[32];
+ U32 n;
+
+ memset(rank, 0, sizeof(rank));
+ for (n=0; n<=maxSymbolValue; n++) {
+ U32 r = BIT_highbit32(count[n] + 1);
+ rank[r].base ++;
+ }
+ for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
+ for (n=0; n<32; n++) rank[n].current = rank[n].base;
+ for (n=0; n<=maxSymbolValue; n++) {
+ U32 const c = count[n];
+ U32 const r = BIT_highbit32(c+1) + 1;
+ U32 pos = rank[r].current++;
+ while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
+ huffNode[pos] = huffNode[pos-1];
+ pos--;
+ }
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ }
+}
+
+
+/** HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+ nodeElt* const huffNode0 = (nodeElt*)workSpace;
+ nodeElt* const huffNode = huffNode0+1;
+ U32 n, nonNullRank;
+ int lowS, lowN;
+ U16 nodeNb = STARTNODE;
+ U32 nodeRoot;
+
+ /* safety checks */
+ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue);
+
+ /* init for parents */
+ nonNullRank = maxSymbolValue;
+ while(huffNode[nonNullRank].count == 0) nonNullRank--;
+ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+ huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
+ nodeNb++; lowS-=2;
+ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+ huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
+
+ /* create parents */
+ while (nodeNb <= nodeRoot) {
+ U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+ huffNode[n1].parent = huffNode[n2].parent = nodeNb;
+ nodeNb++;
+ }
+
+ /* distribute weights (unlimited tree height) */
+ huffNode[nodeRoot].nbBits = 0;
+ for (n=nodeRoot-1; n>=STARTNODE; n--)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ for (n=0; n<=nonNullRank; n++)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+ /* enforce maxTableLog */
+ maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
+
+ /* fill result into tree (val, nbBits) */
+ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+        /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
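+        /* example : with nbPerRank = {0,1,1,2} and maxNbBits = 3, the loop
+         * yields valPerRank = {-,1,1,0} : the two 3-bit codes take values
+         * 0 and 1, the 2-bit code takes value 1, and the 1-bit code takes
+         * value 1, which is a prefix-free canonical assignment. */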
+ for (n=0; n<=maxSymbolValue; n++)
+ tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
+ for (n=0; n<=maxSymbolValue; n++)
+ tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
+ }
+
+ return maxNbBits;
+}
+
+/** HUF_buildCTable() :
+ * @return : maxNbBits
+ * Note : count is used before tree is written, so they can safely overlap
+ */
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
+{
+ huffNodeTable nodeTable;
+ return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
+}
+
+static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
+{
+ size_t nbBits = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ nbBits += CTable[s].nbBits * count[s];
+ }
+ return nbBits >> 3;
+}
+
+static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+ int bad = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
+ }
+ return !bad;
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
+{
+ BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
+}
+
+#define HUF_FLUSHBITS(s) BIT_flushBits(s)
+
+#define HUF_FLUSHBITS_1(stream) \
+ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+
+#define HUF_FLUSHBITS_2(stream) \
+ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
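+/* HUF_FLUSHBITS_1 / HUF_FLUSHBITS_2 expand to a flush only when the bit
+ * container is too small to safely accumulate 2 (resp. 4) symbols of up to
+ * HUF_TABLELOG_MAX bits on top of the <=7 bits that may already be pending.
+ * With a 64-bit size_t both are no-ops, so only one real flush happens per
+ * group of 4 symbols; 32-bit builds keep the _2 flush (every 2 symbols). */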
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+ size_t n;
+ BIT_CStream_t bitC;
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
+ { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
+ if (HUF_isError(initErr)) return 0; }
+
+    n = srcSize & ~3;  /* round down to a multiple of 4 */
+ switch (srcSize & 3)
+ {
+ case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
+ HUF_FLUSHBITS_2(&bitC);
+ /* fall-through */
+ case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ /* fall-through */
+ case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
+ HUF_FLUSHBITS(&bitC);
+ /* fall-through */
+ case 0 : /* fall-through */
+ default: break;
+ }
+
+ for (; n>0; n-=4) { /* note : n&3==0 at this stage */
+ HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
+ HUF_FLUSHBITS_2(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
+ HUF_FLUSHBITS(&bitC);
+ }
+
+ return BIT_closeCStream(&bitC);
+}
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ if (bmi2) {
+ return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+ }
+ return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ (void)bmi2;
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
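+/* When DYNAMIC_BMI2 is enabled, the same encoding body is compiled twice :
+ * once with TARGET_ATTRIBUTE("bmi2"), letting the compiler emit BMI2
+ * instructions, and once without; the `bmi2` runtime flag picks the variant.
+ * Otherwise a single variant is built and the flag is ignored. */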
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, int bmi2)
+{
+ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
+ if (srcSize < 12) return 0; /* no saving possible : too small input */
+ op += 6; /* jumpTable */
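+    /* 4-stream framing : the input is cut into 4 segments of segmentSize bytes
+     * (the last one possibly shorter); the compressed sizes of the first three
+     * streams are written as little-endian U16 into this 6-byte jump table,
+     * and the fourth stream's size is implied by the total. */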
+
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
+ if (cSize==0) return 0;
+ op += cSize;
+ }
+
+ return op-ostart;
+}
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
+
+static size_t HUF_compressCTable_internal(
+ BYTE* const ostart, BYTE* op, BYTE* const oend,
+ const void* src, size_t srcSize,
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+{
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
+ HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
+ HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
+ if (HUF_isError(cSize)) { return cSize; }
+ if (cSize==0) { return 0; } /* uncompressible */
+ op += cSize;
+ /* check compressibility */
+ if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+ return op-ostart;
+}
+
+typedef struct {
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
+ HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
+ huffNodeTable nodeTable;
+} HUF_compress_tables_t;
+
+/* HUF_compress_internal() :
+ * `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ HUF_nbStreams_e nbStreams,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+ const int bmi2)
+{
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ /* checks & inits */
+    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-byte boundaries */
+ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
+ if (!srcSize) return 0; /* Uncompressed */
+ if (!dstSize) return 0; /* cannot fit anything within dst budget */
+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+ /* Heuristic : If old table is valid, use it for small inputs */
+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Scan input and build symbol stats */
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
+ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
+ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
+
+ /* Check validity of previous table */
+ if ( repeat
+ && *repeat == HUF_repeat_check
+ && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+ *repeat = HUF_repeat_none;
+ }
+ /* Heuristic : use existing table for small inputs */
+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ table->nodeTable, sizeof(table->nodeTable));
+ CHECK_F(maxBits);
+ huffLog = (U32)maxBits;
+ /* Zero unused symbols in CTable, so we can check it for validity */
+ memset(table->CTable + (maxSymbolValue + 1), 0,
+ sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
+ }
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
+ /* Check if using previous huffman table is beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+ size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ } }
+
+ /* Use the new huffman table */
+ if (hSize + 12ul >= srcSize) { return 0; }
+ op += hSize;
+ if (repeat) { *repeat = HUF_repeat_none; }
+ if (oldHufTable)
+ memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ }
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, table->CTable, bmi2);
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize, hufTable,
+ repeat, preferRepeat, bmi2);
+}
+
+size_t HUF_compress1X (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog)
+{
+ unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+ return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+/* HUF_compress4X_wksp():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * re-use an existing huffman compression table */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ hufTable, repeat, preferRepeat, bmi2);
+}
+
+size_t HUF_compress2 (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog)
+{
+ unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+ return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
+}
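+
+/* Minimal usage sketch for the one-shot API above, assuming "huf.h" is on the
+ * include path and the input is at most HUF_BLOCKSIZE_MAX bytes; `roundTrip`,
+ * `src`, `srcSize` and `decoded` are caller-provided placeholders, and
+ * `decoded` must hold at least `srcSize` bytes :
+ *
+ *   #include <stdlib.h>
+ *   #include "huf.h"
+ *
+ *   size_t roundTrip(const void* src, size_t srcSize, void* decoded)
+ *   {
+ *       size_t const cBound = HUF_compressBound(srcSize);
+ *       void* const cBuff = malloc(cBound);
+ *       size_t dSize = 0;
+ *       if (cBuff != NULL) {
+ *           size_t const cSize = HUF_compress(cBuff, cBound, src, srcSize);
+ *           // cSize==0 means the block is not compressible and must be stored raw
+ *           if (!HUF_isError(cSize) && cSize != 0)
+ *               dSize = HUF_decompress(decoded, srcSize, cBuff, cSize);
+ *           free(cBuff);
+ *       }
+ *       return dSize;   // regenerated size, or 0 when no round trip happened
+ *   }
+ */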
diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c
new file mode 100644
index 000000000..3f8bd2973
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf_decompress.c
@@ -0,0 +1,1232 @@
+/* ******************************************************************
+ huff0 huffman decoder,
+ part of Finite State Entropy library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include <string.h> /* memcpy, memset */
+#include "compiler.h"
+#include "bitstream.h" /* BIT_* */
+#include "fse.h" /* to compress headers */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "error_private.h"
+
+/* **************************************************************
+* Macros
+****************************************************************/
+
+/* These two optional macros force the use of one or the other of the two
+ * Huffman decompression implementations. They cannot both be defined at
+ * the same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+ defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
+
+
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
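+/* HUF_ALIGN(x, a) rounds `x` up to the next multiple of `a` (a power of 2) :
+ * e.g. HUF_ALIGN(13, 4) == 16 and HUF_ALIGN(16, 4) == 16. */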
+
+
+/* **************************************************************
+* BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn) \
+ \
+ static size_t fn##_default( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ if (bmi2) { \
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#else
+
+#define HUF_DGEN(fn) \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ (void)bmi2; \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#endif
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+ DTableDesc dtd;
+ memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
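+/* The X1 table holds 2^tableLog entries. A symbol of weight w (code length
+ * tableLog+1-w) is replicated across 2^(w-1) consecutive cells, so decoding is
+ * a single lookup of tableLog bits returning the byte plus the number of bits
+ * actually consumed. */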
+
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+
+ U32* rankVal;
+ BYTE* huffWeight;
+ size_t spaceUsed32 = 0;
+
+ rankVal = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+ huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+ if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* Table header */
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Calculate starting value for each rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<tableLog+1; n++) {
+ U32 const current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ } }
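+    /* rankVal[n] arrives from HUF_readStats() holding the number of weight-n
+     * symbols; each such symbol will occupy 2^(n-1) cells, hence the running
+     * total before rankVal[n] is overwritten with its start position. */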
+
+ /* fill DTable */
+ { U32 n;
+ for (n=0; n<nbSymbols; n++) {
+ U32 const w = huffWeight[n];
+ U32 const length = (1 << w) >> 1;
+ U32 u;
+ HUF_DEltX1 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (u = rankVal[w]; u < rankVal[w] + length; u++)
+ dt[u] = D;
+ rankVal[w] += length;
+ } }
+
+ return iSize;
+}
+
+size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX1_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
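+/* The _1 and _2 variants decode an extra symbol only when the bit accumulator
+ * is guaranteed to still hold enough valid bits since the last reload (based
+ * on register width and HUF_TABLELOG_MAX) : 64-bit hosts decode 4 symbols per
+ * reload, 32-bit hosts decode 2. */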
+
+HINT_INLINE size_t
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
+
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal = BIT_DStream_unfinished;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ BIT_reloadDStream(&bitD1);
+ BIT_reloadDStream(&bitD2);
+ BIT_reloadDStream(&bitD3);
+ BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+        /* note : should not be necessary : op# advance in lock step, and we control op4.
+         *        but curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when at least one test is present */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 supposed already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
+
+
+
+size_t HUF_decompress1X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
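+/* Each X2 entry packs up to two decoded symbols : `sequence` holds 1 or 2
+ * bytes (written little-endian), `length` says how many of them to copy to
+ * the output, and `nbBits` is the total number of bits consumed. rankVal_t
+ * stores, for each possible number of already-consumed bits, the start
+ * position of every weight class inside the table. */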
+
+
+/* HUF_fillDTableX2Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUF_DEltX2 DElt;
+ U32 rankVal[HUF_TABLELOG_MAX + 1];
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ } }
+}
+
+
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUF_TABLELOG_MAX + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ } else {
+ HUF_DEltX2 DElt;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ { U32 const end = start + length;
+ U32 u;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ rankVal[weight] += length;
+ }
+}
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+ U32 *rankStart;
+
+ rankValCol_t* rankVal;
+ U32* rankStats;
+ U32* rankStart0;
+ sortedSymbol_t* sortedSymbol;
+ BYTE* weightList;
+ size_t spaceUsed32 = 0;
+
+ rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
+ rankStats = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_MAX + 1;
+ rankStart0 = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_MAX + 2;
+ sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
+ spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
+ weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+ if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ rankStart = rankStart0 + 1;
+ memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
+
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+        rankStart[0] = nextRankStart;   /* put all weight-0 symbols at the end of the sorted list */
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = weightList[s];
+ U32 const r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUF_fillDTableX2(dt, maxTableLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
+
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX2_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+ } }
+ return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+ const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BIT_DStream_t bitD;
+
+ /* Init */
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1;
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+#endif /* HUF_FORCE_DECOMPRESS_X1 */
+
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+#endif
+
+/** HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ assert(dstSize > 0);
+ assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 1;
+#else
+ /* decoder timing evaluation */
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */
+ return DTime1 < DTime0;
+ }
+#endif
+}
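+/* Worked example : dstSize = 64 KB, cSrcSize = 32 KB gives Q = 8 and
+ * D256 = 256, so DTime0 = 926 + 128*256 = 33694 and
+ * DTime1 = 1613 + 75*256 = 20813, raised to 23414 by the memory penalty;
+ * 23414 < 33694, hence the double-symbol decoder (return 1) is selected. */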
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+ static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
+#endif
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+#else
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+#endif
+ }
+}
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+#endif
+ }
+}
+
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+#endif
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+ HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
+ }
+}
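
Every wrapper above follows the same shape: validate the sizes, let HUF_selectDecoder() choose between the single-symbol (X1) and double-symbol (X2) decoders unless one is forced at compile time, then forward to the matching _wksp routine. As a rough caller-side sketch, not part of the library, the workspace trick used by HUF_decompress1X_DCtx above can also be applied by a caller that wants to own the scratch buffer (the helper name and the HUF_STATIC_LINKING_ONLY assumption are ours):

#define HUF_STATIC_LINKING_ONLY   /* assumption: the _wksp entry points are exposed in huf.h's static section */
#include "huf.h"

/* Hypothetical helper: decompress a single-stream HUF block with a caller-owned
 * workspace, mirroring the stack array HUF_decompress1X_DCtx allocates above.
 * The HUF_DTable is assumed to be allocated and sized by the caller. */
static size_t decompress1X_withOwnWorkspace(HUF_DTable* dctx,
                                            void* dst, size_t dstSize,
                                            const void* cSrc, size_t cSrcSize)
{
    unsigned workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];  /* same size the convenience wrapper uses */
    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                      workSpace, sizeof(workSpace));   /* check the result with HUF_isError() */
}
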
diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
new file mode 100644
index 000000000..5da248756
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/mem.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+# define MEM_STATIC static __inline __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0 /* compat. with non-clang compilers */
+#endif
+
+/* code only tested on 32 and 64 bits systems */
+#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
+MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+# include <limits.h>
+#if CHAR_BIT != 8
+# error "this implementation requires char to be exactly 8-bit type"
+#endif
+ typedef unsigned char BYTE;
+#if USHRT_MAX != 65535
+# error "this implementation requires short to be exactly 16-bit type"
+#endif
+ typedef unsigned short U16;
+ typedef signed short S16;
+#if UINT_MAX != 4294967295
+# error "this implementation requires int to be exactly 32-bit type"
+#endif
+ typedef unsigned int U32;
+ typedef signed int S32;
+/* note : there are no limits defined for the long long type in C90.
+ * limits exist in C99; however, in that case, <stdint.h> is preferred */
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-**************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method does not require a compiler extension, but it violates the C standard.
+ *            It can generate buggy code on targets that require aligned memory accesses.
+ *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Only use it if there is no other way to achieve the best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
+ __pragma( pack(push, 1) )
+ typedef struct { U16 v; } unalign16;
+ typedef struct { U32 v; } unalign32;
+ typedef struct { U64 v; } unalign64;
+ typedef struct { size_t v; } unalignArch;
+ __pragma( pack(pop) )
+#else
+ typedef struct { U16 v; } __attribute__((packed)) unalign16;
+ typedef struct { U32 v; } __attribute__((packed)) unalign32;
+ typedef struct { U64 v; } __attribute__((packed)) unalign64;
+ typedef struct { size_t v; } __attribute__((packed)) unalignArch;
+#endif
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC size_t MEM_readST(const void* memPtr)
+{
+ size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_ulong(in);
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+ || (defined(__clang__) && __has_builtin(__builtin_bswap32))
+ return __builtin_bswap32(in);
+#else
+ return ((in << 24) & 0xff000000 ) |
+ ((in << 8) & 0x00ff0000 ) |
+ ((in >> 8) & 0x0000ff00 ) |
+ ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_uint64(in);
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+ || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+ return __builtin_bswap64(in);
+#else
+ return ((in << 56) & 0xff00000000000000ULL) |
+ ((in << 40) & 0x00ff000000000000ULL) |
+ ((in << 24) & 0x0000ff0000000000ULL) |
+ ((in << 8) & 0x000000ff00000000ULL) |
+ ((in >> 8) & 0x00000000ff000000ULL) |
+ ((in >> 24) & 0x0000000000ff0000ULL) |
+ ((in >> 40) & 0x000000000000ff00ULL) |
+ ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_swap32((U32)in);
+ else
+ return (size_t)MEM_swap64((U64)in);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian()) {
+ MEM_write16(memPtr, val);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+ return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
+{
+ MEM_writeLE16(memPtr, (U16)val);
+ ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ return MEM_swap32(MEM_read32(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
+{
+ if (MEM_isLittleEndian())
+ MEM_write32(memPtr, val32);
+ else
+ MEM_write32(memPtr, MEM_swap32(val32));
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
+{
+ if (MEM_isLittleEndian())
+ MEM_write64(memPtr, val64);
+ else
+ MEM_write64(memPtr, MEM_swap64(val64));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeLE32(memPtr, (U32)val);
+ else
+ MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_swap32(MEM_read32(memPtr));
+ else
+ return MEM_read32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
+{
+ if (MEM_isLittleEndian())
+ MEM_write32(memPtr, MEM_swap32(val32));
+ else
+ MEM_write32(memPtr, val32);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_swap64(MEM_read64(memPtr));
+ else
+ return MEM_read64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
+{
+ if (MEM_isLittleEndian())
+ MEM_write64(memPtr, MEM_swap64(val64));
+ else
+ MEM_write64(memPtr, val64);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readBE32(memPtr);
+ else
+ return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeBE32(memPtr, (U32)val);
+ else
+ MEM_writeBE64(memPtr, (U64)val);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
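
The MEM_read*/MEM_write* helpers above hide whichever access strategy MEM_FORCE_MEMORY_ACCESS selected, and the LE/BE variants add a byte swap only when the host endianness differs from the requested one. A self-contained sketch, in plain C rather than via this header, of what Method 0 combined with MEM_readLE32 boils down to:

#include <stdint.h>
#include <string.h>

/* Illustration only: copy the bytes with memcpy() (safe for unaligned input),
 * then swap them if the host turns out to be big-endian; these are the two
 * decisions mem.h makes for MEM_readLE32 under the default Method 0. */
static uint32_t readLE32_sketch(const void* memPtr)
{
    const union { uint32_t u; unsigned char c[4]; } one = { 1 };
    uint32_t val;
    memcpy(&val, memPtr, sizeof(val));
    if (one.c[0]) return val;                 /* little-endian host: bytes already in order */
    return ((val << 24) & 0xff000000u) |      /* big-endian host: byte swap */
           ((val <<  8) & 0x00ff0000u) |
           ((val >>  8) & 0x0000ff00u) |
           ((val >> 24) & 0x000000ffu);
}
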
diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
new file mode 100644
index 000000000..7a8294543
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/pool.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ====== Dependencies ======= */
+#include <stddef.h> /* size_t */
+#include "debug.h" /* assert */
+#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */
+#include "pool.h"
+
+/* ====== Compiler specifics ====== */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h" /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+ POOL_function function;
+ void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+ ZSTD_customMem customMem;
+ /* Keep track of the threads */
+ ZSTD_pthread_t* threads;
+ size_t threadCapacity;
+ size_t threadLimit;
+
+ /* The queue is a circular buffer */
+ POOL_job *queue;
+ size_t queueHead;
+ size_t queueTail;
+ size_t queueSize;
+
+ /* The number of threads working on jobs */
+ size_t numThreadsBusy;
+ /* Indicates if the queue is empty */
+ int queueEmpty;
+
+ /* The mutex protects the queue */
+ ZSTD_pthread_mutex_t queueMutex;
+ /* Condition variable for pushers to wait on when the queue is full */
+ ZSTD_pthread_cond_t queuePushCond;
+ /* Condition variables for poppers to wait on when the queue is empty */
+ ZSTD_pthread_cond_t queuePopCond;
+ /* Indicates if the queue is shutting down */
+ int shutdown;
+};
+
+/* POOL_thread() :
+ * Work thread for the thread pool.
+ * Waits for jobs and executes them.
+ * @returns : NULL on failure else non-null.
+ */
+static void* POOL_thread(void* opaque) {
+ POOL_ctx* const ctx = (POOL_ctx*)opaque;
+ if (!ctx) { return NULL; }
+ for (;;) {
+ /* Lock the mutex and wait for a non-empty queue or until shutdown */
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+
+ while ( ctx->queueEmpty
+ || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
+ if (ctx->shutdown) {
+ /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
+ * a few threads will be shut down while the queue is still non-empty,
+ * but enough threads will remain active to finish the queue */
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return opaque;
+ }
+ ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+ }
+ /* Pop a job off the queue */
+ { POOL_job const job = ctx->queue[ctx->queueHead];
+ ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+ ctx->numThreadsBusy++;
+ ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
+ /* Unlock the mutex, signal a pusher, and run the job */
+ ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+
+ job.function(job.opaque);
+
+ /* If the intended queue size was 0, signal after finishing job */
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ ctx->numThreadsBusy--;
+ if (ctx->queueSize == 1) {
+ ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+ }
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ }
+ } /* for (;;) */
+ assert(0); /* Unreachable */
+}
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+ ZSTD_customMem customMem) {
+ POOL_ctx* ctx;
+ /* Check parameters */
+ if (!numThreads) { return NULL; }
+ /* Allocate the context and zero initialize */
+ ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
+ if (!ctx) { return NULL; }
+ /* Initialize the job queue.
+ * It needs one extra space since one space is wasted to differentiate
+ * empty and full queues.
+ */
+ ctx->queueSize = queueSize + 1;
+ ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
+ ctx->queueHead = 0;
+ ctx->queueTail = 0;
+ ctx->numThreadsBusy = 0;
+ ctx->queueEmpty = 1;
+ (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+ (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+ (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+ ctx->shutdown = 0;
+ /* Allocate space for the thread handles */
+ ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+ ctx->threadCapacity = 0;
+ ctx->customMem = customMem;
+ /* Check for errors */
+ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+ /* Initialize the threads */
+ { size_t i;
+ for (i = 0; i < numThreads; ++i) {
+ if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+ ctx->threadCapacity = i;
+ POOL_free(ctx);
+ return NULL;
+ } }
+ ctx->threadCapacity = numThreads;
+ ctx->threadLimit = numThreads;
+ }
+ return ctx;
+}
+
+/*! POOL_join() :
+ Shutdown the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx* ctx) {
+ /* Shut down the queue */
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ ctx->shutdown = 1;
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ /* Wake up sleeping threads */
+ ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+ ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+ /* Join all of the threads */
+ { size_t i;
+ for (i = 0; i < ctx->threadCapacity; ++i) {
+ ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */
+ } }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+ if (!ctx) { return; }
+ POOL_join(ctx);
+ ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+ ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+ ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+ ZSTD_free(ctx->queue, ctx->customMem);
+ ZSTD_free(ctx->threads, ctx->customMem);
+ ZSTD_free(ctx, ctx->customMem);
+}
+
+
+
+size_t POOL_sizeof(POOL_ctx *ctx) {
+ if (ctx==NULL) return 0; /* supports sizeof NULL */
+ return sizeof(*ctx)
+ + ctx->queueSize * sizeof(POOL_job)
+ + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
+}
+
+
+/* @return : 0 on success, 1 on error */
+static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
+{
+ if (numThreads <= ctx->threadCapacity) {
+ if (!numThreads) return 1;
+ ctx->threadLimit = numThreads;
+ return 0;
+ }
+ /* numThreads > threadCapacity */
+ { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+ if (!threadPool) return 1;
+ /* replace existing thread pool */
+ memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+ ZSTD_free(ctx->threads, ctx->customMem);
+ ctx->threads = threadPool;
+ /* Initialize additional threads */
+ { size_t threadId;
+ for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
+ if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
+ ctx->threadCapacity = threadId;
+ return 1;
+ } }
+ } }
+ /* successfully expanded */
+ ctx->threadCapacity = numThreads;
+ ctx->threadLimit = numThreads;
+ return 0;
+}
+
+/* @return : 0 on success, 1 on error */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads)
+{
+ int result;
+ if (ctx==NULL) return 1;
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ result = POOL_resize_internal(ctx, numThreads);
+ ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return result;
+}
+
+/**
+ * Returns 1 if the queue is full and 0 otherwise.
+ *
+ * When queueSize is 1 (the pool was created with an intended queueSize of 0),
+ * the queue only has room if a thread is free _and_ no job is waiting.
+ */
+static int isQueueFull(POOL_ctx const* ctx) {
+ if (ctx->queueSize > 1) {
+ return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
+ } else {
+ return (ctx->numThreadsBusy == ctx->threadLimit) ||
+ !ctx->queueEmpty;
+ }
+}
+
+
+static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+{
+ POOL_job const job = {function, opaque};
+ assert(ctx != NULL);
+ if (ctx->shutdown) return;
+
+ ctx->queueEmpty = 0;
+ ctx->queue[ctx->queueTail] = job;
+ ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
+ ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+ assert(ctx != NULL);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ /* Wait until there is space in the queue for the new job */
+ while (isQueueFull(ctx) && (!ctx->shutdown)) {
+ ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+ }
+ POOL_add_internal(ctx, function, opaque);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+ assert(ctx != NULL);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ if (isQueueFull(ctx)) {
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return 0;
+ }
+ POOL_add_internal(ctx, function, opaque);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return 1;
+}
+
+
+#else /* ZSTD_MULTITHREAD not defined */
+
+/* ========================== */
+/* No multi-threading support */
+/* ========================== */
+
+
+/* We don't need any data, but if it is empty, malloc() might return NULL. */
+struct POOL_ctx_s {
+ int dummy;
+};
+static POOL_ctx g_ctx;
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+ (void)numThreads;
+ (void)queueSize;
+ (void)customMem;
+ return &g_ctx;
+}
+
+void POOL_free(POOL_ctx* ctx) {
+ assert(!ctx || ctx == &g_ctx);
+ (void)ctx;
+}
+
+int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
+ (void)ctx; (void)numThreads;
+ return 0;
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
+ (void)ctx;
+ function(opaque);
+}
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
+ (void)ctx;
+ function(opaque);
+ return 1;
+}
+
+size_t POOL_sizeof(POOL_ctx* ctx) {
+ if (ctx==NULL) return 0; /* supports sizeof NULL */
+ assert(ctx == &g_ctx);
+ return sizeof(*ctx);
+}
+
+#endif /* ZSTD_MULTITHREAD */
diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h
new file mode 100644
index 000000000..458d37f13
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/pool.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include <stddef.h> /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
+#include "zstd.h"
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+ * Create a thread pool with at most `numThreads` threads.
+ * `numThreads` must be at least 1.
+ * The maximum number of queued jobs before blocking is `queueSize`.
+ * @return : POOL_ctx pointer on success, else NULL.
+*/
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+ ZSTD_customMem customMem);
+
+/*! POOL_free() :
+ * Free a thread pool returned by POOL_create().
+ */
+void POOL_free(POOL_ctx* ctx);
+
+/*! POOL_resize() :
+ * Expands or shrinks pool's number of threads.
+ * This is more efficient than releasing + creating a new context,
+ * since it tries to preserve and re-use existing threads.
+ * `numThreads` must be at least 1.
+ * @return : 0 when resize was successful,
+ * !0 (typically 1) if there is an error.
+ * note : only numThreads can be resized, queueSize remains unchanged.
+ */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads);
+
+/*! POOL_sizeof() :
+ * @return threadpool memory usage
+ * note : compatible with NULL (returns 0 in this case)
+ */
+size_t POOL_sizeof(POOL_ctx* ctx);
+
+/*! POOL_function :
+ * The function type that can be added to a thread pool.
+ */
+typedef void (*POOL_function)(void*);
+
+/*! POOL_add() :
+ * Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+ * Possibly blocks until there is room in the queue.
+ * Note : The function may be executed asynchronously,
+ * therefore, `opaque` must live until function has been completed.
+ */
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+/*! POOL_tryAdd() :
+ * Add the job `function(opaque)` to thread pool _if_ a worker is available.
+ * Returns immediately even if not (does not block).
+ * @return : 1 if successful, 0 if not.
+ */
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
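
The POOL_* interface declared above is small enough for a complete usage sketch. The example below is ours, not part of the library: it submits a few jobs and then frees the pool, which shuts the queue down and joins the workers; if ZSTD_MULTITHREAD is not defined, the same calls still compile and POOL_add simply runs each job synchronously.

#include <stdio.h>
#include "pool.h"

/* Hypothetical job: POOL_function is void (*)(void*), so any state travels
 * through `opaque`, which must stay alive until the job has completed. */
static void printJob(void* opaque)
{
    int const id = *(int*)opaque;
    printf("job %d done\n", id);
}

int main(void)
{
    int ids[4] = { 0, 1, 2, 3 };
    int i;
    POOL_ctx* const pool = POOL_create(2 /* threads */, 4 /* queue slots */);
    if (pool == NULL) return 1;

    for (i = 0; i < 4; i++)
        POOL_add(pool, printJob, &ids[i]);   /* may block while the queue is full */

    POOL_free(pool);   /* drains remaining jobs, shuts down, and joins the workers */
    return 0;
}
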
diff --git a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c
new file mode 100644
index 000000000..f3d4fa841
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/threading.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+/**
+ * This file holds wrappers for systems that do not support pthreads
+ */
+
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+
+
+/* === Dependencies === */
+#include <process.h>
+#include <errno.h>
+#include "threading.h"
+
+
+/* === Implementation === */
+
+static unsigned __stdcall worker(void *arg)
+{
+ ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
+ thread->arg = thread->start_routine(thread->arg);
+ return 0;
+}
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+ void* (*start_routine) (void*), void* arg)
+{
+ (void)unused;
+ thread->arg = arg;
+ thread->start_routine = start_routine;
+ thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
+
+ if (!thread->handle)
+ return errno;
+ else
+ return 0;
+}
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
+{
+ DWORD result;
+
+ if (!thread.handle) return 0;
+
+ result = WaitForSingleObject(thread.handle, INFINITE);
+ switch (result) {
+ case WAIT_OBJECT_0:
+ if (value_ptr) *value_ptr = thread.arg;
+ return 0;
+ case WAIT_ABANDONED:
+ return EINVAL;
+ default:
+ return GetLastError();
+ }
+}
+
+#endif /* ZSTD_MULTITHREAD */
diff --git a/vendor/github.com/DataDog/zstd/threading.h b/vendor/github.com/DataDog/zstd/threading.h
new file mode 100644
index 000000000..d806c89d0
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/threading.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+#ifndef THREADING_H_938743
+#define THREADING_H_938743
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+#ifdef WINVER
+# undef WINVER
+#endif
+#define WINVER 0x0600
+
+#ifdef _WIN32_WINNT
+# undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0600
+
+#ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+#endif
+
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
+#include <windows.h>
+#undef ERROR
+#define ERROR(name) ZSTD_ERROR(name)
+
+
+/* mutex */
+#define ZSTD_pthread_mutex_t CRITICAL_SECTION
+#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0)
+#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
+#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
+#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
+
+/* condition variable */
+#define ZSTD_pthread_cond_t CONDITION_VARIABLE
+#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0)
+#define ZSTD_pthread_cond_destroy(a) ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
+#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
+#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
+
+/* ZSTD_pthread_create() and ZSTD_pthread_join() */
+typedef struct {
+ HANDLE handle;
+ void* (*start_routine)(void*);
+ void* arg;
+} ZSTD_pthread_t;
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+ void* (*start_routine) (void*), void* arg);
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
+
+/**
+ * add more wrappers here as required
+ */
+
+
+#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
+/* === POSIX Systems === */
+# include <pthread.h>
+
+#define ZSTD_pthread_mutex_t pthread_mutex_t
+#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
+#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
+#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
+#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
+
+#define ZSTD_pthread_cond_t pthread_cond_t
+#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
+#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
+#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
+#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
+#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
+
+#define ZSTD_pthread_t pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+
+#else /* ZSTD_MULTITHREAD not defined */
+/* No multithreading support */
+
+typedef int ZSTD_pthread_mutex_t;
+#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_mutex_destroy(a) ((void)(a))
+#define ZSTD_pthread_mutex_lock(a) ((void)(a))
+#define ZSTD_pthread_mutex_unlock(a) ((void)(a))
+
+typedef int ZSTD_pthread_cond_t;
+#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_cond_destroy(a) ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b))
+#define ZSTD_pthread_cond_signal(a) ((void)(a))
+#define ZSTD_pthread_cond_broadcast(a) ((void)(a))
+
+/* do not use ZSTD_pthread_t */
+
+#endif /* ZSTD_MULTITHREAD */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* THREADING_H_938743 */
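
Everything above is either a thin macro over pthreads, a Win32 shim, or a no-op, so code written against the ZSTD_pthread_* names builds in all three configurations. A short sketch of the usual pattern (ours, not taken from the library):

#include "threading.h"

/* Hypothetical shared counter guarded by the wrapper mutex. When ZSTD_MULTITHREAD
 * is not defined, the mutex type collapses to an int and the lock/unlock macros
 * become no-ops, so the same code still builds single-threaded. */
typedef struct {
    ZSTD_pthread_mutex_t lock;
    unsigned count;
} SharedCounter;

static int SharedCounter_init(SharedCounter* sc)
{
    sc->count = 0;
    return ZSTD_pthread_mutex_init(&sc->lock, NULL);   /* 0 on success, as with pthreads */
}

static void SharedCounter_bump(SharedCounter* sc)
{
    ZSTD_pthread_mutex_lock(&sc->lock);
    sc->count++;
    ZSTD_pthread_mutex_unlock(&sc->lock);
}

static void SharedCounter_destroy(SharedCounter* sc)
{
    ZSTD_pthread_mutex_destroy(&sc->lock);
}
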
diff --git a/vendor/github.com/DataDog/zstd/travis_test_32.sh b/vendor/github.com/DataDog/zstd/travis_test_32.sh
new file mode 100644
index 000000000..d29c86c9e
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/travis_test_32.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Get utilities
+yum -y -q -e 0 install wget tar unzip gcc
+
+# Get Go
+wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz
+tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz
+export PATH=$PATH:/usr/local/go/bin
+
+# Get payload
+wget -q https://github.com/DataDog/zstd/files/2246767/mr.zip
+unzip mr.zip
+
+# Build and run tests
+cd zstd
+go build
+PAYLOAD=$(pwd)/mr go test -v
+PAYLOAD=$(pwd)/mr go test -bench .
diff --git a/vendor/github.com/DataDog/zstd/update.txt b/vendor/github.com/DataDog/zstd/update.txt
new file mode 100644
index 000000000..1de939f76
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/update.txt
@@ -0,0 +1,56 @@
+./lib/common/bitstream.h
+./lib/common/compiler.h
+./lib/compress/zstd_compress_internal.h
+./lib/compress/zstd_fast.h
+./lib/compress/zstd_double_fast.h
+./lib/compress/zstd_lazy.h
+./lib/compress/zstd_ldm.h
+./lib/dictBuilder/cover.c
+./lib/dictBuilder/divsufsort.c
+./lib/dictBuilder/divsufsort.h
+./lib/common/entropy_common.c
+./lib/common/error_private.c
+./lib/common/error_private.h
+./lib/compress/fse_compress.c
+./lib/common/fse_decompress.c
+./lib/common/fse.h
+./lib/compress/huf_compress.c
+./lib/decompress/huf_decompress.c
+./lib/common/huf.h
+./lib/common/mem.h
+./lib/common/pool.c
+./lib/common/pool.h
+./lib/common/threading.c
+./lib/common/threading.h
+./lib/common/xxhash.c
+./lib/common/xxhash.h
+./lib/deprecated/zbuff_common.c
+./lib/deprecated/zbuff_compress.c
+./lib/deprecated/zbuff_decompress.c
+./lib/deprecated/zbuff.h
+./lib/dictBuilder/zdict.c
+./lib/dictBuilder/zdict.h
+./lib/common/zstd_common.c
+./lib/compress/zstd_compress.c
+./lib/decompress/zstd_decompress.c
+./lib/common/zstd_errors.h
+./lib/zstd.h
+./lib/common/zstd_internal.h
+./lib/legacy/zstd_legacy.h
+./lib/compress/zstd_opt.c
+./lib/compress/zstd_opt.h
+./lib/legacy/zstd_v01.c
+./lib/legacy/zstd_v01.h
+./lib/legacy/zstd_v02.c
+./lib/legacy/zstd_v02.h
+./lib/legacy/zstd_v03.c
+./lib/legacy/zstd_v03.h
+./lib/legacy/zstd_v04.c
+./lib/legacy/zstd_v04.h
+./lib/legacy/zstd_v05.c
+./lib/legacy/zstd_v05.h
+./lib/legacy/zstd_v06.c
+./lib/legacy/zstd_v06.h
+./lib/legacy/zstd_v07.c
+./lib/legacy/zstd_v07.h
+
diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
new file mode 100644
index 000000000..30599aaae
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/xxhash.c
@@ -0,0 +1,876 @@
+/*
+* xxHash - Fast Hash algorithm
+* Copyright (C) 2012-2016, Yann Collet
+*
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* You can contact the author at :
+* - xxHash homepage: http://www.xxhash.com
+* - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (ie, not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the best performance (eg GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash's output for a null input pointer will be the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment below define :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
+ * Results are therefore identical on little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1
+ * to improve speed on big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+# define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/* Modify the local functions below should you wish to use some other memory routines */
+/* for malloc(), free() */
+#include <stdlib.h>
+#include <stddef.h> /* size_t */
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void XXH_free (void* p) { free(p); }
+/* for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+#endif
+#include "xxhash.h"
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+# define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define FORCE_INLINE_ATTR __forceinline
+#else
+# define FORCE_INLINE_ATTR
+#endif
+
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+# else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+# endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U32 XXH_read32(const void* memPtr)
+{
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+static U64 XXH_read64(const void* memPtr)
+{
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#if defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+# define XXH_swap64 _byteswap_uint64
+#elif GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+# define XXH_swap64 __builtin_bswap64
+#else
+static U32 XXH_swap32 (U32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+static U64 XXH_swap64 (U64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* *************************************
+* Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+ static const int g_one = 1;
+# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/* *************************************
+* Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* *************************************
+* Constants
+***************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 = 668265263U;
+static const U32 PRIME32_5 = 374761393U;
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 = 1609587929392839161ULL;
+static const U64 PRIME64_4 = 9650029242287828579ULL;
+static const U64 PRIME64_5 = 2870177450012600261ULL;
+
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* **************************
+* Utils
+****************************/
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+
+/* ***************************
+* Simple Hash Functions
+*****************************/
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
+
+FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U32 h32;
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)16;
+ }
+#endif
+
+ if (len>=16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+ } while (p<=limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32) len;
+
+ while (p+4<=bEnd) {
+ h32 += XXH_get32bits(p) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_CREATESTATE_STATIC(state);
+ XXH32_reset(state, seed);
+ XXH32_update(state, input, len);
+ return XXH32_digest(state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+ U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)32;
+ }
+#endif
+
+ if (len>=32) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+ } while (p<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64) len;
+
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+ h64 ^= k1;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ p+=8;
+ }
+
+ if (p+4<=bEnd) {
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_CREATESTATE_STATIC(state);
+ XXH64_reset(state, seed);
+ XXH64_update(state, input, len);
+ return XXH64_digest(state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+/* **************************************************
+* Advanced Hash Functions
+****************************************************/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+
+/*** Hash feed ***/
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ memcpy(statePtr, &state, sizeof(state));
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ memcpy(statePtr, &state, sizeof(state));
+ return XXH_OK;
+}
+
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (input==NULL) return XXH_ERROR;
+#endif
+
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+ const BYTE * p = (const BYTE*)state->mem32;
+ const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
+ U32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ while (p+4<=bEnd) {
+ h32 += XXH_readLE32(p, endian) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+
+/* **** XXH64 **** */
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (input==NULL) return XXH_ERROR;
+#endif
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+ const BYTE * p = (const BYTE*)state->mem64;
+ const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+ U64 h64;
+
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 + PRIME64_5;
+ }
+
+ h64 += (U64) state->total_len;
+
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+ h64 ^= k1;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ p+=8;
+ }
+
+ if (p+4<=bEnd) {
+ h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/* **************************
+* Canonical representation
+****************************/
+
+/*! The default XXH result types are basic unsigned 32-bit and 64-bit integers.
+* The canonical representation follows the human-readable write convention, aka big-endian (most significant digits first).
+* These functions allow transformation of a hash result into and from its canonical format.
+* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
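To illustrate the canonical conversion implemented above, here is a minimal sketch that round-trips an XXH32 value through its big-endian canonical form; it relies only on the public functions declared in xxhash.h, and the sample buffer and seed are illustrative.

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char data[] = "sample input";
        XXH32_hash_t const h = XXH32(data, strlen(data), 0);   /* seed = 0 */

        /* serialize in canonical (big-endian) form, e.g. before writing to a file */
        XXH32_canonical_t canonical;
        XXH32_canonicalFromHash(&canonical, h);

        /* read it back : the value is identical regardless of host endianness */
        XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canonical);
        printf("hash=%08x roundtrip_ok=%d\n", (unsigned)h, h == h2);
        return 0;
    }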
diff --git a/vendor/github.com/DataDog/zstd/xxhash.h b/vendor/github.com/DataDog/zstd/xxhash.h
new file mode 100644
index 000000000..9bad1f59f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/xxhash.h
@@ -0,0 +1,305 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a 2.7 GB/s 10 Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but only for 64-bit applications.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+
+/* ****************************
+* Definitions
+******************************/
+#include <stddef.h> /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+* API modifier
+******************************/
+/** XXH_PRIVATE_API
+* This is useful if you want to include xxhash functions in `static` mode
+* in order to inline them, and remove their symbols from the public list.
+* Methodology :
+* #define XXH_PRIVATE_API
+* #include "xxhash.h"
+* `xxhash.c` is then automatically included,
+* so there is no need to compile and link it as a separate module anymore.
+*/
+#ifdef XXH_PRIVATE_API
+# ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+# endif
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */
+# endif
+#else
+# define XXH_PUBLIC_API /* do nothing */
+#endif /* XXH_PRIVATE_API */
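As a concrete illustration of the methodology above, a hypothetical consumer translation unit defines XXH_PRIVATE_API before the include, and all xxHash functions become static symbols local to that unit (file and function names are illustrative):

    /* hypothetical consumer file */
    #define XXH_PRIVATE_API        /* xxhash.c is pulled in as static (inlinable) functions */
    #include "xxhash.h"

    static unsigned hash_blob(const void* buf, size_t len)
    {
        return XXH32(buf, len, 0);   /* XXH32 has no external linkage here */
    }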
+
+/*!XXH_NAMESPACE, aka Namespace Emulation :
+
+If you want to include _and expose_ xxHash functions from within your own library,
+but also want to avoid symbol collisions with another library which also includes xxHash,
+
+you can use XXH_NAMESPACE to automatically prefix every public symbol of the xxhash library
+with the value of XXH_NAMESPACE (so do not leave it empty, and avoid purely numeric values).
+
+Note that no change is required within the calling program as long as it includes `xxhash.h` :
+regular symbol names will be automatically translated by this header.
+*/
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
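A short sketch of the namespace emulation described above; the MYLIB_ prefix and the wrapper function are purely illustrative:

    /* typically passed on the command line, e.g. -DXXH_NAMESPACE=MYLIB_ */
    #define XXH_NAMESPACE MYLIB_
    #include "xxhash.h"

    unsigned long long mylib_checksum(const void* p, size_t n)
    {
        /* the call still reads XXH64(), but the macros above rename the
           exported symbol to MYLIB_XXH64, avoiding collisions with another
           copy of xxHash linked elsewhere */
        return XXH64(p, n, 0);
    }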
+
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_RELEASE 2
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Simple Hash Functions
+******************************/
+typedef unsigned int XXH32_hash_t;
+typedef unsigned long long XXH64_hash_t;
+
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+
+/*!
+XXH32() :
+ Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
+ The memory between input & input+length must be valid (allocated and read-accessible).
+ "seed" can be used to alter the result predictably.
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+XXH64() :
+ Calculate the 64-bit hash of a sequence of "length" bytes stored at memory address "input".
+ "seed" can be used to alter the result predictably.
+ This function runs roughly 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
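For reference, a minimal one-shot use of the two functions declared above (the message and seeds are illustrative):

    #include <string.h>
    #include "xxhash.h"

    void one_shot_example(void)
    {
        const char msg[] = "hello xxhash";
        XXH32_hash_t const h32 = XXH32(msg, strlen(msg), 0);   /* 32-bit hash, seed 0 */
        XXH64_hash_t const h64 = XXH64(msg, strlen(msg), 0);   /* 64-bit hash, seed 0 */
        (void)h32; (void)h64;   /* e.g. use as checksums or table keys */
    }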
+
+
+/* ****************************
+* Streaming Hash Functions
+******************************/
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*! State allocation, compatible with dynamic libraries */
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+
+
+/* hash streaming */
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
+
+/*
+These functions generate the xxHash of an input provided in multiple segments.
+Note that, for small inputs, they are slower than the single-call functions, due to state management.
+For small inputs, prefer `XXH32()` and `XXH64()` .
+
+XXH state must first be allocated, using XXH*_createState() .
+
+Start a new hash by initializing the state with a seed, using XXH*_reset().
+
+Then, feed the hash state by calling XXH*_update() as many times as necessary.
+Obviously, the input must be allocated and read-accessible.
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+
+Finally, a hash value can be produced at any time, by using XXH*_digest().
+This function returns a 32-bit or 64-bit hash (as an unsigned int or unsigned long long respectively).
+
+It's still possible to continue inserting input into the hash state after a digest,
+and generate new hashes later on, by calling XXH*_digest() again.
+
+When done, free the XXH state space if it was allocated dynamically.
+*/
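A compact sketch of the streaming sequence just described, hashing an input in fixed-size chunks; the 4 KB chunk size and the function name are illustrative:

    #include <stddef.h>
    #include "xxhash.h"

    /* returns 0 on allocation/update failure; hashes `len` bytes of `data` in 4 KB chunks */
    int stream_hash64(const void* data, size_t len, unsigned long long seed, XXH64_hash_t* out)
    {
        XXH64_state_t* const state = XXH64_createState();
        if (state == NULL) return 0;
        if (XXH64_reset(state, seed) != XXH_OK) { XXH64_freeState(state); return 0; }

        const char* p = (const char*)data;
        while (len > 0) {
            size_t const chunk = (len < 4096) ? len : 4096;
            if (XXH64_update(state, p, chunk) != XXH_OK) { XXH64_freeState(state); return 0; }
            p += chunk; len -= chunk;
        }
        *out = XXH64_digest(state);   /* a digest can be taken at any point */
        XXH64_freeState(state);
        return 1;
    }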
+
+
+/* **************************
+* Utils
+****************************/
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */
+# define restrict /* disable restrict */
+#endif
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
+
+
+/* **************************
+* Canonical representation
+****************************/
+/* The default result types of the XXH functions are primitive unsigned 32-bit and 64-bit integers.
+* The canonical representation uses the human-readable write convention, aka big-endian (most significant digits first).
+* These functions allow transformation of a hash result into and from its canonical format.
+* This way, hash values can be written into a file or memory, and remain comparable across different systems and programs.
+*/
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+/* ================================================================================================
+ This section contains definitions which are not guaranteed to remain stable.
+ They may change in future versions, becoming incompatible with a different version of the library.
+ They shall only be used with static linking.
+ Never use these definitions in association with dynamic linking !
+=================================================================================================== */
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
+#define XXH_STATIC_H_3543687687345
+
+/* These definitions are only meant to allow allocation of XXH state
+ statically, on stack, or in a struct for example.
+ Do not use members directly. */
+
+ struct XXH32_state_s {
+ unsigned total_len_32;
+ unsigned large_len;
+ unsigned v1;
+ unsigned v2;
+ unsigned v3;
+ unsigned v4;
+ unsigned mem32[4]; /* buffer defined as U32 for alignment */
+ unsigned memsize;
+ unsigned reserved; /* never read nor write, will be removed in a future version */
+ }; /* typedef'd to XXH32_state_t */
+
+ struct XXH64_state_s {
+ unsigned long long total_len;
+ unsigned long long v1;
+ unsigned long long v2;
+ unsigned long long v3;
+ unsigned long long v4;
+ unsigned long long mem64[4]; /* buffer defined as U64 for alignment */
+ unsigned memsize;
+ unsigned reserved[2]; /* never read nor write, will be removed in a future version */
+ }; /* typedef'd to XXH64_state_t */
+
+
+# ifdef XXH_PRIVATE_API
+# include "xxhash.c" /* include xxhash functions as `static`, for inlining */
+# endif
+
+#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
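Given the state structures defined in the static-linking section above, the state can live on the stack instead of going through XXH*_createState(); a minimal sketch, assuming XXH_STATIC_LINKING_ONLY is defined before the include (the function name is illustrative):

    #define XXH_STATIC_LINKING_ONLY
    #include "xxhash.h"

    XXH64_hash_t stack_state_hash(const void* data, size_t len)
    {
        XXH64_state_t state;              /* no heap allocation, no XXH64_freeState() needed */
        XXH64_reset(&state, 0);           /* members are never accessed directly */
        XXH64_update(&state, data, len);
        return XXH64_digest(&state);
    }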
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zbuff.h b/vendor/github.com/DataDog/zstd/zbuff.h
new file mode 100644
index 000000000..a93115da4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* ***************************************************************
+* NOTES/WARNINGS
+******************************************************************/
+/* The streaming API defined here is deprecated.
+ * Consider migrating towards ZSTD_compressStream() API in `zstd.h`
+ * See 'lib/README.md'.
+ *****************************************************************/
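Since ZBUFF now merely forwards to the newer streaming API, here is a hedged sketch of the suggested ZSTD_compressStream() migration path, using only symbols from zstd.h; the function name is illustrative, and it assumes dstCapacity >= ZSTD_compressBound(srcSize) so a single pass suffices.

    #include "zstd.h"

    /* single-frame streaming compression with the non-deprecated API;
       returns the compressed size, or an error code testable with ZSTD_isError() */
    size_t modern_stream_compress(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize, int level)
    {
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        ZSTD_CStream* const cs = ZSTD_createCStream();
        size_t result = cs ? ZSTD_initCStream(cs, level) : (size_t)(-1);  /* (size_t)-1 tests as an error */

        if (!ZSTD_isError(result)) result = ZSTD_compressStream(cs, &out, &in);
        if (!ZSTD_isError(result)) result = ZSTD_endStream(cs, &out);     /* 0 : frame fully flushed */
        if (!ZSTD_isError(result)) result = out.pos;                      /* compressed size */
        ZSTD_freeCStream(cs);                                             /* NULL is accepted */
        return result;
    }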
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_BUFFERED_H_23987
+#define ZSTD_BUFFERED_H_23987
+
+/* *************************************
+* Dependencies
+***************************************/
+#include <stddef.h> /* size_t */
+#include "zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */
+
+
+/* ***************************************************************
+* Compiler specifics
+*****************************************************************/
+/* Deprecation warnings */
+/* Should these warnings be a problem,
+ it is generally possible to disable them,
+ typically with -Wno-deprecated-declarations for gcc
+ or _CRT_SECURE_NO_WARNINGS in Visual.
+ Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
+# define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
+# elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
+# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
+# elif defined(_MSC_VER)
+# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
+# else
+# pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler")
+# define ZBUFF_DEPRECATED(message) ZSTDLIB_API
+# endif
+#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */
+
+
+/* *************************************
+* Streaming functions
+***************************************/
+/* This is the easier "buffered" streaming API,
+* using an internal buffer to lift all restrictions on user-provided buffers,
+* which can be any size and located anywhere, for both input and output.
+* ZBUFF and ZSTD are 100% interoperable :
+* frames created by one can be decoded by the other */
+
+typedef ZSTD_CStream ZBUFF_CCtx;
+ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void);
+ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);
+
+ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
+ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+
+ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
+ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
+ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
+
+/*-*************************************************
+* Streaming compression - howto
+*
+* A ZBUFF_CCtx object is required to track streaming operation.
+* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
+* ZBUFF_CCtx objects can be reused multiple times.
+*
+* Start by initializing ZBUF_CCtx.
+* Use ZBUFF_compressInit() to start a new compression operation.
+* Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
+*
+* Use ZBUFF_compressContinue() repetitively to consume input stream.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present the remaining data again.
+* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters, or change @dst .
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush().
+* The nb of bytes written into `dst` will be reported in *dstCapacityPtr.
+* Note that the function cannot output more than *dstCapacityPtr,
+* therefore, some content might still be left in the internal buffer if *dstCapacityPtr is too small.
+* @return : nb of bytes still present in the internal buffer (0 if it's empty)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* ZBUFF_compressEnd() instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
+* In that case, call ZBUFF_compressFlush() again to complete the flush.
+* @return : nb of bytes still present in the internal buffer (0 if it's empty)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
+* input : ZBUFF_recommendedCInSize == 128 KB ; the 128 KB block size is the internal unit, so using this value reduces intermediate stages (better latency)
+* output : ZBUFF_recommendedCOutSize == ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block, skipping some buffering.
+* Using both ensures that input will be entirely consumed and output will always contain the result, reducing intermediate buffering.
+* **************************************************/
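A condensed sketch of the loop described above, using only the (deprecated) entry points declared in this header; the assumption that dst can hold the whole frame, and the names, are illustrative:

    #include "zbuff.h"

    /* compress one complete frame from (src, srcSize) into (dst, dstCapacity);
       returns the compressed size, or an error code testable with ZBUFF_isError() */
    size_t zbuff_compress_frame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, int level)
    {
        ZBUFF_CCtx* const cctx = ZBUFF_createCCtx();
        size_t written = 0;
        size_t err = cctx ? ZBUFF_compressInit(cctx, level) : (size_t)(-1);

        while (!ZBUFF_isError(err) && srcSize > 0) {
            size_t dstAvail = dstCapacity - written;      /* in: room left, out: bytes written */
            size_t srcUsed  = srcSize;                    /* in: bytes available, out: bytes read */
            err = ZBUFF_compressContinue(cctx, (char*)dst + written, &dstAvail, src, &srcUsed);
            written += dstAvail;
            src = (const char*)src + srcUsed;
            srcSize -= srcUsed;
            if (dstAvail == 0 && srcUsed == 0) { err = (size_t)(-1); break; }  /* no progress : dst too small */
        }
        if (!ZBUFF_isError(err)) {                        /* flush and write the frame epilogue */
            size_t dstAvail = dstCapacity - written;
            err = ZBUFF_compressEnd(cctx, (char*)dst + written, &dstAvail);
            written += dstAvail;
        }
        ZBUFF_freeCCtx(cctx);
        return ZBUFF_isError(err) ? err : written;
    }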
+
+
+typedef ZSTD_DStream ZBUFF_DCtx;
+ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void);
+ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);
+
+ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
+ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+* Streaming decompression howto
+*
+* A ZBUFF_DCtx object is required to track streaming operations.
+* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+* Use ZBUFF_decompressInit() to start a new decompression operation,
+* or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFF_DCtx objects can be re-initialized multiple times.
+*
+* Use ZBUFF_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* 1 when there is still some data left within internal buffer to flush,
+* >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency),
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
+* output : ZBUFF_recommendedDOutSize == 128 KB ; the 128 KB block size is the internal unit, so it's always possible to write a full block once decoded.
+* input : ZBUFF_recommendedDInSize == 128 KB + 3 ;
+* just follow the indications from ZBUFF_decompressContinue() to minimize latency. They should always be <= 128 KB + 3 .
+* *******************************************************************************/
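And a matching sketch for the decompression loop described above, again restricted to the declarations in this header; the fixed output buffer and the function name are illustrative:

    #include "zbuff.h"

    /* decompress one frame from (src, srcSize) into (dst, dstCapacity);
       returns the number of decompressed bytes, or an error code testable with ZBUFF_isError() */
    size_t zbuff_decompress_frame(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
    {
        ZBUFF_DCtx* const dctx = ZBUFF_createDCtx();
        size_t produced = 0;
        size_t ret = dctx ? ZBUFF_decompressInit(dctx) : (size_t)(-1);

        while (!ZBUFF_isError(ret) && ret != 0) {           /* 0 : frame fully decoded and flushed */
            size_t dstAvail = dstCapacity - produced;       /* in: room left, out: bytes written */
            size_t srcUsed  = srcSize;                      /* in: bytes available, out: bytes consumed */
            ret = ZBUFF_decompressContinue(dctx, (char*)dst + produced, &dstAvail, src, &srcUsed);
            produced += dstAvail;
            src = (const char*)src + srcUsed;
            srcSize -= srcUsed;
            if (!ZBUFF_isError(ret) && ret != 0 && dstAvail == 0 && srcUsed == 0)
                ret = (size_t)(-1);                         /* no progress : truncated input or dst too small */
        }
        ZBUFF_freeDCtx(dctx);
        return ZBUFF_isError(ret) ? ret : produced;
    }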
+
+
+/* *************************************
+* Tool functions
+***************************************/
+ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode);
+ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+* These sizes are just hints, but they tend to offer better latency */
+ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void);
+ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void);
+ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void);
+ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void);
+
+#endif /* ZSTD_BUFFERED_H_23987 */
+
+
+#ifdef ZBUFF_STATIC_LINKING_ONLY
+#ifndef ZBUFF_STATIC_H_30298098432
+#define ZBUFF_STATIC_H_30298098432
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used in association with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+/*--- Dependency ---*/
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */
+#include "zstd.h"
+
+
+/*--- Custom memory allocator ---*/
+/*! ZBUFF_createCCtx_advanced() :
+ * Create a ZBUFF compression context using external alloc and free functions */
+ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);
+
+/*! ZBUFF_createDCtx_advanced() :
+ * Create a ZBUFF decompression context using external alloc and free functions */
+ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);
+
+
+/*--- Advanced Streaming Initialization ---*/
+ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize);
+
+
+#endif /* ZBUFF_STATIC_H_30298098432 */
+#endif /* ZBUFF_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zbuff_common.c b/vendor/github.com/DataDog/zstd/zbuff_common.c
new file mode 100644
index 000000000..661b9b0e1
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "error_private.h"
+#include "zbuff.h"
+
+/*-****************************************
+* ZBUFF Error Management (deprecated)
+******************************************/
+
+/*! ZBUFF_isError() :
+* tells if a return value is an error code */
+unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
+/*! ZBUFF_getErrorName() :
+* provides error code string from function result (useful for debugging) */
+const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
diff --git a/vendor/github.com/DataDog/zstd/zbuff_compress.c b/vendor/github.com/DataDog/zstd/zbuff_compress.c
new file mode 100644
index 000000000..f39c60d89
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_compress.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/* *************************************
+* Dependencies
+***************************************/
+#define ZBUFF_STATIC_LINKING_ONLY
+#include "zbuff.h"
+
+
+/*-***********************************************************
+* Streaming compression
+*
+* A ZBUFF_CCtx object is required to track streaming operation.
+* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
+* Use ZBUFF_compressInit() to start a new compression operation.
+* ZBUFF_CCtx objects can be reused multiple times.
+*
+* Use ZBUFF_compressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+* The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change dst .
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.
+* Note that it will not output more than *dstCapacityPtr.
+* Therefore, some content might still be left in its internal buffer if the dst buffer is too small.
+* @return : nb of bytes still present in the internal buffer (0 if it's empty)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* ZBUFF_compressEnd() instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
+* @return : nb of bytes still present into internal buffer (0 if it's empty)
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* Hint : recommended buffer sizes (not compulsory)
+* input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value.
+* output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
+* ***********************************************************/
+
+ZBUFF_CCtx* ZBUFF_createCCtx(void)
+{
+ return ZSTD_createCStream();
+}
+
+ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createCStream_advanced(customMem);
+}
+
+size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)
+{
+ return ZSTD_freeCStream(zbc);
+}
+
+
+/* ====== Initialization ====== */
+
+size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* preserve "0 == unknown" behavior */
+ return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);
+}
+
+
+size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)
+{
+ return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);
+}
+
+size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
+{
+ return ZSTD_initCStream(zbc, compressionLevel);
+}
+
+/* ====== Compression ====== */
+
+
+size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr)
+{
+ size_t result;
+ ZSTD_outBuffer outBuff;
+ ZSTD_inBuffer inBuff;
+ outBuff.dst = dst;
+ outBuff.pos = 0;
+ outBuff.size = *dstCapacityPtr;
+ inBuff.src = src;
+ inBuff.pos = 0;
+ inBuff.size = *srcSizePtr;
+ result = ZSTD_compressStream(zbc, &outBuff, &inBuff);
+ *dstCapacityPtr = outBuff.pos;
+ *srcSizePtr = inBuff.pos;
+ return result;
+}
+
+
+
+/* ====== Finalize ====== */
+
+size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
+{
+ size_t result;
+ ZSTD_outBuffer outBuff;
+ outBuff.dst = dst;
+ outBuff.pos = 0;
+ outBuff.size = *dstCapacityPtr;
+ result = ZSTD_flushStream(zbc, &outBuff);
+ *dstCapacityPtr = outBuff.pos;
+ return result;
+}
+
+
+size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
+{
+ size_t result;
+ ZSTD_outBuffer outBuff;
+ outBuff.dst = dst;
+ outBuff.pos = 0;
+ outBuff.size = *dstCapacityPtr;
+ result = ZSTD_endStream(zbc, &outBuff);
+ *dstCapacityPtr = outBuff.pos;
+ return result;
+}
+
+
+
+/* *************************************
+* Tool functions
+***************************************/
+size_t ZBUFF_recommendedCInSize(void) { return ZSTD_CStreamInSize(); }
+size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }
diff --git a/vendor/github.com/DataDog/zstd/zbuff_decompress.c b/vendor/github.com/DataDog/zstd/zbuff_decompress.c
new file mode 100644
index 000000000..923c22b73
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_decompress.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/* *************************************
+* Dependencies
+***************************************/
+#define ZBUFF_STATIC_LINKING_ONLY
+#include "zbuff.h"
+
+
+ZBUFF_DCtx* ZBUFF_createDCtx(void)
+{
+ return ZSTD_createDStream();
+}
+
+ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDStream_advanced(customMem);
+}
+
+size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)
+{
+ return ZSTD_freeDStream(zbd);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)
+{
+ return ZSTD_initDStream_usingDict(zbd, dict, dictSize);
+}
+
+size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)
+{
+ return ZSTD_initDStream(zbd);
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr)
+{
+ ZSTD_outBuffer outBuff;
+ ZSTD_inBuffer inBuff;
+ size_t result;
+ outBuff.dst = dst;
+ outBuff.pos = 0;
+ outBuff.size = *dstCapacityPtr;
+ inBuff.src = src;
+ inBuff.pos = 0;
+ inBuff.size = *srcSizePtr;
+ result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);
+ *dstCapacityPtr = outBuff.pos;
+ *srcSizePtr = inBuff.pos;
+ return result;
+}
+
+
+/* *************************************
+* Tool functions
+***************************************/
+size_t ZBUFF_recommendedDInSize(void) { return ZSTD_DStreamInSize(); }
+size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); }
diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
new file mode 100644
index 000000000..c753da0db
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zdict.c
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*-**************************************
+* Tuning parameters
+****************************************/
+#define MINRATIO 4 /* minimum nb of occurrences required for a segment to be selected into the dictionary */
+#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
+#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
+
+
+/*-**************************************
+* Compiler Options
+****************************************/
+/* Unix Large Files support (>4GB) */
+#define _FILE_OFFSET_BITS 64
+#if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */
+# define _LARGEFILE_SOURCE
+#elif ! defined(__LP64__) /* No point defining Large file for 64 bit */
+# define _LARGEFILE64_SOURCE
+#endif
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memset */
+#include <stdio.h> /* fprintf, fopen, ftello64 */
+#include <time.h> /* clock */
+
+#include "mem.h" /* read */
+#include "fse.h" /* FSE_normalizeCount, FSE_writeNCount */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h" /* HUF_buildCTable, HUF_writeCTable */
+#include "zstd_internal.h" /* includes zstd.h */
+#include "xxhash.h" /* XXH64 */
+#include "divsufsort.h"
+#ifndef ZDICT_STATIC_LINKING_ONLY
+# define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define DICTLISTSIZE_DEFAULT 10000
+
+#define NOISELENGTH 32
+
+static const int g_compressionLevel_default = 3;
+static const U32 g_selectivity_default = 9;
+
+
+/*-*************************************
+* Console display
+***************************************/
+#define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+
+static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
+
+static void ZDICT_printHex(const void* ptr, size_t length)
+{
+ const BYTE* const b = (const BYTE*)ptr;
+ size_t u;
+ for (u=0; u<length; u++) {
+ BYTE c = b[u];
+ if (c<32 || c>126) c = '.'; /* non-printable char */
+ DISPLAY("%c", c);
+ }
+}
+
+
+/*-********************************************************
+* Helper functions
+**********************************************************/
+unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dictBuffer + 4);
+}
+
+
+/*-********************************************************
+* Dictionary training functions
+**********************************************************/
+static unsigned ZDICT_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r=0;
+ _BitScanForward( &r, (U32)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
+
+/*! ZDICT_count() :
+ Count the nb of common bytes between 2 pointers.
+ Note : this function presumes end of buffer followed by noisy guard band.
+*/
+static size_t ZDICT_count(const void* pIn, const void* pMatch)
+{
+ const char* const pStart = (const char*)pIn;
+ for (;;) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) {
+ pIn = (const char*)pIn+sizeof(size_t);
+ pMatch = (const char*)pMatch+sizeof(size_t);
+ continue;
+ }
+ pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
+ return (size_t)((const char*)pIn - pStart);
+ }
+}
+
+
+typedef struct {
+ U32 pos;
+ U32 length;
+ U32 savings;
+} dictItem;
+
+static void ZDICT_initDictItem(dictItem* d)
+{
+ d->pos = 1;
+ d->length = 0;
+ d->savings = (U32)(-1);
+}
+
+
+#define LLIMIT 64 /* heuristic determined experimentally */
+#define MINMATCHLENGTH 7 /* heuristic determined experimentally */
+static dictItem ZDICT_analyzePos(
+ BYTE* doneMarks,
+ const int* suffix, U32 start,
+ const void* buffer, U32 minRatio, U32 notificationLevel)
+{
+ U32 lengthList[LLIMIT] = {0};
+ U32 cumulLength[LLIMIT] = {0};
+ U32 savings[LLIMIT] = {0};
+ const BYTE* b = (const BYTE*)buffer;
+ size_t maxLength = LLIMIT;
+ size_t pos = suffix[start];
+ U32 end = start;
+ dictItem solution;
+
+ /* init */
+ memset(&solution, 0, sizeof(solution));
+ doneMarks[pos] = 1;
+
+ /* trivial repetition cases */
+ if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
+ ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
+ ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
+ /* skip and mark segment */
+ U16 const pattern16 = MEM_read16(b+pos+4);
+ U32 u, patternEnd = 6;
+ while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
+ if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
+ for (u=1; u<patternEnd; u++)
+ doneMarks[pos+u] = 1;
+ return solution;
+ }
+
+ /* look forward */
+ { size_t length;
+ do {
+ end++;
+ length = ZDICT_count(b + pos, b + suffix[end]);
+ } while (length >= MINMATCHLENGTH);
+ }
+
+ /* look backward */
+ { size_t length;
+ do {
+ length = ZDICT_count(b + pos, b + *(suffix+start-1));
+ if (length >=MINMATCHLENGTH) start--;
+ } while(length >= MINMATCHLENGTH);
+ }
+
+ /* exit if the minimum nb of repetitions was not found */
+ if (end-start < minRatio) {
+ U32 idx;
+ for(idx=start; idx<end; idx++)
+ doneMarks[suffix[idx]] = 1;
+ return solution;
+ }
+
+ { int i;
+ U32 mml;
+ U32 refinedStart = start;
+ U32 refinedEnd = end;
+
+ DISPLAYLEVEL(4, "\n");
+ DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
+ DISPLAYLEVEL(4, "\n");
+
+ for (mml = MINMATCHLENGTH ; ; mml++) {
+ BYTE currentChar = 0;
+ U32 currentCount = 0;
+ U32 currentID = refinedStart;
+ U32 id;
+ U32 selectedCount = 0;
+ U32 selectedID = currentID;
+ for (id =refinedStart; id < refinedEnd; id++) {
+ if (b[suffix[id] + mml] != currentChar) {
+ if (currentCount > selectedCount) {
+ selectedCount = currentCount;
+ selectedID = currentID;
+ }
+ currentID = id;
+ currentChar = b[ suffix[id] + mml];
+ currentCount = 0;
+ }
+ currentCount ++;
+ }
+ if (currentCount > selectedCount) { /* for last */
+ selectedCount = currentCount;
+ selectedID = currentID;
+ }
+
+ if (selectedCount < minRatio)
+ break;
+ refinedStart = selectedID;
+ refinedEnd = refinedStart + selectedCount;
+ }
+
+ /* evaluate gain based on new dict */
+ start = refinedStart;
+ pos = suffix[refinedStart];
+ end = start;
+ memset(lengthList, 0, sizeof(lengthList));
+
+ /* look forward */
+ { size_t length;
+ do {
+ end++;
+ length = ZDICT_count(b + pos, b + suffix[end]);
+ if (length >= LLIMIT) length = LLIMIT-1;
+ lengthList[length]++;
+ } while (length >=MINMATCHLENGTH);
+ }
+
+ /* look backward */
+ { size_t length = MINMATCHLENGTH;
+ while ((length >= MINMATCHLENGTH) & (start > 0)) {
+ length = ZDICT_count(b + pos, b + suffix[start - 1]);
+ if (length >= LLIMIT) length = LLIMIT - 1;
+ lengthList[length]++;
+ if (length >= MINMATCHLENGTH) start--;
+ }
+ }
+
+ /* largest useful length */
+ memset(cumulLength, 0, sizeof(cumulLength));
+ cumulLength[maxLength-1] = lengthList[maxLength-1];
+ for (i=(int)(maxLength-2); i>=0; i--)
+ cumulLength[i] = cumulLength[i+1] + lengthList[i];
+
+ for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
+ maxLength = i;
+
+ /* reduce maxLength when the selected segment ends in repetitive data */
+ { U32 l = (U32)maxLength;
+ BYTE const c = b[pos + maxLength-1];
+ while (b[pos+l-2]==c) l--;
+ maxLength = l;
+ }
+ if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */
+
+ /* calculate savings */
+ savings[5] = 0;
+ for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
+ savings[i] = savings[i-1] + (lengthList[i] * (i-3));
+
+ DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
+ (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
+
+ solution.pos = (U32)pos;
+ solution.length = (U32)maxLength;
+ solution.savings = savings[maxLength];
+
+ /* mark positions done */
+ { U32 id;
+ for (id=start; id<end; id++) {
+ U32 p, pEnd, length;
+ U32 const testedPos = suffix[id];
+ if (testedPos == pos)
+ length = solution.length;
+ else {
+ length = (U32)ZDICT_count(b+pos, b+testedPos);
+ if (length > solution.length) length = solution.length;
+ }
+ pEnd = (U32)(testedPos + length);
+ for (p=testedPos; p<pEnd; p++)
+ doneMarks[p] = 1;
+ } } }
+
+ return solution;
+}
+
+
+static int isIncluded(const void* in, const void* container, size_t length)
+{
+ const char* const ip = (const char*) in;
+ const char* const into = (const char*) container;
+ size_t u;
+
+ for (u=0; u<length; u++) { /* works because end of buffer is a noisy guard band */
+ if (ip[u] != into[u]) break;
+ }
+
+ return u==length;
+}
+
+/*! ZDICT_tryMerge() :
+ check if dictItem can be merged, do it if possible
+ @return : id of destination elt, 0 if not merged
+*/
+static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
+{
+ const U32 tableSize = table->pos;
+ const U32 eltEnd = elt.pos + elt.length;
+ const char* const buf = (const char*) buffer;
+
+ /* tail overlap */
+ U32 u; for (u=1; u<tableSize; u++) {
+ if (u==eltNbToSkip) continue;
+ if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */
+ /* append */
+ U32 const addedLength = table[u].pos - elt.pos;
+ table[u].length += addedLength;
+ table[u].pos = elt.pos;
+ table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */
+ table[u].savings += elt.length / 8; /* rough approx bonus */
+ elt = table[u];
+ /* sort : improve rank */
+ while ((u>1) && (table[u-1].savings < elt.savings))
+ table[u] = table[u-1], u--;
+ table[u] = elt;
+ return u;
+ } }
+
+ /* front overlap */
+ for (u=1; u<tableSize; u++) {
+ if (u==eltNbToSkip) continue;
+
+ if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */
+ /* append */
+ int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
+ table[u].savings += elt.length / 8; /* rough approx bonus */
+ if (addedLength > 0) { /* otherwise, elt fully included into existing */
+ table[u].length += addedLength;
+ table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */
+ }
+ /* sort : improve rank */
+ elt = table[u];
+ while ((u>1) && (table[u-1].savings < elt.savings))
+ table[u] = table[u-1], u--;
+ table[u] = elt;
+ return u;
+ }
+
+ if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
+ if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
+ size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
+ table[u].pos = elt.pos;
+ table[u].savings += (U32)(elt.savings * addedLength / elt.length);
+ table[u].length = MIN(elt.length, table[u].length + 1);
+ return u;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+static void ZDICT_removeDictItem(dictItem* table, U32 id)
+{
+ /* convention : table[0].pos stores nb of elts */
+ U32 const max = table[0].pos;
+ U32 u;
+ if (!id) return; /* protection, should never happen */
+ for (u=id; u<max-1; u++)
+ table[u] = table[u+1];
+ table->pos--;
+}
+
+
+static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
+{
+ /* merge if possible */
+ U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
+ if (mergeId) {
+ U32 newMerge = 1;
+ while (newMerge) {
+ newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
+ if (newMerge) ZDICT_removeDictItem(table, mergeId);
+ mergeId = newMerge;
+ }
+ return;
+ }
+
+ /* insert */
+ { U32 current;
+ U32 nextElt = table->pos;
+ if (nextElt >= maxSize) nextElt = maxSize-1;
+ current = nextElt-1;
+ while (table[current].savings < elt.savings) {
+ table[current+1] = table[current];
+ current--;
+ }
+ table[current+1] = elt;
+ table->pos = nextElt+1;
+ }
+}
+
+
+static U32 ZDICT_dictSize(const dictItem* dictList)
+{
+ U32 u, dictSize = 0;
+ for (u=1; u<dictList[0].pos; u++)
+ dictSize += dictList[u].length;
+ return dictSize;
+}
+
+
+static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
+ const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
+ const size_t* fileSizes, unsigned nbFiles,
+ unsigned minRatio, U32 notificationLevel)
+{
+ int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
+ int* const suffix = suffix0+1;
+ U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
+ BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks)); /* +16 for overflow security */
+ U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
+ size_t result = 0;
+ clock_t displayClock = 0;
+ clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
+
+# define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
+ if (ZDICT_clockSpan(displayClock) > refreshRate) \
+ { displayClock = clock(); DISPLAY(__VA_ARGS__); \
+ if (notificationLevel>=4) fflush(stderr); } }
+
+ /* init */
+ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
+ if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
+ result = ERROR(memory_allocation);
+ goto _cleanup;
+ }
+ if (minRatio < MINRATIO) minRatio = MINRATIO;
+ memset(doneMarks, 0, bufferSize+16);
+
+ /* limit sample set size (divsufsort limitation) */
+ if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
+ while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
+
+ /* sort */
+ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
+ { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
+ if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
+ }
+ suffix[bufferSize] = (int)bufferSize; /* leads into noise */
+ suffix0[0] = (int)bufferSize; /* leads into noise */
+ /* build reverse suffix sort */
+ { size_t pos;
+ for (pos=0; pos < bufferSize; pos++)
+ reverseSuffix[suffix[pos]] = (U32)pos;
+ /* note filePos tracks borders between samples.
+ It's not used at this stage, but planned to become useful in a later update */
+ filePos[0] = 0;
+ for (pos=1; pos<nbFiles; pos++)
+ filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
+ }
+
+ DISPLAYLEVEL(2, "finding patterns ... \n");
+ DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);
+
+ { U32 cursor; for (cursor=0; cursor < bufferSize; ) {
+ dictItem solution;
+ if (doneMarks[cursor]) { cursor++; continue; }
+ solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
+ if (solution.length==0) { cursor++; continue; }
+ ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
+ cursor += solution.length;
+ DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
+ } }
+
+_cleanup:
+ free(suffix0);
+ free(reverseSuffix);
+ free(doneMarks);
+ free(filePos);
+ return result;
+}
+
+
+static void ZDICT_fillNoise(void* buffer, size_t length)
+{
+ unsigned const prime1 = 2654435761U;
+ unsigned const prime2 = 2246822519U;
+ unsigned acc = prime1;
+ size_t p=0;
+ for (p=0; p<length; p++) {
+ acc *= prime2;
+ ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
+ }
+}
+
+
+typedef struct
+{
+ ZSTD_CDict* dict; /* dictionary */
+ ZSTD_CCtx* zc; /* working context */
+ void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */
+} EStats_ress_t;
+
+#define MAXREPOFFSET 1024
+
+static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
+ unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
+ const void* src, size_t srcSize,
+ U32 notificationLevel)
+{
+ size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog);
+ size_t cSize;
+
+ if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */
+ { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
+ if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
+
+ }
+ cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
+ if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
+
+ if (cSize) { /* cSize == 0 means the block is not compressible */
+ const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
+
+ /* literals stats */
+ { const BYTE* bytePtr;
+ for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
+ countLit[*bytePtr]++;
+ }
+
+ /* seqStats */
+ { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ ZSTD_seqToCodes(seqStorePtr);
+
+ { const BYTE* codePtr = seqStorePtr->ofCode;
+ U32 u;
+ for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
+ }
+
+ { const BYTE* codePtr = seqStorePtr->mlCode;
+ U32 u;
+ for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
+ }
+
+ { const BYTE* codePtr = seqStorePtr->llCode;
+ U32 u;
+ for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
+ }
+
+ if (nbSeq >= 2) { /* rep offsets */
+ const seqDef* const seq = seqStorePtr->sequencesStart;
+ U32 offset1 = seq[0].offset - 3;
+ U32 offset2 = seq[1].offset - 3;
+ if (offset1 >= MAXREPOFFSET) offset1 = 0;
+ if (offset2 >= MAXREPOFFSET) offset2 = 0;
+ repOffsets[offset1] += 3;
+ repOffsets[offset2] += 1;
+ } } }
+}
+
+static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
+{
+ size_t total=0;
+ unsigned u;
+ for (u=0; u<nbFiles; u++) total += fileSizes[u];
+ return total;
+}
+
+typedef struct { U32 offset; U32 count; } offsetCount_t;
+
+static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
+{
+ U32 u;
+ table[ZSTD_REP_NUM].offset = val;
+ table[ZSTD_REP_NUM].count = count;
+ for (u=ZSTD_REP_NUM; u>0; u--) {
+ offsetCount_t tmp;
+ if (table[u-1].count >= table[u].count) break;
+ tmp = table[u-1];
+ table[u-1] = table[u];
+ table[u] = tmp;
+ }
+}
+
+/* ZDICT_flatLit() :
+ * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
+ * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
+ */
+static void ZDICT_flatLit(unsigned* countLit)
+{
+ int u;
+ for (u=1; u<256; u++) countLit[u] = 2;
+ countLit[0] = 4;
+ countLit[253] = 1;
+ countLit[254] = 1;
+}
+
+#define OFFCODE_MAX 30 /* only applicable to first block */
+static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
+ unsigned compressionLevel,
+ const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles,
+ const void* dictBuffer, size_t dictBufferSize,
+ unsigned notificationLevel)
+{
+ unsigned countLit[256];
+ HUF_CREATE_STATIC_CTABLE(hufTable, 255);
+ unsigned offcodeCount[OFFCODE_MAX+1];
+ short offcodeNCount[OFFCODE_MAX+1];
+ U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
+ unsigned matchLengthCount[MaxML+1];
+ short matchLengthNCount[MaxML+1];
+ unsigned litLengthCount[MaxLL+1];
+ short litLengthNCount[MaxLL+1];
+ U32 repOffset[MAXREPOFFSET];
+ offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
+ EStats_ress_t esr = { NULL, NULL, NULL };
+ ZSTD_parameters params;
+ U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
+ size_t pos = 0, errorCode;
+ size_t eSize = 0;
+ size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
+ size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
+ BYTE* dstPtr = (BYTE*)dstBuffer;
+
+ /* init */
+ DEBUGLOG(4, "ZDICT_analyzeEntropy");
+ if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */
+ for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */
+ for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
+ for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
+ for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
+ memset(repOffset, 0, sizeof(repOffset));
+ repOffset[1] = repOffset[4] = repOffset[8] = 1;
+ memset(bestRepOffset, 0, sizeof(bestRepOffset));
+ if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
+ params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
+
+ esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
+ esr.zc = ZSTD_createCCtx();
+ esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
+ if (!esr.dict || !esr.zc || !esr.workPlace) {
+ eSize = ERROR(memory_allocation);
+ DISPLAYLEVEL(1, "Not enough memory \n");
+ goto _cleanup;
+ }
+
+ /* collect stats on all samples */
+ for (u=0; u<nbFiles; u++) {
+ ZDICT_countEStats(esr, params,
+ countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
+ (const char*)srcBuffer + pos, fileSizes[u],
+ notificationLevel);
+ pos += fileSizes[u];
+ }
+
+ /* analyze, build stats, starting with literals */
+ { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+ if (HUF_isError(maxNbBits)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, " HUF_buildCTable error \n");
+ goto _cleanup;
+ }
+ if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */
+ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
+ ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
+ maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+ assert(maxNbBits==9);
+ }
+ huffLog = (U32)maxNbBits;
+ }
+
+ /* looking for most common first offsets */
+ { U32 offset;
+ for (offset=1; offset<MAXREPOFFSET; offset++)
+ ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
+ }
+ /* note : the result of this phase should be used to better appreciate the impact on statistics */
+
+ total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
+ errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
+ if (FSE_isError(errorCode)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
+ goto _cleanup;
+ }
+ Offlog = (U32)errorCode;
+
+ total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
+ errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
+ if (FSE_isError(errorCode)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
+ goto _cleanup;
+ }
+ mlLog = (U32)errorCode;
+
+ total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
+ errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
+ if (FSE_isError(errorCode)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
+ goto _cleanup;
+ }
+ llLog = (U32)errorCode;
+
+ /* write result to buffer */
+ { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
+ if (HUF_isError(hhSize)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "HUF_writeCTable error \n");
+ goto _cleanup;
+ }
+ dstPtr += hhSize;
+ maxDstSize -= hhSize;
+ eSize += hhSize;
+ }
+
+ { size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
+ if (FSE_isError(ohSize)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
+ goto _cleanup;
+ }
+ dstPtr += ohSize;
+ maxDstSize -= ohSize;
+ eSize += ohSize;
+ }
+
+ { size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
+ if (FSE_isError(mhSize)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
+ goto _cleanup;
+ }
+ dstPtr += mhSize;
+ maxDstSize -= mhSize;
+ eSize += mhSize;
+ }
+
+ { size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
+ if (FSE_isError(lhSize)) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
+ goto _cleanup;
+ }
+ dstPtr += lhSize;
+ maxDstSize -= lhSize;
+ eSize += lhSize;
+ }
+
+ if (maxDstSize<12) {
+ eSize = ERROR(GENERIC);
+ DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
+ goto _cleanup;
+ }
+# if 0
+ MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
+ MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
+ MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
+#else
+ /* at this stage, we don't use the result of "most common first offset",
+ as the impact of statistics is not properly evaluated */
+ MEM_writeLE32(dstPtr+0, repStartValue[0]);
+ MEM_writeLE32(dstPtr+4, repStartValue[1]);
+ MEM_writeLE32(dstPtr+8, repStartValue[2]);
+#endif
+ eSize += 12;
+
+_cleanup:
+ ZSTD_freeCDict(esr.dict);
+ ZSTD_freeCCtx(esr.zc);
+ free(esr.workPlace);
+
+ return eSize;
+}
+
+
+
+size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+ const void* customDictContent, size_t dictContentSize,
+ const void* samplesBuffer, const size_t* samplesSizes,
+ unsigned nbSamples, ZDICT_params_t params)
+{
+ size_t hSize;
+#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */
+ BYTE header[HBUFFSIZE];
+ int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
+ U32 const notificationLevel = params.notificationLevel;
+
+ /* check conditions */
+ DEBUGLOG(4, "ZDICT_finalizeDictionary");
+ if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
+ if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
+
+ /* dictionary header */
+ MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
+ { U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
+ U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+ U32 const dictID = params.dictID ? params.dictID : compliantID;
+ MEM_writeLE32(header+4, dictID);
+ }
+ hSize = 8;
+
+ /* entropy tables */
+ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
+ DISPLAYLEVEL(2, "statistics ... \n");
+ { size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
+ compressionLevel,
+ samplesBuffer, samplesSizes, nbSamples,
+ customDictContent, dictContentSize,
+ notificationLevel);
+ if (ZDICT_isError(eSize)) return eSize;
+ hSize += eSize;
+ }
+
+ /* copy elements in final buffer ; note : src and dst buffer can overlap */
+ if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
+ { size_t const dictSize = hSize + dictContentSize;
+ char* dictEnd = (char*)dictBuffer + dictSize;
+ memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
+ memcpy(dictBuffer, header, hSize);
+ return dictSize;
+ }
+}
+
+
+static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
+ void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_params_t params)
+{
+ int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
+ U32 const notificationLevel = params.notificationLevel;
+ size_t hSize = 8;
+
+ /* calculate entropy tables */
+ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
+ DISPLAYLEVEL(2, "statistics ... \n");
+ { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
+ compressionLevel,
+ samplesBuffer, samplesSizes, nbSamples,
+ (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
+ notificationLevel);
+ if (ZDICT_isError(eSize)) return eSize;
+ hSize += eSize;
+ }
+
+ /* add dictionary header (after entropy tables) */
+ MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
+ { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
+ U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+ U32 const dictID = params.dictID ? params.dictID : compliantID;
+ MEM_writeLE32((char*)dictBuffer+4, dictID);
+ }
+
+ if (hSize + dictContentSize < dictBufferCapacity)
+ memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
+ return MIN(dictBufferCapacity, hSize+dictContentSize);
+}
+
+/* Hidden declaration for dbio.c */
+size_t ZDICT_trainFromBuffer_unsafe_legacy(
+ void* dictBuffer, size_t maxDictSize,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_legacy_params_t params);
+/*! ZDICT_trainFromBuffer_unsafe_legacy() :
+* Warning : `samplesBuffer` must be followed by a noisy guard band.
+* @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
+*/
+size_t ZDICT_trainFromBuffer_unsafe_legacy(
+ void* dictBuffer, size_t maxDictSize,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_legacy_params_t params)
+{
+ U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
+ dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
+ unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
+ unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
+ size_t const targetDictSize = maxDictSize;
+ size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+ size_t dictSize = 0;
+ U32 const notificationLevel = params.zParams.notificationLevel;
+
+ /* checks */
+ if (!dictList) return ERROR(memory_allocation);
+ if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */
+ if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */
+
+ /* init */
+ ZDICT_initDictItem(dictList);
+
+ /* build dictionary */
+ ZDICT_trainBuffer_legacy(dictList, dictListSize,
+ samplesBuffer, samplesBuffSize,
+ samplesSizes, nbSamples,
+ minRep, notificationLevel);
+
+ /* display best matches */
+ if (params.zParams.notificationLevel>= 3) {
+ unsigned const nb = MIN(25, dictList[0].pos);
+ unsigned const dictContentSize = ZDICT_dictSize(dictList);
+ unsigned u;
+ DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
+ DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
+ for (u=1; u<nb; u++) {
+ unsigned const pos = dictList[u].pos;
+ unsigned const length = dictList[u].length;
+ U32 const printedLength = MIN(40, length);
+ if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
+ free(dictList);
+ return ERROR(GENERIC); /* should never happen */
+ }
+ DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
+ u, length, pos, (unsigned)dictList[u].savings);
+ ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
+ DISPLAYLEVEL(3, "| \n");
+ } }
+
+
+ /* create dictionary */
+ { unsigned dictContentSize = ZDICT_dictSize(dictList);
+ if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
+ if (dictContentSize < targetDictSize/4) {
+ DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
+ if (samplesBuffSize < 10 * targetDictSize)
+ DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
+ if (minRep > MINRATIO) {
+ DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
+ DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
+ }
+ }
+
+ if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
+ unsigned proposedSelectivity = selectivity-1;
+ while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
+ DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
+ DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
+ DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
+ }
+
+ /* limit dictionary size */
+ { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */
+ U32 currentSize = 0;
+ U32 n; for (n=1; n<max; n++) {
+ currentSize += dictList[n].length;
+ if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
+ }
+ dictList->pos = n;
+ dictContentSize = currentSize;
+ }
+
+ /* build dict content */
+ { U32 u;
+ BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
+ for (u=1; u<dictList->pos; u++) {
+ U32 l = dictList[u].length;
+ ptr -= l;
+ if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */
+ memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
+ } }
+
+ dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
+ samplesBuffer, samplesSizes, nbSamples,
+ params.zParams);
+ }
+
+ /* clean up */
+ free(dictList);
+ return dictSize;
+}
+
+
+/* ZDICT_trainFromBuffer_legacy() :
+ * issue : samplesBuffer needs to be followed by a noisy guard band.
+ * workaround : duplicate the buffer, and add the noise */
+size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_legacy_params_t params)
+{
+ size_t result;
+ void* newBuff;
+ size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+ if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */
+
+ newBuff = malloc(sBuffSize + NOISELENGTH);
+ if (!newBuff) return ERROR(memory_allocation);
+
+ memcpy(newBuff, samplesBuffer, sBuffSize);
+ ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */
+
+ result =
+ ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
+ samplesSizes, nbSamples, params);
+ free(newBuff);
+ return result;
+}
+
+
+size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+ ZDICT_fastCover_params_t params;
+ DEBUGLOG(3, "ZDICT_trainFromBuffer");
+ memset(&params, 0, sizeof(params));
+ params.d = 8;
+ params.steps = 4;
+ /* Default to level 3 (ZSTD_CLEVEL_DEFAULT) since no compression level information is available */
+ params.zParams.compressionLevel = 3;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
+ params.zParams.notificationLevel = DEBUGLEVEL;
+#endif
+ return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
+ samplesBuffer, samplesSizes, nbSamples,
+ &params);
+}
+
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+ ZDICT_params_t params;
+ memset(&params, 0, sizeof(params));
+ return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
+ samplesBuffer, samplesSizes, nbSamples,
+ params);
+}
diff --git a/vendor/github.com/DataDog/zstd/zdict.h b/vendor/github.com/DataDog/zstd/zdict.h
new file mode 100644
index 000000000..e22973173
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zdict.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef DICTBUILDER_H_001
+#define DICTBUILDER_H_001
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*====== Dependencies ======*/
+#include <stddef.h> /* size_t */
+
+
+/* ===== ZDICTLIB_API : control library symbols visibility ===== */
+#ifndef ZDICTLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZDICTLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define ZDICTLIB_API ZDICTLIB_VISIBILITY
+#endif
+
+
+/*! ZDICT_trainFromBuffer():
+ * Train a dictionary from an array of samples.
+ * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
+ * f=20, and accel=1.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * Note: Dictionary training will fail if there are not enough samples to construct a
+ * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
+ * If dictionary training fails, you should use zstd without a dictionary, as the dictionary
+ * would have been ineffective anyway. If you believe your samples would benefit from a dictionary,
+ * please open an issue with details, and we can look into it.
+ * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousands samples, though this can vary a lot.
+ * It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples);
+
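+/* Illustrative sketch (added for this vendored copy, not part of the upstream API) :
+ * one plausible way to call ZDICT_trainFromBuffer() from application code.
+ * The helper name below is hypothetical; only ZDICT_trainFromBuffer(),
+ * ZDICT_isError() and ZDICT_getErrorName() are real entry points.
+ *
+ *   #include <stdio.h>
+ *   #include "zdict.h"
+ *
+ *   // hypothetical helper, for illustration only :
+ *   // samples are concatenated into `samplesBuffer`; samplesSizes[i] gives
+ *   // the length of sample i, in the same order.
+ *   static size_t train_example(void* dictBuffer, size_t dictCapacity,
+ *                               const void* samplesBuffer,
+ *                               const size_t* samplesSizes, unsigned nbSamples)
+ *   {
+ *       size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
+ *                                                     samplesBuffer, samplesSizes, nbSamples);
+ *       if (ZDICT_isError(dictSize)) {
+ *           fprintf(stderr, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize));
+ *           return 0;
+ *       }
+ *       return dictSize;   // number of bytes written into dictBuffer
+ *   }
+ */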
+
+/*====== Helper functions ======*/
+ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */
+ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
+ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
+
+
+
+#ifdef ZDICT_STATIC_LINKING_ONLY
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+typedef struct {
+ int compressionLevel; /* optimize for a specific zstd compression level; 0 means default */
+ unsigned notificationLevel; /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+ unsigned dictID; /* force dictID value; 0 means auto mode (32-bits random value) */
+} ZDICT_params_t;
+
+/*! ZDICT_cover_params_t:
+ * k and d are the only required parameters.
+ * For others, value 0 means default.
+ */
+typedef struct {
+ unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
+ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
+ ZDICT_params_t zParams;
+} ZDICT_cover_params_t;
+
+typedef struct {
+ unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+ unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/
+ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
+ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
+ unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */
+ ZDICT_params_t zParams;
+} ZDICT_fastCover_params_t;
+
+/*! ZDICT_trainFromBuffer_cover():
+ * Train a dictionary from an array of samples using the COVER algorithm.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousands samples, though this can vary a lot.
+ * It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+ void *dictBuffer, size_t dictBufferCapacity,
+ const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t parameters);
+
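+/* Example sketch (not part of the upstream API) : training with explicit
+ * COVER parameters. The values k=1024 and d=8 are arbitrary assumptions for this
+ * example, and dictBuffer / dictCapacity / samplesBuffer / samplesSizes / nbSamples
+ * are assumed to be provided by the caller.
+ *
+ *   #include <string.h>
+ *   #include "zdict.h"
+ *
+ *   ZDICT_cover_params_t cparams;
+ *   size_t dictSize;
+ *   memset(&cparams, 0, sizeof(cparams));   // unset fields keep their defaults
+ *   cparams.k = 1024;                       // segment size (assumed value)
+ *   cparams.d = 8;                          // dmer size (assumed value)
+ *   dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictCapacity,
+ *                                          samplesBuffer, samplesSizes, nbSamples,
+ *                                          cparams);
+ *   if (ZDICT_isError(dictSize)) dictSize = 0;   // see ZDICT_getErrorName(dictSize)
+ */
+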
+/*! ZDICT_optimizeTrainFromBuffer_cover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations and picks the best parameters.
+ * `*parameters` is filled with the best parameters found,
+ * dictionary constructed with those parameters is stored in `dictBuffer`.
+ *
+ * All of the parameters d, k, steps are optional.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
+ * If steps is zero, the default value (40) is used.
+ * If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [50, 2000].
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * On success `*parameters` contains the parameters selected.
+ * See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus another 5 bytes of memory per input byte for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+ void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t* parameters);
+
+/*! ZDICT_trainFromBuffer_fastCover():
+ * Train a dictionary from an array of samples using a modified version of COVER algorithm.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * d and k are required.
+ * All other parameters are optional, will use default values if not provided
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousands samples, though this can vary a lot.
+ * It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
+ size_t dictBufferCapacity, const void *samplesBuffer,
+ const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_fastCover_params_t parameters);
+
+/*! ZDICT_optimizeTrainFromBuffer_fastCover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations (specifically, k and d combinations)
+ * and picks the best parameters. `*parameters` is filled with the best parameters found,
+ * dictionary constructed with those parameters is stored in `dictBuffer`.
+ * All of the parameters d, k, steps, f, and accel are optional.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
+ * If steps is zero, the default value (40) is used.
+ * If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [50, 2000].
+ * If f is zero, default value of 20 is used.
+ * If accel is zero, default value of 1 is used.
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * On success `*parameters` contains the parameters selected.
+ * See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
+ size_t dictBufferCapacity, const void* samplesBuffer,
+ const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_fastCover_params_t* parameters);
+
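+/* Usage sketch (not part of the upstream API) : letting fastCover search for
+ * parameters. Leaving k, d, f, steps and accel at 0 requests the parameter
+ * search described above; on success the selected values can be read back from
+ * the struct. dictBuffer / samplesBuffer / ... are assumed to be caller-provided,
+ * and the target compression level of 3 is an assumption for this example.
+ *
+ *   #include <string.h>
+ *   #include "zdict.h"
+ *
+ *   ZDICT_fastCover_params_t fcparams;
+ *   size_t dictSize;
+ *   memset(&fcparams, 0, sizeof(fcparams));    // 0 == "use defaults / search"
+ *   fcparams.zParams.compressionLevel = 3;     // optimize for this level (assumed)
+ *   dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictCapacity,
+ *                                                      samplesBuffer, samplesSizes, nbSamples,
+ *                                                      &fcparams);
+ *   if (!ZDICT_isError(dictSize)) {
+ *       // fcparams.k and fcparams.d now hold the selected parameters
+ *   }
+ */
+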
+/*! ZDICT_finalizeDictionary():
+ * Given a custom content as a basis for dictionary, and a set of samples,
+ * finalize dictionary by adding headers and statistics.
+ *
+ * Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+ *
+ * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
+ * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
+ * or an error code, which can be tested by ZDICT_isError().
+ * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
+ * Note 2: dictBuffer and dictContent can overlap
+ */
+#define ZDICT_CONTENTSIZE_MIN 128
+#define ZDICT_DICTSIZE_MIN 256
+ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+ const void* dictContent, size_t dictContentSize,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_params_t parameters);
+
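+/* Example sketch (not part of the upstream API) : wrapping hand-picked raw
+ * content into a usable dictionary. `rawContent` / `rawContentSize` are hypothetical
+ * caller-provided inputs and must be at least ZDICT_CONTENTSIZE_MIN bytes.
+ *
+ *   #include <string.h>
+ *   #include "zdict.h"
+ *
+ *   ZDICT_params_t zparams;
+ *   size_t dictSize;
+ *   memset(&zparams, 0, sizeof(zparams));   // 0 == default level, auto dictID, silent
+ *   dictSize = ZDICT_finalizeDictionary(dictBuffer, dictCapacity,
+ *                                       rawContent, rawContentSize,
+ *                                       samplesBuffer, samplesSizes, nbSamples,
+ *                                       zparams);
+ *   // on success, dictBuffer starts with the magic + dictID header and entropy tables
+ */
+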
+typedef struct {
+ unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */
+ ZDICT_params_t zParams;
+} ZDICT_legacy_params_t;
+
+/*! ZDICT_trainFromBuffer_legacy():
+ * Train a dictionary from an array of samples.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * `parameters` is optional and can be provided with values set to 0 to mean "default".
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
+ * See ZDICT_trainFromBuffer() for details on failure modes.
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousands samples, though this can vary a lot.
+ * It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
+ * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
+ void *dictBuffer, size_t dictBufferCapacity,
+ const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_legacy_params_t parameters);
+
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from compiler,
+ for example with -Wno-deprecated-declarations for gcc
+ or _CRT_SECURE_NO_WARNINGS in Visual.
+ Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */
+#else
+# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
+# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
+# elif (ZDICT_GCC_VERSION >= 301)
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
+# elif defined(_MSC_VER)
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
+# else
+# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API
+# endif
+#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
+
+ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
+
+
+#endif /* ZDICT_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* DICTBUILDER_H_001 */
diff --git a/vendor/github.com/DataDog/zstd/zstd.go b/vendor/github.com/DataDog/zstd/zstd.go
new file mode 100644
index 000000000..b6af4eb19
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd.go
@@ -0,0 +1,147 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#include "stdint.h" // for uintptr_t
+
+// The following *_wrapper functions are used for removing superfluous
+// memory allocations when calling the wrapped functions from Go code.
+// See https://github.com/golang/go/issues/24450 for details.
+
+static size_t ZSTD_compress_wrapper(uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize, int compressionLevel) {
+ return ZSTD_compress((void*)dst, maxDstSize, (const void*)src, srcSize, compressionLevel);
+}
+
+static size_t ZSTD_decompress_wrapper(uintptr_t dst, size_t maxDstSize, uintptr_t src, size_t srcSize) {
+ return ZSTD_decompress((void*)dst, maxDstSize, (const void *)src, srcSize);
+}
+
+*/
+import "C"
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "runtime"
+ "unsafe"
+)
+
+// Defines best and standard values for zstd cli
+const (
+ BestSpeed = 1
+ BestCompression = 20
+ DefaultCompression = 5
+)
+
+var (
+ // ErrEmptySlice is returned when there is nothing to compress
+ ErrEmptySlice = errors.New("Bytes slice is empty")
+)
+
+// CompressBound returns the worst case size needed for a destination buffer,
+// which can be used to preallocate a destination buffer or select a previously
+// allocated buffer from a pool.
+// This mirrors the implementation of the ZSTD_COMPRESSBOUND macro in zstd.h.
+func CompressBound(srcSize int) int {
+ lowLimit := 128 << 10 // 128 kB
+ var margin int
+ if srcSize < lowLimit {
+ margin = (lowLimit - srcSize) >> 11
+ }
+ return srcSize + (srcSize >> 8) + margin
+}
+
+// cCompressBound is a cgo call to check the go implementation above against the c code.
+func cCompressBound(srcSize int) int {
+ return int(C.ZSTD_compressBound(C.size_t(srcSize)))
+}
+
+// Compress src into dst. If you have a buffer to use, you can pass it to
+// prevent allocation. If it is too small, or if nil is passed, a new buffer
+// will be allocated and returned.
+func Compress(dst, src []byte) ([]byte, error) {
+ return CompressLevel(dst, src, DefaultCompression)
+}
+
+// CompressLevel is the same as Compress but you can pass a compression level
+func CompressLevel(dst, src []byte, level int) ([]byte, error) {
+ bound := CompressBound(len(src))
+ if cap(dst) >= bound {
+ dst = dst[0:bound] // Reuse dst buffer
+ } else {
+ dst = make([]byte, bound)
+ }
+
+ srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
+ if len(src) > 0 {
+ srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
+ }
+
+ cWritten := C.ZSTD_compress_wrapper(
+ C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
+ C.size_t(len(dst)),
+ srcPtr,
+ C.size_t(len(src)),
+ C.int(level))
+
+ runtime.KeepAlive(src)
+ written := int(cWritten)
+ // Check if the return is an Error code
+ if err := getError(written); err != nil {
+ return nil, err
+ }
+ return dst[:written], nil
+}
+
+// Decompress src into dst. If you have a buffer to use, you can pass it to
+// prevent allocation. If it is too small, or if nil is passed, a new buffer
+// will be allocated and returned.
+func Decompress(dst, src []byte) ([]byte, error) {
+ if len(src) == 0 {
+ return []byte{}, ErrEmptySlice
+ }
+ decompress := func(dst, src []byte) ([]byte, error) {
+
+ cWritten := C.ZSTD_decompress_wrapper(
+ C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
+ C.size_t(len(dst)),
+ C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
+ C.size_t(len(src)))
+
+ runtime.KeepAlive(src)
+ written := int(cWritten)
+ // Check error
+ if err := getError(written); err != nil {
+ return nil, err
+ }
+ return dst[:written], nil
+ }
+
+ if len(dst) == 0 {
+ // Attempt to use zStd to determine decompressed size (may result in error or 0)
+ size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src)))))
+
+ if err := getError(size); err != nil {
+ return nil, err
+ }
+
+ if size > 0 {
+ dst = make([]byte, size)
+ } else {
+ dst = make([]byte, len(src)*3) // starting guess
+ }
+ }
+ for i := 0; i < 3; i++ { // 3 tries to allocate a bigger buffer
+ result, err := decompress(dst, src)
+ if !IsDstSizeTooSmallError(err) {
+ return result, err
+ }
+ dst = make([]byte, len(dst)*2) // Double the buffer size and retry
+ }
+
+ // We failed getting a dst buffer of correct size, use stream API
+ r := NewReader(bytes.NewReader(src))
+ defer r.Close()
+ return ioutil.ReadAll(r)
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h
new file mode 100644
index 000000000..53470c18f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd.h
@@ -0,0 +1,1913 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <stddef.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZSTDLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define ZSTDLIB_API ZSTDLIB_VISIBILITY
+#endif
+
+
+/*******************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+ The compression ratio achievable on small data can be highly improved using
+ a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 4
+#define ZSTD_VERSION_RELEASE 0
+
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */
+
+/***************************************
+* Default constant
+***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/***************************************
+* Constants
+***************************************/
+
+/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+
+/***************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * If the user cannot determine a maximum upper bound, it's better to use streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
+
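+/* Illustrative sketch (not part of the upstream header) : a minimal single-shot
+ * round trip with the simple API above. `src` / `srcSize` / `dst` / `dstCapacity`
+ * and compression level 3 are assumptions for this example.
+ *
+ *   #include <stdlib.h>
+ *   #include "zstd.h"
+ *
+ *   size_t const cBound = ZSTD_compressBound(srcSize);
+ *   void*  const cBuff  = malloc(cBound);
+ *   size_t const cSize  = ZSTD_compress(cBuff, cBound, src, srcSize, 3);
+ *   // check ZSTD_isError(cSize) before using cBuff / cSize
+ *   size_t const dSize  = ZSTD_decompress(dst, dstCapacity, cBuff, cSize);
+ *   // on success dSize is the regenerated size, otherwise ZSTD_isError(dSize) is non-zero
+ *   free(cBuff);
+ */
+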
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
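+/* Usage sketch (not part of the upstream header) : sizing the destination
+ * buffer from the frame header before decompressing. `cBuff` / `cSize` are assumed
+ * to hold exactly one complete frame; sizes from untrusted input should be capped
+ * (see note 5 above).
+ *
+ *   #include <stdlib.h>
+ *   #include "zstd.h"
+ *
+ *   unsigned long long const rSize = ZSTD_getFrameContentSize(cBuff, cSize);
+ *   if (rSize != ZSTD_CONTENTSIZE_ERROR && rSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ *       void*  const rBuff = malloc((size_t)rSize);
+ *       size_t const dSize = ZSTD_decompress(rBuff, (size_t)rSize, cBuff, cSize);
+ *       // dSize == rSize on success, otherwise ZSTD_isError(dSize) is non-zero
+ *       free(rBuff);
+ *   }   // UNKNOWN => use the streaming API instead; ERROR => invalid input
+ */
+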
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+
+
+/***************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Use one context per thread for parallel execution in multi-threaded environments. */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx
+ * The function will compress at requested compression level,
+ * ignoring any other parameter */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+ * Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
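+/* Example sketch (not part of the upstream header) : reusing one context for
+ * many compressions, as recommended above. The `jobs` array, `nbJobs` and level 3
+ * are hypothetical, for illustration only.
+ *
+ *   #include "zstd.h"
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   size_t i;
+ *   for (i = 0; i < nbJobs; i++) {
+ *       size_t const cSize = ZSTD_compressCCtx(cctx,
+ *                                              jobs[i].dst, jobs[i].dstCapacity,
+ *                                              jobs[i].src, jobs[i].srcSize, 3);
+ *       if (ZSTD_isError(cSize)) break;   // see ZSTD_getErrorName(cSize)
+ *       jobs[i].cSize = cSize;
+ *   }
+ *   ZSTD_freeCCtx(cctx);   // release the context once all jobs are done
+ */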
+
+/***************************************
+* Advanced compression API
+***************************************/
+
+/* API design :
+ * Parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ * They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supercedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove from experimental API entry points which are redundant with this API.
+ */
+
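+/* Illustrative sketch (not part of the upstream header) : the push-parameter
+ * pattern described above, using ZSTD_CCtx_setParameter() and ZSTD_compress2()
+ * (declared further down in this header). Level 19 and the checksum flag are
+ * arbitrary assumptions; `src` / `dst` and their sizes are caller-provided.
+ *
+ *   #include "zstd.h"
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *   // parameters are sticky : every frame produced by this cctx now uses them
+ *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *   // check ZSTD_isError(cSize) before using the result
+ *   ZSTD_freeCCtx(cctx);
+ */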
+
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+ ZSTD_dfast=2,
+ ZSTD_greedy=3,
+ ZSTD_lazy=4,
+ ZSTD_lazy2=5,
+ ZSTD_btlazy2=6,
+ ZSTD_btopt=7,
+ ZSTD_btultra=8,
+ ZSTD_btultra2=9
+ /* note : new strategies _might_ be added in the future.
+ Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
+
+
+typedef enum {
+
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
+ * for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level.
+ * Note 2 : setting a level sets all default values of other compression parameters */
+ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+ * requires explicitly allowing such window size at decompression stage if using streaming. */
+ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
+ * Resulting memory usage is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting memory usage is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless when using "fast" strategy.
+ * It's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless when using "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_c_minMatch=105, /* Minimum size of searched matches.
+ * Note that Zstandard can still find matches of smaller size,
+ * it just tweaks its search algorithm to look for this size and larger.
+ * Larger values increase compression and decompression speed, but decrease ratio.
+ * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+ * Note that currently, for all strategies < btopt, effective minimum is 4.
+ * , for all strategies > fast, effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+ * For strategies btopt, btultra & btultra2:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
+ * except when expressly set to a different value. */
+ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+                                     * Default: windowLog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher.
+ * Larger/too small values usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashRateLog". */
+
+ /* frame parameters */
+ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression.
+                              * This is automatically the case when using ZSTD_compress2().
+                              * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize(). */
+    ZSTD_c_checksumFlag=201,      /* A 32-bit checksum of content is written at end of frame (default:0) */
+ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * They return an error otherwise. */
+ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :
+                              * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives control back to the caller,
+ * while compression work is performed in parallel, within worker threads.
+ * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+ * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+ * More workers improve speed, but also increase memory usage.
+                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside the caller's thread, and all invocations are blocking */
+ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+ * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+                              * Job size must be at least the overlap size, or 1 MB, whichever is larger.
+ * The minimum size is automatically and transparently enforced */
+ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
+ * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+ * It helps preserve compression ratio, while each job is compressed in parallel.
+ * This value is enforced only when nbWorkers >= 1.
+ * Larger values increase compression ratio, but decrease speed.
+ * Possible values range from 0 to 9 :
+ * - 0 means "default" : value will be determined by the library, depending on strategy
+ * - 1 means "no overlap"
+ * - 9 means "full overlap", using a full window size.
+ * Each intermediate rank increases/decreases load size by a factor 2 :
+                              * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5: w/16; 4: w/32; 3: w/64; 2: w/128; 1: no overlap; 0: default
+ * default value varies between 6 and 9, depending on strategy */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_c_rsyncable
+ * ZSTD_c_format
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly;
+ *          also, the enum values themselves are unstable and can still change.
+ */
+ ZSTD_c_experimentalParam1=500,
+ ZSTD_c_experimentalParam2=10,
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+} ZSTD_cParameter;
+
+typedef struct {
+ size_t error;
+ int lowerBound;
+ int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
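+
+/* Example : a minimal sketch showing how the returned bounds can be used to
+ * clamp a requested value before setting it (the parameter and the requested
+ * value below are chosen purely for illustration) :
+ *
+ *   int requested = 25;                                        // wished windowLog
+ *   ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);
+ *   if (!ZSTD_isError(b.error)) {
+ *       if (requested < b.lowerBound) requested = b.lowerBound;
+ *       if (requested > b.upperBound) requested = b.upperBound;
+ *   }
+ */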
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * the following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
+ *  This value will also be checked at end of frame, and trigger an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ * Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note 3 : Whenever all input data is provided and consumed in a single round,
+ * for example with ZSTD_compress2(),
+ * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
+ * this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+typedef enum {
+ ZSTD_reset_session_only = 1,
+ ZSTD_reset_parameters = 2,
+ ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ * There are 2 different things that can be reset, independently or jointly :
+ * - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ * Useful after an error, or to interrupt any ongoing compression.
+ * Any internal data not yet flushed is cancelled.
+ * Compression parameters and dictionary remain unchanged.
+ * They will be used to compress next frame.
+ * Resetting session never fails.
+ * - The parameters : changes all parameters back to "default".
+ * This removes any reference to any dictionary too.
+ *                        Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing);
+ *                        otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError()).
+ * - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+/*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * ZSTD_compress2() always starts a new frame.
+ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - The function is always blocking, returns when compression is completed.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
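+/* Example : a minimal sketch of one-shot compression through the advanced API,
+ * combining ZSTD_CCtx_setParameter() with ZSTD_compress2().
+ * The chosen parameter values are illustrative, and error handling is reduced
+ * to returning whatever ZSTD_compress2() reports :
+ *
+ *   size_t compress_once(void* dst, size_t dstCapacity,
+ *                        const void* src, size_t srcSize)
+ *   {
+ *       ZSTD_CCtx* const cctx = ZSTD_createCCtx();   // assumed to succeed in this sketch
+ *       size_t ret;
+ *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *       ret = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *       ZSTD_freeCCtx(cctx);
+ *       return ret;              // compressed size, or an error code (ZSTD_isError())
+ *   }
+ */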
+
+/***************************************
+* Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ * Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+ ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
+ * the streaming API will refuse to allocate memory buffer
+ * in order to protect the host from unreasonable memory requirements.
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ *           ZSTD_d_format
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly
+ */
+ ZSTD_d_experimentalParam1=1000
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ *  Set one decompression parameter, selected by enum ZSTD_dParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
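+
+/* Example : a minimal sketch limiting the window size a streaming decoder will
+ * accept, using ZSTD_DCtx_setParameter() (the value 27 is illustrative) :
+ *
+ *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();               // assumed to succeed
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);   // refuse frames needing a window > 128 MB
+ *   // ... ZSTD_decompressStream(dctx, &output, &input) as usual ...
+ *   ZSTD_DCtx_reset(dctx, ZSTD_reset_parameters);            // back to default limit (between frames only)
+ *   ZSTD_freeDCtx(dctx);
+ */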
+
+
+/****************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /**< start of input buffer */
+ size_t size; /**< size of input buffer */
+ size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /**< start of output buffer */
+ size_t size; /**< size of output buffer */
+ size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* It is recommended to re-use a ZSTD_CStream, since it plays nicer with the system's memory by re-using already allocated buffers.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present again remaining input data.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+                    * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, distributes jobs to internal worker threads, flushes whatever is available,
+ *                                                 and then immediately returns, just indicating that there is some data remaining to be flushed.
+ *                                                 The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);    /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block in all circumstances. */
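+
+/* Example : a minimal sketch of a streaming compression loop over a single
+ * in-memory buffer, following the HowTo above. srcBuf/srcSize, outBuf and the
+ * write_out() callback are illustrative placeholders; outCapacity is assumed
+ * to be at least ZSTD_CStreamOutSize(), and cctx parameters are assumed to be
+ * already set :
+ *
+ *   void stream_compress(ZSTD_CCtx* cctx, const void* srcBuf, size_t srcSize,
+ *                        void* outBuf, size_t outCapacity)
+ *   {
+ *       ZSTD_inBuffer input = { srcBuf, srcSize, 0 };
+ *       size_t remaining;
+ *       do {
+ *           ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
+ *           remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *           if (ZSTD_isError(remaining)) return;   // simplified error handling
+ *           write_out(outBuf, output.pos);         // illustrative : consume the produced bytes
+ *       } while (remaining != 0);                  // 0 == frame fully written and flushed
+ *   }
+ */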
+
+/*******************************************************************************
+ * This is a legacy streaming API, and can be replaced by ZSTD_CCtx_reset() and
+ * ZSTD_compressStream2(). It is redundant, but is still fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/**
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/**
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the number of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present again remaining data.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+*  But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);    /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
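+
+/* Example : a minimal sketch of a streaming decompression loop over a single
+ * complete frame held in memory. srcBuf/srcSize, outBuf and the write_out()
+ * callback are illustrative placeholders; outCapacity is assumed to be at
+ * least ZSTD_DStreamOutSize() :
+ *
+ *   void stream_decompress(ZSTD_DCtx* dctx, const void* srcBuf, size_t srcSize,
+ *                          void* outBuf, size_t outCapacity)
+ *   {
+ *       ZSTD_inBuffer input = { srcBuf, srcSize, 0 };
+ *       while (input.pos < input.size) {
+ *           ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
+ *           size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
+ *           if (ZSTD_isError(ret)) return;         // simplified error handling
+ *           write_out(outBuf, output.pos);         // illustrative : consume the regenerated bytes
+ *       }                                          // last ret should be 0 : frame fully decoded and flushed
+ *   }
+ */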
+
+
+/**************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/***********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
+ * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
+ * Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
+
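+/* Example : a minimal sketch compressing several buffers with one digested
+ * dictionary. dictBuf/dictSize, cctx and the msg[]/dst[] arrays are
+ * illustrative placeholders :
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);   // level 3
+ *   for (size_t i = 0; i < nbMsgs; i++) {
+ *       size_t const cSize = ZSTD_compress_usingCDict(cctx,
+ *                               dst[i], dstCapacity[i],
+ *                               msg[i], msgSize[i], cdict);
+ *       if (ZSTD_isError(cSize)) break;            // simplified error handling
+ *   }
+ *   ZSTD_freeCDict(cdict);
+ */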
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
+
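+/* Example : the matching decompression side, re-using one digested dictionary
+ * for many frames (dictBuf/dictSize, dctx and the other buffer names are
+ * illustrative placeholders) :
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *   size_t const dSize = ZSTD_decompress_usingDDict(dctx,
+ *                            dst, dstCapacity, cSrc, cSrcSize, ddict);
+ *   // if (ZSTD_isError(dSize)) : wrong dictionary, corrupted frame, or dst too small
+ *   ZSTD_freeDDict(ddict);
+ */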
+
+/********************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ *  To identify the exact failure cause, use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
+
+/*******************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset with the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ *  The parameters ignored are labeled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
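+
+/* Example : a minimal sketch using a referenced CDict together with the
+ * advanced one-shot API (cdict, cctx and the buffers are illustrative
+ * placeholders; the CDict must outlive these calls) :
+ *
+ *   ZSTD_CCtx_refCDict(cctx, cdict);               // sticky : applies to all following frames
+ *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *   ZSTD_CCtx_refCDict(cctx, NULL);                // return to no-dictionary mode
+ */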
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ *            It's recommended to "load once, use many times", to amortize the cost.
+ * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ * how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special: referencing a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) to decompress next frame.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_decompressStream() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ * A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
+
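+/* Example : a minimal sketch of prefix ("diff-like") usage on both sides.
+ * prevVersion/prevSize, newVersion/newSize and the other names are
+ * illustrative placeholders; the same prefix must be referenced again before
+ * each new frame, since it is discarded at end of frame :
+ *
+ *   // compression side
+ *   ZSTD_CCtx_refPrefix(cctx, prevVersion, prevSize);
+ *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newSize);
+ *
+ *   // decompression side (must use the same prefix)
+ *   ZSTD_DCtx_refPrefix(dctx, prevVersion, prevSize);
+ *   size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize);
+ */
+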
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+
+/****************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/****************************************************************************************
+ * experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN 6
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE 8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN ZSTD_fast
+#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+
+
+#define ZSTD_OVERLAPLOG_MIN 0
+#define ZSTD_OVERLAPLOG_MAX 9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
+ * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
+ * to preserve host's memory from unreasonable requirements.
+ * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+ * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN 1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+#define ZSTD_LDM_HASHRATELOG_MIN 0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* internal */
+#define ZSTD_HASHLOG3_MAX 17
+
+
+/* --- Advanced types --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef struct {
+ unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /**< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /**< nb of searches : larger == more compression, slower */
+ unsigned minMatch; /**< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy; /**< see ZSTD_strategy definition above */
+} ZSTD_compressionParameters;
+
+typedef struct {
+ int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    int checksumFlag;    /**< 1: generate a 32-bit checksum using XXH64 algorithm at end of frame, for error detection */
+ int noDictIDFlag; /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef enum {
+ ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
+ ZSTD_dlm_byRef = 1, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+typedef enum {
+    /* Open question : should we have a format ZSTD_f_auto ?
+ * Today, it would mean exactly the same as ZSTD_f_zstd1.
+ * But, in the future, should several formats become supported,
+ * on the compression side, it would mean "default format".
+ * On the decompression side, it would mean "automatic format detection",
+ * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
+ * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+ * This question could be kept for later, when there are actually multiple formats to support,
+ * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+                                 * Decoder cannot automatically recognise this format, and requires this instruction. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* Note: this enum and the behavior it controls are effectively internal
+ * implementation details of the compressor. They are expected to continue
+ * to evolve and should be considered only in the context of extremely
+ * advanced performance tuning.
+ *
+ * Zstd currently supports the use of a CDict in two ways:
+ *
+ * - The contents of the CDict can be copied into the working context. This
+ * means that the compression can search both the dictionary and input
+ * while operating on a single set of internal tables. This makes
+ * the compression faster per-byte of input. However, the initial copy of
+ * the CDict's tables incurs a fixed cost at the beginning of the
+ * compression. For small compressions (< 8 KB), that copy can dominate
+ * the cost of the compression.
+ *
+ * - The CDict's tables can be used in-place. In this model, compression is
+ * slower per input byte, because the compressor has to search two sets of
+ * tables. However, this model incurs no start-up cost (as long as the
+ * working context's tables can be reused). For small inputs, this can be
+ * faster than copying the CDict's tables.
+ *
+ * Zstd has a simple internal heuristic that selects which strategy to use
+ * at the beginning of a compression. However, if experimentation shows that
+ * Zstd is making poor choices, it is possible to override that choice with
+ * this enum.
+ */
+ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
+ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+  ZSTD_lcm_auto = 0,          /**< Automatically determine the literals compression mode based on the compression level.
+                               *   Negative compression levels will emit uncompressed literals, while positive
+                               *   compression levels will attempt to compress literals. */
+ ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
+
+/***************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast as most of the data is skipped,
+ * however it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+
+/** ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
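+
+/* Example : a minimal sketch sizing a destination buffer from the bound.
+ * cSrc/cSrcSize are illustrative; note the bound can be much larger than the
+ * real decompressed size when the content size field is absent, so a real
+ * application should also cap it against its own memory limits :
+ *
+ *   void* decompress_whole(const void* cSrc, size_t cSrcSize, size_t* dSizePtr)
+ *   {
+ *       unsigned long long const bound = ZSTD_decompressBound(cSrc, cSrcSize);
+ *       if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;   // invalid or truncated input
+ *       void* const dst = malloc((size_t)bound);            // needs <stdlib.h>
+ *       if (dst == NULL) return NULL;
+ *       *dSizePtr = ZSTD_decompress(dst, (size_t)bound, cSrc, cSrcSize);
+ *       return dst;                                         // caller checks ZSTD_isError(*dSizePtr)
+ *   }
+ */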
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+
+/***************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ * ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CCtx size estimation is only correct for single-threaded compression. */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ *  ZSTD_DStream memory budget depends on window size.
+ *  This information can be passed manually, using ZSTD_estimateDStreamSize,
+ *  or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
+ *  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
+ *         an internal ?Dict will be created, whose additional size is not estimated here.
+ *         In this case, get the total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-bytes aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
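+
+/* Example : a minimal sketch of a CCtx placed into a caller-provided buffer.
+ * The workspace size (1 MB) and compression level (3) are illustrative; the
+ * buffer type is chosen so the 8-bytes alignment requirement is met :
+ *
+ *   static unsigned long long workspace[(1 << 20) / sizeof(unsigned long long)];
+ *   size_t const needed = ZSTD_estimateCCtxSize(3);
+ *   ZSTD_CCtx* const cctx = (needed <= sizeof(workspace))
+ *                         ? ZSTD_initStaticCCtx(workspace, sizeof(workspace))
+ *                         : NULL;                           // workspace too small
+ *   // ... use cctx with ZSTD_compressCCtx() / ZSTD_compress2() ; there is no free function ...
+ */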
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
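+/* Example : a minimal sketch routing zstd allocations through custom hooks.
+ * Here they simply forward to malloc()/free() (needs <stdlib.h>); a real
+ * application would typically plug in its own arena or accounting :
+ *
+ *   static void* my_alloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
+ *   static void  my_free (void* opaque, void* address) { (void)opaque; free(address); }
+ *
+ *   static ZSTD_customMem const myMem = { my_alloc, my_free, NULL };
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(myMem);
+ *   // ... use cctx as usual ; ZSTD_freeCCtx(cctx) will release memory through my_free() ...
+ */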
+
+
+/***************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is just referenced, not duplicated.
+ * As a consequence, `dictBuffer` **must** outlive CDict,
+ * and its content must remain unmodified throughout the lifetime of CDict. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
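+
+/* Example (illustrative sketch) : derive parameters from a compression level, then refine them.
+ * `knownSrcSize` is a hypothetical variable holding the real source size once available.
+ *
+ *   ZSTD_compressionParameters cParams = ZSTD_getCParams(5, 0, 0);   // level 5, srcSize unknown, no dictionary
+ *   cParams = ZSTD_adjustCParams(cParams, knownSrcSize, 0);          // clamp once the real size is known
+ *   size_t const check = ZSTD_checkCParams(cParams);                 // 0 here : adjusted params are always valid
+ */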
+
+/*! ZSTD_compress_advanced() :
+ * Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
+ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+ * Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams);
+
+
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ * Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* === experimental parameters === */
+/* these parameters can be used with ZSTD_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+ /* Enables rsyncable mode,
+ * which makes compressed files more rsync friendly
+ * by adding periodic synchronization points to the compressed data.
+ * The target average block size is ZSTD_c_jobSize / 2.
+ * It's possible to modify the job size to increase or decrease
+ * the granularity of the synchronization point.
+ * Once the jobSize is smaller than the window size,
+ * compression ratio will start to degrade.
+ * NOTE 1: rsyncable mode only works when multithreading is enabled.
+ * NOTE 2: rsyncable performs poorly in combination with long range mode,
+ * since it will decrease the effectiveness of synchronization points,
+ * though mileage may vary.
+ * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+ * If the selected compression level is already running significantly slower,
+ * the overall speed won't be significantly impacted.
+ */
+ #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
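+
+/* Example (illustrative sketch) : enable rsyncable output.
+ * `cctx` is assumed to be an existing ZSTD_CCtx* ; rsyncable requires nbWorkers >= 1.
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
+ */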
+
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent frames.
+ * - ZSTD_compressStream2() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ * for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This can be done even after compression is started :
+ * if nbWorkers==0, it will have no impact until a new compression is started;
+ * if nbWorkers>=1, new parameters will be picked up at the next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
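+
+/* Example (illustrative sketch) : the "quick howto" above, in code.
+ * `cctx` is assumed to be an existing ZSTD_CCtx*, and `input` / `output`
+ * to be prepared ZSTD_inBuffer / ZSTD_outBuffer structures.
+ *
+ *   ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cctxParams);   // applies to all subsequent frames
+ *   ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *   ZSTD_freeCCtxParams(cctxParams);
+ */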
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ * Same as ZSTD_compressStream2(),
+ * but using only integral types as arguments.
+ * This variant might be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
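+
+/* Example (illustrative sketch) : single-shot compression through the integral-arguments variant.
+ * `cctx`, `dst`/`dstCapacity` and `src`/`srcSize` are assumed to be provided by the caller.
+ *
+ *   size_t dstPos = 0, srcPos = 0;
+ *   size_t const remaining = ZSTD_compressStream2_simpleArgs(cctx,
+ *                                   dst, dstCapacity, &dstPos,
+ *                                   src, srcSize, &srcPos, ZSTD_e_end);
+ *   // remaining == 0 : frame completed, dstPos bytes written; otherwise call again (or test ZSTD_isError)
+ */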
+
+
+/***************************************
+* Advanced decompression functions
+***************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives DDict;
+ * it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but references `dict` content instead of copying it into `dctx`.
+ * This saves memory if `dict` remains around.
+ * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but gives direct control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ * Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ * This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
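+
+/* Example (illustrative sketch) : cap decoder memory at an 8 MB window,
+ * matching the interoperability recommendation given later in this header.
+ * `dctx` is assumed to be an existing ZSTD_DCtx*.
+ *
+ *   size_t const err = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 23);   // frames needing more are refused
+ *   if (ZSTD_isError(err)) { ... }
+ */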
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ * Same as ZSTD_decompressStream(),
+ * but using only integral types as arguments.
+ * This can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/********************************************************************
+* Advanced streaming functions
+* Warning : most of these functions are now redundant with the Advanced API.
+* Once Advanced API reaches "stable" status,
+* redundant functions will be deprecated, and then at some point removed.
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+/**! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * This creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+/**! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive compression session
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+/**! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
+
+/*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * start a new frame, using the same parameters as the previous frame.
+ * This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in place.
+ * Note that zcs must have been initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in the header, and checked at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for the current frame.
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flush speed is limited by production speed of oldest job
+ * irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
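+
+/* Example (illustrative sketch) : probe an ongoing multi-threaded compression.
+ * `cctx` is assumed to be compressing with nbWorkers >= 1 on another thread; printf requires <stdio.h>.
+ *
+ *   ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *   size_t const flushable = ZSTD_toFlushNow(cctx);
+ *   printf("buffered input: %llu bytes, flushable output: %zu bytes, active workers: %u\n",
+ *          fp.ingested - fp.consumed, flushable, fp.nbActiveWorkers);
+ */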
+
+
+/*===== Advanced Streaming decompression functions =====*/
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/*********************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/**
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resources.
+ ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+ or ZSTD_compressBegin_advanced(), for finer parameter control.
+ It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx().
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+ ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+ It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+ - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps.
+ In which case, it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
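+
+/* Example (illustrative sketch) : the buffer-less compression flow described above,
+ * compressing a single memory segment in one shot; intermediate chunks would go through
+ * ZSTD_compressContinue() with the same error checks.
+ * `cctx`, `dst`, `src` and `srcSize` are assumed to be provided by the caller.
+ *
+ *   size_t const dstCapacity = ZSTD_compressBound(srcSize);              // worst-case output size
+ *   size_t ret = ZSTD_compressBegin(cctx, 3);
+ *   if (!ZSTD_isError(ret))
+ *       ret = ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);    // writes last block(s) + epilogue
+ *   // ret is the compressed frame size, or an error code (test with ZSTD_isError)
+ */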
+
+
+/*-
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ or an error code, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity;
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are set up, start decompression, with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+ Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by the decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or requires larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
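+
+/* Example (illustrative sketch) : the buffer-less decompression loop described above,
+ * for a frame held entirely in memory (so no round buffer management is needed).
+ * `dctx`, `src`/`srcSize` and `dst`/`dstCapacity` are assumed to be provided by the caller.
+ *
+ *   ZSTD_frameHeader zfh;
+ *   if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) { ... }   // needs more input, or error
+ *   ZSTD_decompressBegin(dctx);
+ *   {   const char* ip = (const char*)src;
+ *       char* op = (char*)dst;
+ *       size_t toRead;
+ *       while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *           size_t const decoded = ZSTD_decompressContinue(dctx,
+ *                                       op, dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
+ *           if (ZSTD_isError(decoded)) { ... }
+ *           ip += toRead; op += decoded;
+ *       }
+ *   }   // frame fully decoded : (op - (char*)dst) bytes regenerated into dst
+ */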
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+
+/* ============================ */
+/** Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ The user will have to keep track of the information required to regenerate data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+ - It is necessary to init context before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
+ In which case, nothing is produced into `dst` !
+ + User must test for such outcome and deal directly with uncompressed data
+ + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
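+
+/* Example (illustrative sketch) : compress one block and fall back to raw storage
+ * when it is not compressible, as required by the rules above.
+ * `cctx` is assumed to have been initialized with a ZSTD_compressBegin*() variant.
+ *
+ *   size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
+ *   if (ZSTD_isError(cSize)) { ... }
+ *   if (cSize == 0) {                   // not compressible : nothing was written into dst
+ *       memcpy(dst, src, srcSize);      // store raw; record this in your own framing
+ *       // the decoder must then be told via ZSTD_insertBlock(dctx, dst, srcSize) to keep history consistent
+ *   }
+ */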
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zstd_common.c b/vendor/github.com/DataDog/zstd/zstd_common.c
new file mode 100644
index 000000000..667f4a27f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_common.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdlib.h> /* malloc, calloc, free */
+#include <string.h> /* memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+* Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+#undef ZSTD_isError /* defined within zstd_internal.h */
+/*! ZSTD_isError() :
+ * tells if a return value is an error code
+ * symbol is required for external callers */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ * provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getError() :
+ * convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+
+
+
+/*=**************************************************************
+* Custom allocator
+****************************************************************/
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return malloc(size);
+}
+
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ if (ptr != NULL) memset(ptr, 0, size); /* guard against allocation failure before zeroing */
+ return ptr;
+ }
+ return calloc(1, size);
+}
+
+void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ free(ptr);
+ }
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c
new file mode 100644
index 000000000..2e163c8bf
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_compress.c
@@ -0,0 +1,4417 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <limits.h> /* INT_MAX */
+#include <string.h> /* memset */
+#include "cpu.h"
+#include "mem.h"
+#include "hist.h" /* HIST_countFast_wksp */
+#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_compress_internal.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
+
+
+/*-*************************************
+* Helper functions
+***************************************/
+size_t ZSTD_compressBound(size_t srcSize) {
+ return ZSTD_COMPRESSBOUND(srcSize);
+}
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+struct ZSTD_CDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictContentSize;
+ void* workspace;
+ size_t workspaceSize;
+ ZSTD_matchState_t matchState;
+ ZSTD_compressedBlockState_t cBlockState;
+ ZSTD_customMem customMem;
+ U32 dictID;
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+ return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+ assert(cctx != NULL);
+ memset(cctx, 0, sizeof(*cctx));
+ cctx->customMem = memManager;
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+ ZSTD_STATIC_ASSERT(zcss_init==0);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
+ if (!cctx) return NULL;
+ ZSTD_initCCtx(cctx, customMem);
+ return cctx;
+ }
+}
+
+ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
+ if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
+ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
+ memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */
+ cctx->staticSize = workspaceSize;
+ cctx->workSpace = (void*)(cctx+1);
+ cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
+
+ /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
+ assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
+ cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
+ {
+ void* const ptr = cctx->blockState.nextCBlock + 1;
+ cctx->entropyWorkspace = (U32*)ptr;
+ }
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ return cctx;
+}
+
+/**
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+ ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+}
+
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+{
+ assert(cctx != NULL);
+ assert(cctx->staticSize == 0);
+ ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
+ ZSTD_clearAllDicts(cctx);
+#ifdef ZSTD_MULTITHREAD
+ ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
+#endif
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
+ ZSTD_freeCCtxContent(cctx);
+ ZSTD_free(cctx, cctx->customMem);
+ return 0;
+}
+
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ return ZSTDMT_sizeof_CCtx(cctx->mtctx);
+#else
+ (void)cctx;
+ return 0;
+#endif
+}
+
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*cctx) + cctx->workSpaceSize
+ + ZSTD_sizeof_localDict(cctx->localDict)
+ + ZSTD_sizeof_mtctx(cctx);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+ return ZSTD_sizeof_CCtx(zcs); /* same object */
+}
+
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
+ memset(&cctxParams, 0, sizeof(cctxParams));
+ cctxParams.cParams = cParams;
+ cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
+ assert(!ZSTD_checkCParams(cParams));
+ cctxParams.fParams.contentSizeFlag = 1;
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_calloc(
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
+ params->customMem = customMem;
+ params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ params->fParams.contentSizeFlag = 1;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
+ ZSTD_free(params, params->customMem);
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+ return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
+ memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->fParams.contentSizeFlag = 1;
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
+ memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params.cParams;
+ cctxParams->fParams = params.fParams;
+ cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
+ assert(!ZSTD_checkCParams(params.cParams));
+ return 0;
+}
+
+/* ZSTD_assignParamsToCCtxParams() :
+ * params is presumed valid at this stage */
+static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
+ ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
+{
+ ZSTD_CCtx_params ret = cctxParams;
+ ret.cParams = params.cParams;
+ ret.fParams = params.fParams;
+ ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
+ assert(!ZSTD_checkCParams(params.cParams));
+ return ret;
+}
+
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ bounds.lowerBound = ZSTD_minCLevel();
+ bounds.upperBound = ZSTD_maxCLevel();
+ return bounds;
+
+ case ZSTD_c_windowLog:
+ bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_hashLog:
+ bounds.lowerBound = ZSTD_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_chainLog:
+ bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+ bounds.upperBound = ZSTD_CHAINLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_searchLog:
+ bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+ bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_minMatch:
+ bounds.lowerBound = ZSTD_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_targetLength:
+ bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+ bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+ return bounds;
+
+ case ZSTD_c_strategy:
+ bounds.lowerBound = ZSTD_STRATEGY_MIN;
+ bounds.upperBound = ZSTD_STRATEGY_MAX;
+ return bounds;
+
+ case ZSTD_c_contentSizeFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_checksumFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_dictIDFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_nbWorkers:
+ bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+ bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
+#else
+ bounds.upperBound = 0;
+#endif
+ return bounds;
+
+ case ZSTD_c_jobSize:
+ bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+ bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
+#else
+ bounds.upperBound = 0;
+#endif
+ return bounds;
+
+ case ZSTD_c_overlapLog:
+ bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
+ bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_enableLongDistanceMatching:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_ldmHashLog:
+ bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmMinMatch:
+ bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmBucketSizeLog:
+ bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmHashRateLog:
+ bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+ return bounds;
+
+ /* experimental parameters */
+ case ZSTD_c_rsyncable:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_forceMaxWindow :
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_format:
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ bounds.lowerBound = ZSTD_f_zstd1;
+ bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_forceAttachDict:
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
+ bounds.lowerBound = ZSTD_dictDefaultAttach;
+ bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+ bounds.lowerBound = ZSTD_lcm_auto;
+ bounds.upperBound = ZSTD_lcm_uncompressed;
+ return bounds;
+
+ default:
+ { ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
+ return boundError;
+ }
+ }
+}
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound); \
+}
+
+
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ return 1;
+
+ case ZSTD_c_format:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow :
+ case ZSTD_c_nbWorkers:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ default:
+ return 0;
+ }
+}
+
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
+ if (cctx->streamStage != zcss_init) {
+ if (ZSTD_isUpdateAuthorized(param)) {
+ cctx->cParamsChanged = 1;
+ } else {
+ RETURN_ERROR(stage_wrong);
+ } }
+
+ switch(param)
+ {
+ case ZSTD_c_nbWorkers:
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+ "MT not compatible with static alloc");
+ break;
+
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_format:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ break;
+
+ default: RETURN_ERROR(parameter_unsupported);
+ }
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
+ switch(param)
+ {
+ case ZSTD_c_format :
+ BOUNDCHECK(ZSTD_c_format, value);
+ CCtxParams->format = (ZSTD_format_e)value;
+ return (size_t)CCtxParams->format;
+
+ case ZSTD_c_compressionLevel : {
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ if (value) { /* 0 : does not change current level */
+ CCtxParams->compressionLevel = value;
+ }
+ if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
+ return 0; /* return type (size_t) cannot represent negative values */
+ }
+
+ case ZSTD_c_windowLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_windowLog, value);
+ CCtxParams->cParams.windowLog = value;
+ return CCtxParams->cParams.windowLog;
+
+ case ZSTD_c_hashLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_hashLog, value);
+ CCtxParams->cParams.hashLog = value;
+ return CCtxParams->cParams.hashLog;
+
+ case ZSTD_c_chainLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_chainLog, value);
+ CCtxParams->cParams.chainLog = value;
+ return CCtxParams->cParams.chainLog;
+
+ case ZSTD_c_searchLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_searchLog, value);
+ CCtxParams->cParams.searchLog = value;
+ return value;
+
+ case ZSTD_c_minMatch :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_minMatch, value);
+ CCtxParams->cParams.minMatch = value;
+ return CCtxParams->cParams.minMatch;
+
+ case ZSTD_c_targetLength :
+ BOUNDCHECK(ZSTD_c_targetLength, value);
+ CCtxParams->cParams.targetLength = value;
+ return CCtxParams->cParams.targetLength;
+
+ case ZSTD_c_strategy :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_strategy, value);
+ CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+ return (size_t)CCtxParams->cParams.strategy;
+
+ case ZSTD_c_contentSizeFlag :
+ /* Content size written in frame header _when known_ (default:1) */
+ DEBUGLOG(4, "set content size flag = %u", (value!=0));
+ CCtxParams->fParams.contentSizeFlag = value != 0;
+ return CCtxParams->fParams.contentSizeFlag;
+
+ case ZSTD_c_checksumFlag :
+ /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
+ CCtxParams->fParams.checksumFlag = value != 0;
+ return CCtxParams->fParams.checksumFlag;
+
+ case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
+ CCtxParams->fParams.noDictIDFlag = !value;
+ return !CCtxParams->fParams.noDictIDFlag;
+
+ case ZSTD_c_forceMaxWindow :
+ CCtxParams->forceWindow = (value != 0);
+ return CCtxParams->forceWindow;
+
+ case ZSTD_c_forceAttachDict : {
+ const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+ BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ CCtxParams->attachDictPref = pref;
+ return CCtxParams->attachDictPref;
+ }
+
+ case ZSTD_c_literalCompressionMode : {
+ const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ CCtxParams->literalCompressionMode = lcm;
+ return CCtxParams->literalCompressionMode;
+ }
+
+ case ZSTD_c_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+#else
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ CCtxParams->nbWorkers = value;
+ return CCtxParams->nbWorkers;
+#endif
+
+ case ZSTD_c_jobSize :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+#else
+ /* Adjust to the minimum non-default value. */
+ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
+ value = ZSTDMT_JOBSIZE_MIN;
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ assert(value >= 0);
+ CCtxParams->jobSize = value;
+ return CCtxParams->jobSize;
+#endif
+
+ case ZSTD_c_overlapLog :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+#else
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+ CCtxParams->overlapLog = value;
+ return CCtxParams->overlapLog;
+#endif
+
+ case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+#else
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+ CCtxParams->rsyncable = value;
+ return CCtxParams->rsyncable;
+#endif
+
+ case ZSTD_c_enableLongDistanceMatching :
+ CCtxParams->ldmParams.enableLdm = (value!=0);
+ return CCtxParams->ldmParams.enableLdm;
+
+ case ZSTD_c_ldmHashLog :
+ if (value!=0) /* 0 ==> auto */
+ BOUNDCHECK(ZSTD_c_ldmHashLog, value);
+ CCtxParams->ldmParams.hashLog = value;
+ return CCtxParams->ldmParams.hashLog;
+
+ case ZSTD_c_ldmMinMatch :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
+ CCtxParams->ldmParams.minMatchLength = value;
+ return CCtxParams->ldmParams.minMatchLength;
+
+ case ZSTD_c_ldmBucketSizeLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
+ CCtxParams->ldmParams.bucketSizeLog = value;
+ return CCtxParams->ldmParams.bucketSizeLog;
+
+ case ZSTD_c_ldmHashRateLog :
+ RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
+ parameter_outOfBound);
+ CCtxParams->ldmParams.hashRateLog = value;
+ return CCtxParams->ldmParams.hashRateLog;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+}
+
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
+{
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_getParameter(
+ ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
+{
+ switch(param)
+ {
+ case ZSTD_c_format :
+ *value = CCtxParams->format;
+ break;
+ case ZSTD_c_compressionLevel :
+ *value = CCtxParams->compressionLevel;
+ break;
+ case ZSTD_c_windowLog :
+ *value = CCtxParams->cParams.windowLog;
+ break;
+ case ZSTD_c_hashLog :
+ *value = CCtxParams->cParams.hashLog;
+ break;
+ case ZSTD_c_chainLog :
+ *value = CCtxParams->cParams.chainLog;
+ break;
+ case ZSTD_c_searchLog :
+ *value = CCtxParams->cParams.searchLog;
+ break;
+ case ZSTD_c_minMatch :
+ *value = CCtxParams->cParams.minMatch;
+ break;
+ case ZSTD_c_targetLength :
+ *value = CCtxParams->cParams.targetLength;
+ break;
+ case ZSTD_c_strategy :
+ *value = (unsigned)CCtxParams->cParams.strategy;
+ break;
+ case ZSTD_c_contentSizeFlag :
+ *value = CCtxParams->fParams.contentSizeFlag;
+ break;
+ case ZSTD_c_checksumFlag :
+ *value = CCtxParams->fParams.checksumFlag;
+ break;
+ case ZSTD_c_dictIDFlag :
+ *value = !CCtxParams->fParams.noDictIDFlag;
+ break;
+ case ZSTD_c_forceMaxWindow :
+ *value = CCtxParams->forceWindow;
+ break;
+ case ZSTD_c_forceAttachDict :
+ *value = CCtxParams->attachDictPref;
+ break;
+ case ZSTD_c_literalCompressionMode :
+ *value = CCtxParams->literalCompressionMode;
+ break;
+ case ZSTD_c_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+ assert(CCtxParams->nbWorkers == 0);
+#endif
+ *value = CCtxParams->nbWorkers;
+ break;
+ case ZSTD_c_jobSize :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+ assert(CCtxParams->jobSize <= INT_MAX);
+ *value = (int)CCtxParams->jobSize;
+ break;
+#endif
+ case ZSTD_c_overlapLog :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+ *value = CCtxParams->overlapLog;
+ break;
+#endif
+ case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+ *value = CCtxParams->rsyncable;
+ break;
+#endif
+ case ZSTD_c_enableLongDistanceMatching :
+ *value = CCtxParams->ldmParams.enableLdm;
+ break;
+ case ZSTD_c_ldmHashLog :
+ *value = CCtxParams->ldmParams.hashLog;
+ break;
+ case ZSTD_c_ldmMinMatch :
+ *value = CCtxParams->ldmParams.minMatchLength;
+ break;
+ case ZSTD_c_ldmBucketSizeLog :
+ *value = CCtxParams->ldmParams.bucketSizeLog;
+ break;
+ case ZSTD_c_ldmHashRateLog :
+ *value = CCtxParams->ldmParams.hashRateLog;
+ break;
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return 0;
+}
+
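+/* Editor's illustrative sketch (not part of upstream zstd) : reading a value back
+ * after setting it, which is useful because some setters clamp or adjust the
+ * requested value. Error handling omitted.
+ *
+ *     int windowLog;
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);
+ *     ZSTD_CCtx_getParameter(cctx, ZSTD_c_windowLog, &windowLog);
+ */
+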
+/** ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * just copies `params` into `cctx` ;
+ * no other action is performed : parameters are merely stored.
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ * This is possible even if a compression is ongoing,
+ * in which case the new parameters will be applied on the fly, starting with the next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong);
+
+ cctx->requestedParams = *params;
+ return 0;
+}
+
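+/* Editor's illustrative sketch (not part of upstream zstd) : building a reusable
+ * parameter set and applying it with the function above. Assumes the advanced
+ * parameter API declared in zstd.h; error handling omitted.
+ *
+ *     ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
+ *     ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_compressionLevel, 19);
+ *     ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_checksumFlag, 1);
+ *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cctxParams);
+ *     ZSTD_freeCCtxParams(cctxParams);
+ */
+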
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ return 0;
+}
+
+/**
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+ ZSTD_localDict* const dl = &cctx->localDict;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctx->requestedParams, 0, dl->dictSize);
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ cParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation);
+ cctx->cdict = dl->cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
+ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
+ } else {
+ void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation);
+ memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
+ }
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
+ return 0;
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
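+/* Editor's illustrative sketch (not part of upstream zstd) : a dictionary loaded
+ * through the functions above stays attached to the context and is reused for
+ * every following frame until it is replaced or the context is reset.
+ *
+ *     ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictBufferSize);
+ *     ZSTD_compress2(cctx, dst, dstCapacity, src1, src1Size);
+ *     ZSTD_compress2(cctx, dst, dstCapacity, src2, src2Size);
+ *         (the second frame reuses the same dictionary)
+ */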
+
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ /* Free the existing local cdict (if any) to save memory. */
+ ZSTD_clearAllDicts(cctx);
+ cctx->cdict = cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ ZSTD_clearAllDicts(cctx);
+ cctx->prefixDict.dict = prefix;
+ cctx->prefixDict.dictSize = prefixSize;
+ cctx->prefixDict.dictContentType = dictContentType;
+ return 0;
+}
+
+/*! ZSTD_CCtx_reset() :
+ *  Also discards any loaded dictionary when parameters are reset */
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ cctx->streamStage = zcss_init;
+ cctx->pledgedSrcSizePlusOne = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ ZSTD_clearAllDicts(cctx);
+ return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+ }
+ return 0;
+}
+
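+/* Editor's illustrative sketch (not part of upstream zstd) : typical use of the two
+ * reset directives handled above, between unrelated compression jobs.
+ *
+ *     ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ *         (keeps parameters and any loaded dictionary for the next frame)
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
+ *         (or : return everything to defaults and drop the dictionary)
+ */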
+
+/** ZSTD_checkCParams() :
+ checks that CParam values remain within the authorized range.
+ @return : 0, or an error code if one value is beyond its authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+ BOUNDCHECK(ZSTD_c_windowLog, cParams.windowLog);
+ BOUNDCHECK(ZSTD_c_chainLog, cParams.chainLog);
+ BOUNDCHECK(ZSTD_c_hashLog, cParams.hashLog);
+ BOUNDCHECK(ZSTD_c_searchLog, cParams.searchLog);
+ BOUNDCHECK(ZSTD_c_minMatch, cParams.minMatch);
+ BOUNDCHECK(ZSTD_c_targetLength,cParams.targetLength);
+ BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ return 0;
+}
+
+/** ZSTD_clampCParams() :
+ * clamps CParam values into their valid range.
+ * @return : valid CParams */
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+# define CLAMP_TYPE(cParam, val, type) { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ }
+# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, int)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+ CLAMP(ZSTD_c_hashLog, cParams.hashLog);
+ CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+ CLAMP(ZSTD_c_minMatch, cParams.minMatch);
+ CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+ CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
+ return cParams;
+}
+
+/** ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+ return hashLog - btScale;
+}
+
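+/* Editor's note (not part of upstream zstd), reading the btScale term above : for the
+ * binary-tree strategies (ZSTD_btlazy2 and above) the chain table stores two entries
+ * per position, so the index cycle is half the table size and cycleLog = chainLog - 1;
+ * e.g. chainLog=20 with ZSTD_btlazy2 gives cycleLog=19, while ZSTD_fast..ZSTD_lazy2
+ * keep cycleLog == chainLog. */
+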
+/** ZSTD_adjustCParams_internal() :
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ * mostly downsize to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
+{
+ static const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+ assert(ZSTD_checkCParams(cPar)==0);
+
+ if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
+ srcSize = minSrcSize; /* presumed small when there is a dictionary */
+ else if (srcSize == 0)
+ srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */
+
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
+ if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
+ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cycleLog > cPar.windowLog)
+ cPar.chainLog -= (cycleLog - cPar.windowLog);
+ }
+
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+
+ return cPar;
+}
+
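+/* Editor's worked example for the resizing above (not part of upstream zstd) :
+ * compressing a known 100 KB input with no dictionary gives tSize = 102400, so
+ * srcLog = ZSTD_highbit32(102399) + 1 = 17; a requested windowLog of 23 (typical of
+ * high compression levels) is therefore reduced to 17, and hashLog is then capped
+ * at windowLog + 1 = 18. */
+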
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
+{
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
+}
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+ if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
+ if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
+ if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
+ if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
+ if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
+ if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
+ if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+ assert(!ZSTD_checkCParams(cParams));
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+}
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const U32 forCCtx)
+{
+ size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = ((size_t)1) << hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+ size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
+ + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
+ size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
+ ? optPotentialSpace
+ : 0;
+ DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+ (U32)chainSize, (U32)hSize, (U32)h3Size);
+ return tableSpace + optSpace;
+}
+
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ U32 const divider = (cParams.minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
+ size_t const entropySpace = HUF_WORKSPACE_SIZE;
+ size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
+
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
+ size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);
+
+ size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
+ matchStateSize + ldmSpace + ldmSeqSpace;
+
+ DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
+ DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
+ return sizeof(ZSTD_CCtx) + neededSpace;
+ }
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
+ return ZSTD_estimateCCtxSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
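+/* Editor's illustrative sketch (not part of upstream zstd) : using the estimate above
+ * to provision a malloc-free compression context, which is then used at the same
+ * compression level. Assumes <stdlib.h>; error handling omitted.
+ *
+ *     size_t const ctxSize = ZSTD_estimateCCtxSize(3);
+ *     void* const workspace = malloc(ctxSize);
+ *     ZSTD_CCtx* const staticCCtx = ZSTD_initStaticCCtx(workspace, ctxSize);
+ *     size_t const cSize = ZSTD_compressCCtx(staticCCtx, dst, dstCapacity, src, srcSize, 3);
+ */
+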
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+ size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
+ size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
+ size_t const streamingSize = inBuffSize + outBuffSize;
+
+ return CCtxSize + streamingSize;
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
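+/* Editor's illustrative sketch (not part of upstream zstd) : the streaming estimate
+ * above plays the same role for a statically provisioned CStream; `workspace` is a
+ * caller-provided buffer of at least streamSize bytes.
+ *
+ *     size_t const streamSize = ZSTD_estimateCStreamSize(3);
+ *     ZSTD_CStream* const zcs = ZSTD_initStaticCStream(workspace, streamSize);
+ */
+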
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It is able to count progression inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
+ return ZSTDMT_getFrameProgression(cctx->mtctx);
+ }
+#endif
+ { ZSTD_frameProgression fp;
+ size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+ cctx->inBuffPos - cctx->inToCompress;
+ if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+ assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+ fp.ingested = cctx->consumedSrcSize + buffered;
+ fp.consumed = cctx->consumedSrcSize;
+ fp.produced = cctx->producedCSize;
+ fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
+ fp.currentJobID = 0;
+ fp.nbActiveWorkers = 0;
+ return fp;
+} }
+
+/*! ZSTD_toFlushNow()
+ * Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
+ return ZSTDMT_toFlushNow(cctx->mtctx);
+ }
+#endif
+ (void)cctx;
+ return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
+}
+
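+/* Editor's illustrative sketch (not part of upstream zstd) : polling progress while a
+ * worker pool compresses in streaming mode. Assumes a ZSTD_inBuffer `in` and a
+ * ZSTD_outBuffer `out` already prepared; report_progress() is a hypothetical user
+ * callback, and output-buffer management is omitted.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
+ *     while (in.pos < in.size) {
+ *         size_t const hint = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue);
+ *         if (ZSTD_isError(hint)) break;
+ *         {   ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *             report_progress(fp.ingested, fp.consumed, fp.produced);
+ *         }
+ *     }
+ */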
+
+
+static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ return (cParams1.hashLog == cParams2.hashLog)
+ & (cParams1.chainLog == cParams2.chainLog)
+ & (cParams1.strategy == cParams2.strategy) /* opt parser space */
+ & ((cParams1.minMatch==3) == (cParams2.minMatch==3)); /* hashlog3 space */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ (void)cParams1;
+ (void)cParams2;
+ assert(cParams1.windowLog == cParams2.windowLog);
+ assert(cParams1.chainLog == cParams2.chainLog);
+ assert(cParams1.hashLog == cParams2.hashLog);
+ assert(cParams1.searchLog == cParams2.searchLog);
+ assert(cParams1.minMatch == cParams2.minMatch);
+ assert(cParams1.targetLength == cParams2.targetLength);
+ assert(cParams1.strategy == cParams2.strategy);
+}
+
+/** The parameters are equivalent if ldm is not enabled in both sets or
+ * all the parameters are equivalent. */
+static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
+ ldmParams_t ldmParams2)
+{
+ return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
+ (ldmParams1.enableLdm == ldmParams2.enableLdm &&
+ ldmParams1.hashLog == ldmParams2.hashLog &&
+ ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
+ ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
+ ldmParams1.hashRateLog == ldmParams2.hashRateLog);
+}
+
+typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
+
+/* ZSTD_sufficientBuff() :
+ * checks that internal buffers exist for streaming if buffPol == ZSTDb_buffered.
+ * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
+ size_t maxNbLit1,
+ ZSTD_buffered_policy_e buffPol2,
+ ZSTD_compressionParameters cParams2,
+ U64 pledgedSrcSize)
+{
+ size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
+ size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+ size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
+ size_t const maxNbLit2 = blockSize2;
+ size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
+ (U32)neededBufferSize2, (U32)bufferSize1);
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
+ (U32)maxNbSeq2, (U32)maxNbSeq1);
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
+ (U32)maxNbLit2, (U32)maxNbLit1);
+ return (maxNbLit2 <= maxNbLit1)
+ & (maxNbSeq2 <= maxNbSeq1)
+ & (neededBufferSize2 <= bufferSize1);
+}
+
+/** Equivalence for resetCCtx purposes */
+static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
+ ZSTD_CCtx_params params2,
+ size_t buffSize1,
+ size_t maxNbSeq1, size_t maxNbLit1,
+ ZSTD_buffered_policy_e buffPol2,
+ U64 pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
+ if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
+ DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
+ return 0;
+ }
+ if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
+ DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
+ return 0;
+ }
+ if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
+ params2.cParams, pledgedSrcSize)) {
+ DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
+ return 0;
+ }
+ return 1;
+}
+
+static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
+{
+ int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ bs->rep[i] = repStartValue[i];
+ bs->entropy.huf.repeatMode = HUF_repeat_none;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
+}
+
+/*! ZSTD_invalidateMatchState()
+ * Invalidate all the matches in the match finder tables.
+ * Requires nextSrc and base to be set (can be NULL).
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+ ZSTD_window_clear(&ms->window);
+
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->nextToUpdate3 = ms->window.dictLimit;
+ ms->loadedDictEnd = 0;
+ ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->dictMatchState = NULL;
+}
+
+/*! ZSTD_continueCCtx() :
+ * reuse CCtx without reset (note : requires no dictionary) */
+static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
+{
+ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");
+
+ cctx->blockSize = blockSize; /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
+ cctx->appliedParams = params;
+ cctx->blockState.matchState.cParams = params.cParams;
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ cctx->consumedSrcSize = 0;
+ cctx->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ cctx->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
+ cctx->stage = ZSTDcs_init;
+ cctx->dictID = 0;
+ if (params.ldmParams.enableLdm)
+ ZSTD_window_clear(&cctx->ldmState.window);
+ ZSTD_referenceExternalSequences(cctx, NULL, 0);
+ ZSTD_invalidateMatchState(&cctx->blockState.matchState);
+ ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
+ XXH64_reset(&cctx->xxhState, 0);
+ return 0;
+}
+
+typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
+
+static void*
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ void* ptr,
+ const ZSTD_compressionParameters* cParams,
+ ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+{
+ size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = ((size_t)1) << hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+ assert(((size_t)ptr & 3) == 0);
+
+ ms->hashLog3 = hashLog3;
+ memset(&ms->window, 0, sizeof(ms->window));
+ ms->window.dictLimit = 1; /* start from 1, so that 1st position is valid */
+ ms->window.lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
+ ms->window.nextSrc = ms->window.base + 1; /* see issue #1241 */
+ ZSTD_invalidateMatchState(ms);
+
+ /* opt parser space */
+ if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
+ DEBUGLOG(4, "reserving optimal parser space");
+ ms->opt.litFreq = (unsigned*)ptr;
+ ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
+ ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
+ ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
+ ptr = ms->opt.offCodeFreq + (MaxOff+1);
+ ms->opt.matchTable = (ZSTD_match_t*)ptr;
+ ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
+ ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
+ ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
+ }
+
+ /* table Space */
+ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
+ assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+ if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */
+ ms->hashTable = (U32*)(ptr);
+ ms->chainTable = ms->hashTable + hSize;
+ ms->hashTable3 = ms->chainTable + chainSize;
+ ptr = ms->hashTable3 + h3Size;
+
+ ms->cParams = *cParams;
+
+ assert(((size_t)ptr & 3) == 0);
+ return ptr;
+}
+
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when the workspace has been too large
+ * for at least this many consecutive invocations,
+ * the context's memory usage is considered wasteful,
+ * because it is sized to handle a worst-case scenario which rarely happens.
+ * In that case, resize it down to free some memory */
+
+/*! ZSTD_resetCCtx_internal() :
+ note : `params` are assumed fully validated at this stage */
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_compResetPolicy_e const crp,
+ ZSTD_buffered_policy_e const zbuff)
+{
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
+ (U32)pledgedSrcSize, params.cParams.windowLog);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+
+ if (crp == ZSTDcrp_continue) {
+ if (ZSTD_equivalentParams(zc->appliedParams, params,
+ zc->inBuffSize,
+ zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
+ zbuff, pledgedSrcSize)) {
+ DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
+ zc->appliedParams.cParams.windowLog, zc->blockSize);
+ zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */
+ if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
+ return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
+ } }
+ DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
+
+ if (params.ldmParams.enableLdm) {
+ /* Adjust long distance matching parameters */
+ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+ assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+ assert(params.ldmParams.hashRateLog < 32);
+ zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
+ }
+
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
+ size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
+ size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
+ void* ptr; /* used to partition workSpace */
+
+ /* Check if workSpace is large enough, alloc a new one if needed */
+ { size_t const entropySpace = HUF_WORKSPACE_SIZE;
+ size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
+ size_t const bufferSpace = buffInSize + buffOutSize;
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
+ size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
+
+ size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
+ ldmSeqSpace + matchStateSize + tokenSpace +
+ bufferSpace;
+
+ int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
+ int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
+ int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
+ zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
+
+ DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
+ neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+ if (workSpaceTooSmall || workSpaceWasteful) {
+ DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
+ zc->workSpaceSize >> 10,
+ neededSpace >> 10);
+
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
+
+ zc->workSpaceSize = 0;
+ ZSTD_free(zc->workSpace, zc->customMem);
+ zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
+ RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
+ zc->workSpaceSize = neededSpace;
+ zc->workSpaceOversizedDuration = 0;
+
+ /* Statically sized space.
+ * entropyWorkspace never moves,
+ * though prev/next block swap places */
+ assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */
+ assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
+ zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
+ zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
+ ptr = zc->blockState.nextCBlock + 1;
+ zc->entropyWorkspace = (U32*)ptr;
+ } }
+
+ /* init params */
+ zc->appliedParams = params;
+ zc->blockState.matchState.cParams = params.cParams;
+ zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ zc->consumedSrcSize = 0;
+ zc->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ zc->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ zc->blockSize = blockSize;
+
+ XXH64_reset(&zc->xxhState, 0);
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
+
+ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+
+ ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;
+
+ /* ldm hash table */
+ /* initialize bucketOffsets table later for pointer alignment */
+ if (params.ldmParams.enableLdm) {
+ size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
+ memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
+ assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+ zc->ldmState.hashTable = (ldmEntry_t*)ptr;
+ ptr = zc->ldmState.hashTable + ldmHSize;
+ zc->ldmSequences = (rawSeq*)ptr;
+ ptr = zc->ldmSequences + maxNbLdmSeq;
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
+ }
+ assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+
+ ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
+
+ /* sequences storage */
+ zc->seqStore.maxNbSeq = maxNbSeq;
+ zc->seqStore.sequencesStart = (seqDef*)ptr;
+ ptr = zc->seqStore.sequencesStart + maxNbSeq;
+ zc->seqStore.llCode = (BYTE*) ptr;
+ zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
+ zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
+ zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
+ /* ZSTD_wildcopy() is used to copy into the literals buffer,
+ * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+ */
+ zc->seqStore.maxNbLit = blockSize;
+ ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
+
+ /* ldm bucketOffsets table */
+ if (params.ldmParams.enableLdm) {
+ size_t const ldmBucketSize =
+ ((size_t)1) << (params.ldmParams.hashLog -
+ params.ldmParams.bucketSizeLog);
+ memset(ptr, 0, ldmBucketSize);
+ zc->ldmState.bucketOffsets = (BYTE*)ptr;
+ ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
+ ZSTD_window_clear(&zc->ldmState.window);
+ }
+ ZSTD_referenceExternalSequences(zc, NULL, 0);
+
+ /* buffers */
+ zc->inBuffSize = buffInSize;
+ zc->inBuff = (char*)ptr;
+ zc->outBuffSize = buffOutSize;
+ zc->outBuff = zc->inBuff + buffInSize;
+
+ return 0;
+ }
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+ int i;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
+
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+ 8 KB, /* unused */
+ 8 KB, /* ZSTD_fast */
+ 16 KB, /* ZSTD_dfast */
+ 32 KB, /* ZSTD_greedy */
+ 32 KB, /* ZSTD_lazy */
+ 32 KB, /* ZSTD_lazy2 */
+ 32 KB, /* ZSTD_btlazy2 */
+ 32 KB, /* ZSTD_btopt */
+ 8 KB, /* ZSTD_btultra */
+ 8 KB /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize)
+{
+ size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+ return ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params.attachDictPref == ZSTD_dictForceAttach )
+ && params.attachDictPref != ZSTD_dictForceCopy
+ && !params.forceWindow; /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
+}
+
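+/* Editor's illustrative sketch (not part of upstream zstd) : the size heuristic above
+ * can be overridden through the advanced parameter it reads, e.g. to force attaching
+ * even for large inputs.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);
+ *     ZSTD_CCtx_refCDict(cctx, cdict);
+ *     ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ */
+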
+static size_t ZSTD_resetCCtx_byAttachingCDict(
+ ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ {
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+ unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Resize working context table params for input only, since the dict
+ * has its own tables. */
+ params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
+ params.cParams.windowLog = windowLog;
+ ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_continue, zbuff);
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ }
+
+ {
+ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+ - cdict->matchState.window.base);
+ const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+ if (cdictLen == 0) {
+ /* don't even attach dictionaries with no contents */
+ DEBUGLOG(4, "skipping attaching empty dictionary");
+ } else {
+ DEBUGLOG(4, "attaching dictionary into context");
+ cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+ /* prep working match state so dict matches never have negative indices
+ * when they are translated to the working context's index space. */
+ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+ cctx->blockState.matchState.window.nextSrc =
+ cctx->blockState.matchState.window.base + cdictEnd;
+ ZSTD_window_clear(&cctx->blockState.matchState.window);
+ }
+ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+ }
+ }
+
+ cctx->dictID = cdict->dictID;
+
+ /* copy block state */
+ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+ DEBUGLOG(4, "copying dictionary into context");
+
+ { unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Copy only compression parameters related to tables. */
+ params.cParams = *cdict_cParams;
+ params.cParams.windowLog = windowLog;
+ ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_noMemset, zbuff);
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+ assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
+ }
+
+ /* copy tables */
+ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+ size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
+ size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
+ assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */
+ assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
+ assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize); /* chainTable must follow hashTable */
+ assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
+ memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */
+ }
+
+ /* Zero the hashTable3, since the cdict never fills it */
+ { size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
+ assert(cdict->matchState.hashLog3 == 0);
+ memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+ }
+
+ /* copy dictionary offsets */
+ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+
+ cctx->dictID = cdict->dictID;
+
+ /* copy block state */
+ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
+
+ if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+ return ZSTD_resetCCtx_byAttachingCDict(
+ cctx, cdict, params, pledgedSrcSize, zbuff);
+ } else {
+ return ZSTD_resetCCtx_byCopyingCDict(
+ cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+}
+
+/*! ZSTD_copyCCtx_internal() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * The "context", in this case, refers to the hash and chain tables,
+ * entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+ const ZSTD_CCtx* srcCCtx,
+ ZSTD_frameParameters fParams,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
+
+ memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
+ params.fParams = fParams;
+ ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
+ ZSTDcrp_noMemset, zbuff);
+ assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+ assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+ assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+ assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+ assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+ }
+
+ /* copy tables */
+ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
+ size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+ size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+ assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */
+ assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
+ memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace); /* presumes all tables follow each other */
+ }
+
+ /* copy dictionary offsets */
+ {
+ const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+ dstCCtx->dictID = srcCCtx->dictID;
+
+ /* copy block state */
+ memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+
+ return 0;
+}
+
+/*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * pledgedSrcSize==0 means "unknown".
+* @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+ ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
+ ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+ return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+ fParams, pledgedSrcSize,
+ zbuff);
+}
+
+
+#define ZSTD_ROWSIZE 16
+/*! ZSTD_reduceTable() :
+ * reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
+ * It must be set to a clear 0/1 value, to remove branch during inlining.
+ * Presume table size is a multiple of ZSTD_ROWSIZE
+ * to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+ int const nbRows = (int)size / ZSTD_ROWSIZE;
+ int cellNb = 0;
+ int rowNb;
+ assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
+ assert(size < (1U<<31)); /* can be cast to int */
+ for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+ int column;
+ for (column=0; column<ZSTD_ROWSIZE; column++) {
+ if (preserveMark) {
+ U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
+ table[cellNb] += adder;
+ }
+ if (table[cellNb] < reducerValue) table[cellNb] = 0;
+ else table[cellNb] -= reducerValue;
+ cellNb++;
+ } }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
+
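+/* Editor's note (not part of upstream zstd) : with preserveMark set, an entry equal to
+ * ZSTD_DUBT_UNSORTED_MARK first gets reducerValue added, so the subtraction leaves it
+ * unchanged instead of rescaling it; an ordinary index such as 0x01000010 with
+ * reducerValue = 0x01000000 becomes 0x10 as expected. */
+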
+/*! ZSTD_reduceIndex() :
+* rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+{
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
+ ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+ }
+
+ if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
+ U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
+ if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
+ ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+ else
+ ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+ }
+
+ if (ms->hashLog3) {
+ U32 const h3Size = (U32)1 << ms->hashLog3;
+ ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+ }
+}
+
+
+/*-*******************************************************
+* Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+{
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+ dstSize_tooSmall);
+ MEM_writeLE24(dst, cBlockHeader24);
+ memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ return ZSTD_blockHeaderSize + srcSize;
+}
+
+static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE* const)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ memcpy(ostart + flSize, src, srcSize);
+ return srcSize + flSize;
+}
+
+static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE* const)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ostart[flSize] = *(const BYTE*)src;
+ return flSize+1;
+}
+
+
+/* ZSTD_minGain() :
+ * minimum compression gain required
+ * to generate a compressed block or a compressed literals section.
+ * note : the same formula is used for both situations */
+static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+ U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+ ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ return (srcSize >> minlog) + 2;
+}
+
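+/* Editor's worked example (not part of upstream zstd) : for a 100000-byte input with a
+ * strategy below ZSTD_btultra, minlog is 6, so minGain = (100000 >> 6) + 2 = 1564 bytes;
+ * the compressed form is kept only if it saves more than that many bytes over the
+ * uncompressed representation. */
+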
+static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ const int bmi2)
+{
+ size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+ BYTE* const ostart = (BYTE*)dst;
+ U32 singleStream = srcSize < 256;
+ symbolEncodingType_e hType = set_compressed;
+ size_t cLitSize;
+
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
+ disableLiteralCompression);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralCompression)
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+ /* small ? don't even attempt compression (speed opt) */
+# define COMPRESS_LITERALS_SIZE_MIN 63
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+ { HUF_repeat repeat = prevHuf->repeatMode;
+ int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+ workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
+ : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+ workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+ if (repeat != HUF_repeat_none) {
+ /* reused the existing table */
+ hType = set_repeat;
+ }
+ }
+
+ if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+ if (cLitSize==1) {
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ }
+
+ if (hType == set_compressed) {
+ /* using a newly constructed table */
+ nextHuf->repeatMode = HUF_repeat_check;
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ return lhSize+cLitSize;
+}
+
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
+ assert(nbSeq <= seqStorePtr->maxNbSeq);
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
+ U32 const mlv = sequences[u].matchLength;
+ llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+ mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ }
+ if (seqStorePtr->longLengthID==1)
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ if (seqStorePtr->longLengthID==2)
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
+
+
+/**
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+ 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+ 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
+ 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
+ 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
+ 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
+ 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
+ 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
+ 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
+ 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
+ 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
+ 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
+ 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
+ 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
+ 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
+ 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
+ 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
+ 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
+ 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
+ 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
+ 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
+ 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
+ 5, 4, 2, 1,
+};
+
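+/* Editor's sanity check on the table above (not part of upstream zstd) : entry 128 is 256,
+ * matching floor(-log2(128/256) * 256) = 256, i.e. one bit in 8-bit fixed point, and
+ * entry 64 is 512, i.e. two bits. */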
+
+/**
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+ unsigned cost = 0;
+ unsigned s;
+ for (s = 0; s <= max; ++s) {
+ unsigned norm = (unsigned)((256 * count[s]) / total);
+ if (count[s] != 0 && norm == 0)
+ norm = 1;
+ assert(count[s] < total);
+ cost += count[s] * kInverseProbabilityLog256[norm];
+ }
+ return cost >> 8;
+}
+
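+/* Editor's worked example for ZSTD_entropyCost() above (not part of upstream zstd) :
+ * with two symbols of count 128 each (total 256), each normalizes to 128, costing
+ * kInverseProbabilityLog256[128] = 256 per occurrence; the sum 256 * 256 = 65536,
+ * shifted right by 8, gives 256 bits, i.e. exactly 1 bit per symbol, which matches
+ * the entropy bound. */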
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed to be >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max)
+{
+ unsigned const shift = 8 - accuracyLog;
+ size_t cost = 0;
+ unsigned s;
+ assert(accuracyLog <= 8);
+ for (s = 0; s <= max; ++s) {
+ unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+ unsigned const norm256 = normAcc << shift;
+ assert(norm256 > 0);
+ assert(norm256 < 256);
+ cost += count[s] * kInverseProbabilityLog256[norm256];
+ }
+ return cost >> 8;
+}
+
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+ void const* ptr = ctable;
+ U16 const* u16ptr = (U16 const*)ptr;
+ U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+ return maxSymbolValue;
+}
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+static size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max)
+{
+ unsigned const kAccuracyLog = 8;
+ size_t cost = 0;
+ unsigned s;
+ FSE_CState_t cstate;
+ FSE_initCState(&cstate, ctable);
+ RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
+ "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ ZSTD_getFSEMaxSymbolValue(ctable), max);
+ for (s = 0; s <= max; ++s) {
+ unsigned const tableLog = cstate.stateLog;
+ unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+ unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+ if (count[s] == 0)
+ continue;
+ RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
+ "Repeat FSE_CTable has Prob[%u] == 0", s);
+ cost += count[s] * bitCost;
+ }
+ return cost >> kAccuracyLog;
+}
+
+/**
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+ size_t const nbSeq, unsigned const FSELog)
+{
+ BYTE wksp[FSE_NCOUNTBOUND];
+ S16 norm[MaxSeq + 1];
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+ return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+
+typedef enum {
+ ZSTD_defaultDisallowed = 0,
+ ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+MEM_STATIC symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy)
+{
+ ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+ if (mostFrequent == nbSeq) {
+ *repeatMode = FSE_repeat_none;
+ if (isDefaultAllowed && nbSeq <= 2) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ DEBUGLOG(5, "Selected set_basic");
+ return set_basic;
+ }
+ DEBUGLOG(5, "Selected set_rle");
+ return set_rle;
+ }
+ if (strategy < ZSTD_lazy) {
+ if (isDefaultAllowed) {
+ size_t const staticFse_nbSeq_max = 1000;
+ size_t const mult = 10 - strategy;
+ size_t const baseLog = 3;
+ size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
+ assert(mult <= 9 && mult >= 7);
+ if ( (*repeatMode == FSE_repeat_valid)
+ && (nbSeq < staticFse_nbSeq_max) ) {
+ DEBUGLOG(5, "Selected set_repeat");
+ return set_repeat;
+ }
+ if ( (nbSeq < dynamicFse_nbSeq_min)
+ || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+ DEBUGLOG(5, "Selected set_basic");
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ }
+ } else {
+ size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+ size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+ size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+ size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+ if (isDefaultAllowed) {
+ assert(!ZSTD_isError(basicCost));
+ assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+ }
+ assert(!ZSTD_isError(NCountCost));
+ assert(compressedCost < ERROR(maxCode));
+ DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+ if (basicCost <= repeatCost && basicCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_basic");
+ assert(isDefaultAllowed);
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ if (repeatCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_repeat");
+ assert(!ZSTD_isError(repeatCost));
+ return set_repeat;
+ }
+ assert(compressedCost < basicCost && compressedCost < repeatCost);
+ }
+ DEBUGLOG(5, "Selected set_compressed");
+ *repeatMode = FSE_repeat_check;
+ return set_compressed;
+}
+
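+/* Quick reference for the outcomes above (as realized by ZSTD_buildCTable() below) :
+ *   set_rle        : every sequence uses the same code; emit that code once (1 byte).
+ *   set_basic      : use the predefined default distribution; nothing is written.
+ *   set_repeat     : reuse the FSE table from the previous block; nothing is written.
+ *   set_compressed : build a fresh FSE table and write its normalized counts. */
+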
+MEM_STATIC size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* workspace, size_t workspaceSize)
+{
+ BYTE* op = (BYTE*)dst;
+ const BYTE* const oend = op + dstCapacity;
+ DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
+
+ switch (type) {
+ case set_rle:
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
+ *op = codeTable[0];
+ return 1;
+ case set_repeat:
+ memcpy(nextCTable, prevCTable, prevCTableSize);
+ return 0;
+ case set_basic:
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
+ return 0;
+ case set_compressed: {
+ S16 norm[MaxSeq + 1];
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ if (count[codeTable[nbSeq-1]] > 1) {
+ count[codeTable[nbSeq-1]]--;
+ nbSeq_1--;
+ }
+ assert(nbSeq_1 > 1);
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+ { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+ FORWARD_IF_ERROR(NCountSize);
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+ return NCountSize;
+ }
+ }
+ default: assert(0); RETURN_ERROR(GENERIC);
+ }
+}
+
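+/* ZSTD_encodeSequences_body() :
+ * Sequences are pushed into the bitstream from last to first, and the three FSE
+ * states are flushed at the end, because FSE is decoded in the reverse order of
+ * encoding : the decoder reads the bitstream from its end and therefore produces
+ * sequences in forward order. The MEM_32bits() flushes keep the 32-bit
+ * bit-container from overflowing. */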
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+ dstSize_tooSmall, "not enough space remaining");
+ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+ (int)(blockStream.endPtr - blockStream.startPtr),
+ (unsigned)dstCapacity);
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ U32 const ofBits = ofCodeTable[nbSeq-1];
+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+ BIT_flushBits(&blockStream);
+ }
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+ ofBits - extraBits);
+ } else {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+ }
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode;
+ U32 const mlBits = ML_bits[mlCode];
+ DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].matchLength + MINMATCH,
+ (unsigned)sequences[n].offset);
+ /* 32b*/ /* 64b*/
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+ if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+ BIT_flushBits(&blockStream); /* (7)*/
+ }
+ BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+ ofBits - extraBits); /* 31 */
+ } else {
+ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+ }
+ BIT_flushBits(&blockStream); /* (7)*/
+ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+ } }
+
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+ return streamSize;
+ }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_encodeSequences_bmi2(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+static size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+ DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+ }
+#endif
+ (void)bmi2;
+ return ZSTD_encodeSequences_default(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+{
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_lcm_huffman:
+ return 0;
+ case ZSTD_lcm_uncompressed:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ /* fall-through */
+ case ZSTD_lcm_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+}
+
+/* ZSTD_compressSequences_internal():
+ * actually compresses both literals and sequences */
+MEM_STATIC size_t
+ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ void* workspace, size_t wkspSize,
+ const int bmi2)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ unsigned count[MaxSeq+1];
+ FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
+ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* seqHead;
+ BYTE* lastNCount = NULL;
+
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ DEBUGLOG(5, "ZSTD_compressSequences_internal");
+
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const litSize = seqStorePtr->lit - literals;
+ size_t const cSize = ZSTD_compressLiterals(
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_disableLiteralsCompression(cctxParams),
+ op, dstCapacity,
+ literals, litSize,
+ workspace, wkspSize,
+ bmi2);
+ FORWARD_IF_ERROR(cSize);
+ assert(cSize <= dstCapacity);
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall);
+ if (nbSeq < 0x7F)
+ *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ)
+ op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else
+ op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) {
+ /* Copy the old tables over as if we repeated them */
+ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ return op - ostart;
+ }
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
+ LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->fse.litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+ count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(countSize);
+ if (LLtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
+ Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->fse.offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+ count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(countSize);
+ if (Offtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
+ MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->fse.matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+ count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(countSize);
+ if (MLtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ } }
+
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, oend - op,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize);
+ op += bitstreamSize;
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+ if (lastNCount && (op - lastNCount) < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(op - lastNCount == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
+ return op - ostart;
+}
+
+MEM_STATIC size_t
+ZSTD_compressSequences(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2)
+{
+ size_t const cSize = ZSTD_compressSequences_internal(
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+ dst, dstCapacity,
+ workspace, wkspSize, bmi2);
+ if (cSize == 0) return 0;
+ /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+ * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
+ */
+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ return 0; /* block not compressed */
+ FORWARD_IF_ERROR(cSize);
+
+ /* Check compressibility */
+ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ if (cSize >= maxCSize) return 0; /* block not compressed */
+ }
+
+ return cSize;
+}
+
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
+{
+ static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
+ { ZSTD_compressBlock_fast /* default for 0 */,
+ ZSTD_compressBlock_fast,
+ ZSTD_compressBlock_doubleFast,
+ ZSTD_compressBlock_greedy,
+ ZSTD_compressBlock_lazy,
+ ZSTD_compressBlock_lazy2,
+ ZSTD_compressBlock_btlazy2,
+ ZSTD_compressBlock_btopt,
+ ZSTD_compressBlock_btultra,
+ ZSTD_compressBlock_btultra2 },
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ ZSTD_compressBlock_fast_extDict,
+ ZSTD_compressBlock_doubleFast_extDict,
+ ZSTD_compressBlock_greedy_extDict,
+ ZSTD_compressBlock_lazy_extDict,
+ ZSTD_compressBlock_lazy2_extDict,
+ ZSTD_compressBlock_btlazy2_extDict,
+ ZSTD_compressBlock_btopt_extDict,
+ ZSTD_compressBlock_btultra_extDict,
+ ZSTD_compressBlock_btultra_extDict },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+ ZSTD_compressBlock_doubleFast_dictMatchState,
+ ZSTD_compressBlock_greedy_dictMatchState,
+ ZSTD_compressBlock_lazy_dictMatchState,
+ ZSTD_compressBlock_lazy2_dictMatchState,
+ ZSTD_compressBlock_btlazy2_dictMatchState,
+ ZSTD_compressBlock_btopt_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState }
+ };
+ ZSTD_blockCompressor selectedCompressor;
+ ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ assert(selectedCompressor != NULL);
+ return selectedCompressor;
+}
+
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+}
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->longLengthID = 0;
+}
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ size_t cSize;
+ DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+
+ /* Assert that we have correctly flushed the ctx params into the ms's copy */
+ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ cSize = 0;
+ goto out; /* don't even attempt compression below a certain srcSize */
+ }
+ ZSTD_resetSeqStore(&(zc->seqStore));
+ /* required for optimal parser to read stats from dictionary */
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+ /* tell the optimal parser how we expect to compress literals */
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
+
+ /* a gap between an attached dict and the current window is not safe,
+ * they must remain adjacent,
+ * and when that stops being the case, the dict must be unset */
+ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
+
+ /* limited update after a very long match */
+ { const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 current = (U32)(istart-base);
+ if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
+ if (current > ms->nextToUpdate + 384)
+ ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
+ }
+
+ /* select and store sequences */
+ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
+ size_t lastLLSize;
+ { int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+ }
+ if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+ assert(!zc->appliedParams.ldmParams.enableLdm);
+ /* Updates externSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&zc->externSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ src, srcSize);
+ assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+ } else if (zc->appliedParams.ldmParams.enableLdm) {
+ rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
+
+ ldmSeqStore.seq = zc->ldmSequences;
+ ldmSeqStore.capacity = zc->maxNbLdmSequences;
+ /* Updates ldmSeqStore.size */
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+ &zc->appliedParams.ldmParams,
+ src, srcSize));
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&ldmSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ src, srcSize);
+ assert(ldmSeqStore.pos == ldmSeqStore.size);
+ } else { /* not long range mode */
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ }
+ { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+ } }
+
+ /* encode sequences and literals */
+ cSize = ZSTD_compressSequences(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ srcSize,
+ zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+
+out:
+ if (!ZSTD_isError(cSize) && cSize != 0) {
+ /* confirm repcodes and entropy tables when emitting a compressed block */
+ ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+ zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+ zc->blockState.nextCBlock = tmp;
+ }
+ /* We check that dictionaries have offset codes available for the first
+ * block. After the first block, the offcode table might not have large
+ * enough codes to represent the offsets in the data.
+ */
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+
+/*! ZSTD_compress_frameChunk() :
+* Compress a chunk of data into one or multiple blocks.
+* All blocks will be terminated, all input will be consumed.
+* The function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+* The frame is assumed to have already been started (header already produced)
+* @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+{
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+ assert(cctx->appliedParams.cParams.windowLog <= 31);
+
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+ XXH64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ dstSize_tooSmall,
+ "not enough space to store compressed block");
+ if (remaining < blockSize) blockSize = remaining;
+
+ if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
+ U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_reduceIndex(cctx, correction);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+ ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+
+ { size_t cSize = ZSTD_compressBlock_internal(cctx,
+ op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
+ ip, blockSize);
+ FORWARD_IF_ERROR(cSize);
+
+ if (cSize == 0) { /* block is not compressible */
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize);
+ } else {
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader24);
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ ip += blockSize;
+ assert(remaining >= blockSize);
+ remaining -= blockSize;
+ op += cSize;
+ assert(dstCapacity >= cSize);
+ dstCapacity -= cSize;
+ DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+ (unsigned)cSize);
+ } }
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+ return op-ostart;
+}
+
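+/* Block header layout, as written above with MEM_writeLE24() :
+ *   bit 0     : lastBlock flag
+ *   bits 1-2  : block type (bt_raw / bt_rle / bt_compressed)
+ *   bits 3-23 : block size (compressed size for bt_compressed blocks)
+ * i.e. cBlockHeader24 = lastBlock + (blockType << 1) + (blockSize << 3). */
+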
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+ ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
+{ BYTE* const op = (BYTE*)dst;
+ U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
+ U32 const checksumFlag = params.fParams.checksumFlag>0;
+ U32 const windowSize = (U32)1 << params.cParams.windowLog;
+ U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+ BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params.fParams.contentSizeFlag ?
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ size_t pos=0;
+
+ assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+
+ if (params.format == ZSTD_f_zstd1) {
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
+ op[pos++] = frameHeaderDescriptionByte;
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
+ default: assert(0); /* impossible */
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
+ default: assert(0); /* impossible */
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+}
+
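+/* Frame header layout produced above (zstd format) :
+ *   [magic number : 4 bytes, ZSTD_f_zstd1 only]
+ *   [frame header descriptor : 1 byte]
+ *   [window descriptor : 0-1 byte, omitted when singleSegment]
+ *   [dictionary ID : 0, 1, 2 or 4 bytes, per dictIDSizeCode]
+ *   [frame content size : 0, 1, 2, 4 or 8 bytes, per fcsCode / singleSegment] */
+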
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
+ { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
+ MEM_writeLE24(dst, cBlockHeader24);
+ return ZSTD_blockHeaderSize;
+ }
+}
+
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
+ parameter_unsupported);
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
+ return 0;
+}
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+{
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ size_t fhSize = 0;
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+ cctx->stage, (unsigned)srcSize);
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+ "missing init (ZSTD_compressBegin)");
+
+ if (frame && (cctx->stage==ZSTDcs_init)) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ FORWARD_IF_ERROR(fhSize);
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (!srcSize) return fhSize; /* do not generate an empty block if no input */
+
+ if (!ZSTD_window_update(&ms->window, src, srcSize)) {
+ ms->nextToUpdate = ms->window.dictLimit;
+ }
+ if (cctx->appliedParams.ldmParams.enableLdm) {
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+ }
+
+ if (!frame) {
+ /* overflow check and correction for block mode */
+ if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
+ U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_reduceIndex(cctx, correction);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+ }
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ { size_t const cSize = frame ?
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
+ FORWARD_IF_ERROR(cSize);
+ cctx->consumedSrcSize += srcSize;
+ cctx->producedCSize += (cSize + fhSize);
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ RETURN_ERROR_IF(
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ return cSize + fhSize;
+ }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+}
+
+
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+ assert(!ZSTD_checkCParams(cParams));
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
+
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+}
+
+/*! ZSTD_loadDictionaryContent() :
+ * @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const BYTE* const ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ ZSTD_window_update(&ms->window, src, srcSize);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+
+ /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, dtlm);
+ break;
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ if (srcSize >= HASH_READ_SIZE)
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
+ break;
+
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ if (srcSize >= HASH_READ_SIZE)
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+ break;
+
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ ms->nextToUpdate = (U32)(iend - ms->window.base);
+ return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that do show up cause problems
+ during FSE encoding. Refuse dictionaries that assign zero probability to symbols
+ that we may encounter during compression.
+ NOTE: This behavior is not standard and could be improved in the future. */
+static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
+ U32 s;
+ RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
+ for (s = 0; s <= maxSymbolValue; ++s) {
+ RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
+ }
+ return 0;
+}
+
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ * assumptions : the magic number is assumed to have been checked already
+ * dictSize is assumed to be > 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ZSTD_CCtx_params const* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
+ size_t dictID;
+
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(dictSize > 8);
+ assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
+
+ dictPtr += 4; /* skip magic number */
+ dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
+ dictPtr += 4;
+
+ { unsigned maxSymbolValue = 255;
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
+ dictPtr += hufHeaderSize;
+ }
+
+ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+ /* fill all offset symbols to avoid garbage at end of table */
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
+ /* Every match length code must have non-zero probability */
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
+ /* Every literal length code must have non-zero probability */
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
+ bs->rep[0] = MEM_readLE32(dictPtr+0);
+ bs->rep[1] = MEM_readLE32(dictPtr+4);
+ bs->rep[2] = MEM_readLE32(dictPtr+8);
+ dictPtr += 12;
+
+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+ }
+ /* All offset values <= dictContentSize + 128 KB must be representable */
+ FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+ /* All repCodes must be <= dictContentSize and != 0*/
+ { U32 u;
+ for (u=0; u<3; u++) {
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
+ } }
+
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
+ return dictID;
+ }
+}
+
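+/* Layout of a zstd dictionary, as parsed above :
+ *   magic number (4 bytes) | dictID (4 bytes) | Huffman table for literals |
+ *   FSE table for offset codes | FSE table for match lengths |
+ *   FSE table for literal lengths | 3 repcodes (12 bytes) | dictionary content.
+ * See doc/zstd_compression_format.md#dictionary-format for the authoritative spec. */
+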
+/** ZSTD_compress_insertDictionary() :
+* @return : dictID, or an error code */
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ const ZSTD_CCtx_params* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+ if ((dict==NULL) || (dictSize<=8)) return 0;
+
+ ZSTD_reset_compressedBlockState(bs);
+
+ /* dict restricted modes */
+ if (dictContentType == ZSTD_dct_rawContent)
+ return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
+
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_auto) {
+ DEBUGLOG(4, "raw content dictionary detected");
+ return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
+ }
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
+ assert(0); /* impossible */
+ }
+
+ /* dict as full zstd dictionary */
+ return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
+}
+
+/*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
+ /* params are supposed to be fully validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+
+ if (cdict && cdict->dictContentSize>0) {
+ return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_continue, zbuff) );
+ {
+ size_t const dictID = ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID);
+ assert(dictID <= (size_t)(U32)-1);
+ cctx->dictID = (U32)dictID;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
+ /* compression parameters verification and optimization */
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
+ return ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, dictContentType, dtlm,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+* @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ return ZSTD_compressBegin_advanced_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ NULL /*cdict*/,
+ cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+}
+
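+/* Caller-side sketch (illustrative only, not part of upstream zstd) : the expected
+ * driving pattern for the ZSTD_compressBegin() / ZSTD_compressContinue() /
+ * ZSTD_compressEnd() API defined in this file. The helper name is hypothetical;
+ * error handling is reduced to forwarding zstd error codes. */
+#if 0
+static size_t example_begin_end_roundtrip(ZSTD_CCtx* cctx,
+                                          void* dst, size_t dstCapacity,
+                                          const void* src, size_t srcSize)
+{
+    size_t const initResult = ZSTD_compressBegin(cctx, 3 /* compression level */);
+    if (ZSTD_isError(initResult)) return initResult;
+    /* Intermediate chunks, if any, would be pushed with ZSTD_compressContinue().
+     * ZSTD_compressEnd() compresses the final chunk and writes the epilogue
+     * (last block marker + optional checksum). */
+    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+#endif
+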
+
+/*! ZSTD_writeEpilogue() :
+* Ends a frame.
+* @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
+ FORWARD_IF_ERROR(fhSize);
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+}
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t endResult;
+ size_t const cSize = ZSTD_compressContinue_internal(cctx,
+ dst, dstCapacity, src, srcSize,
+ 1 /* frame mode */, 1 /* last chunk */);
+ FORWARD_IF_ERROR(cSize);
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ FORWARD_IF_ERROR(endResult);
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ DEBUGLOG(4, "end of frame : controlling src size");
+ RETURN_ERROR_IF(
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ return cSize + endResult;
+}
+
+
+static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
+{
+ ZSTD_CCtx_params const cctxParams =
+ ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ DEBUGLOG(4, "ZSTD_compress_internal");
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ cctxParams);
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced");
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
+ return ZSTD_compress_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ params);
+}
+
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_CCtx_params params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ params, srcSize, ZSTDb_not_buffered) );
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel)
+{
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
+ ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+ assert(params.fParams.contentSizeFlag == 1);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
+}
+
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+ assert(cctx != NULL);
+ return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ size_t result;
+ ZSTD_CCtx ctxBody;
+ ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
+ result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
+ return result;
+}
+
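+/* Caller-side sketch (illustrative only, not part of upstream zstd) : minimal
+ * one-shot usage of the public ZSTD_compress() entry point defined above. Assumes
+ * the caller includes zstd.h and sizes the destination with ZSTD_compressBound();
+ * the helper name is hypothetical. */
+#if 0
+static size_t example_one_shot(void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize)
+{
+    /* dstCapacity should be at least ZSTD_compressBound(srcSize) to guarantee success */
+    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3 /* level */);
+    if (ZSTD_isError(cSize)) return cSize;   /* forward the error code */
+    return cSize;                            /* compressed size */
+}
+#endif
+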
+
+/* ===== Dictionary API ===== */
+
+/*! ZSTD_estimateCDictSize_advanced() :
+ * Estimate the amount of memory needed to create a dictionary with the following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
+ return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support sizeof on NULL */
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
+ return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
+}
+
+static size_t ZSTD_initCDict_internal(
+ ZSTD_CDict* cdict,
+ const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
+ assert(!ZSTD_checkCParams(cParams));
+ cdict->matchState.cParams = cParams;
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+ cdict->dictBuffer = NULL;
+ cdict->dictContent = dictBuffer;
+ } else {
+ void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
+ cdict->dictBuffer = internalBuffer;
+ cdict->dictContent = internalBuffer;
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation);
+ memcpy(internalBuffer, dictBuffer, dictSize);
+ }
+ cdict->dictContentSize = dictSize;
+
+ /* Reset the state to no dictionary */
+ ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+ { void* const end = ZSTD_reset_matchState(
+ &cdict->matchState,
+ (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
+ &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
+ assert(end == (char*)cdict->workspace + cdict->workspaceSize);
+ (void)end;
+ }
+ /* (Maybe) load the dictionary
+ * Skips loading the dictionary if it is <= 8 bytes.
+ */
+ { ZSTD_CCtx_params params;
+ memset(&params, 0, sizeof(params));
+ params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ params.fParams.contentSizeFlag = 1;
+ params.cParams = cParams;
+ { size_t const dictID = ZSTD_compress_insertDictionary(
+ &cdict->cBlockState, &cdict->matchState, &params,
+ cdict->dictContent, cdict->dictContentSize,
+ dictContentType, ZSTD_dtlm_full, cdict->workspace);
+ FORWARD_IF_ERROR(dictID);
+ assert(dictID <= (size_t)(U32)-1);
+ cdict->dictID = (U32)dictID;
+ }
+ }
+
+ return 0;
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
+{
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+ size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+ void* const workspace = ZSTD_malloc(workspaceSize, customMem);
+
+ if (!cdict || !workspace) {
+ ZSTD_free(cdict, customMem);
+ ZSTD_free(workspace, customMem);
+ return NULL;
+ }
+ cdict->customMem = customMem;
+ cdict->workspace = workspace;
+ cdict->workspaceSize = workspaceSize;
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dictBuffer, dictSize,
+ dictLoadMethod, dictContentType,
+ cParams) )) {
+ ZSTD_freeCDict(cdict);
+ return NULL;
+ }
+
+ return cdict;
+ }
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+ return ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+}
+
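+/* Caller-side sketch (illustrative only, not part of upstream zstd) : typical
+ * digested-dictionary flow. ZSTD_compress_usingCDict() is the public wrapper
+ * declared in zstd.h; the helper name and buffers here are hypothetical. */
+#if 0
+static size_t example_compress_with_cdict(ZSTD_CCtx* cctx,
+                                          void* dst, size_t dstCapacity,
+                                          const void* src, size_t srcSize,
+                                          const void* dict, size_t dictSize)
+{
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
+    size_t cSize;
+    if (cdict == NULL) return ERROR(memory_allocation);
+    cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
+    ZSTD_freeCDict(cdict);
+    return cSize;
+}
+#endif
+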
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+ return ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byRef, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = cdict->customMem;
+ ZSTD_free(cdict->workspace, cMem);
+ ZSTD_free(cdict->dictBuffer, cMem);
+ ZSTD_free(cdict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_initStaticCDict() :
+ * Generate a digested dictionary in provided memory area.
+ * workspace: The memory area to emplace the dictionary into.
+ * The provided pointer must be 8-byte aligned.
+ * It must outlive dictionary usage.
+ * workspaceSize: Use ZSTD_estimateCDictSize()
+ * to determine how large workspace must be.
+ * cParams : use ZSTD_getCParams() to transform a compression level
+ * into its relevant cParams.
+ * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
+ * Note : there is no corresponding "free" function.
+ * Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+ size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
+ + HUF_WORKSPACE_SIZE + matchStateSize;
+ ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
+ void* ptr;
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
+ if (workspaceSize < neededSize) return NULL;
+
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ memcpy(cdict+1, dict, dictSize);
+ dict = cdict+1;
+ ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
+ } else {
+ ptr = cdict+1;
+ }
+ cdict->workspace = ptr;
+ cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType,
+ cParams) ))
+ return NULL;
+
+ return cdict;
+}
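+
+/* Illustrative sketch (not part of upstream zstd) : initializing a CDict in
+ * caller-provided memory with the function above. It assumes
+ * ZSTD_estimateCDictSize(dictSize, compressionLevel) as declared in zstd.h,
+ * and that the workspace is 8-byte aligned and outlives the CDict.
+ *
+ *   size_t const cdictSize = ZSTD_estimateCDictSize(dictSize, 3);
+ *   void* const wksp = malloc(cdictSize);        // malloc memory is suitably aligned
+ *   ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, dictSize);
+ *   const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, cdictSize,
+ *                                       dictBuffer, dictSize,
+ *                                       ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
+ *   if (cdict == NULL) { ... }                   // workspace too small or misaligned
+ *   // no ZSTD_freeCDict() here : simply release wksp when done
+ */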
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+ assert(cdict != NULL);
+ return cdict->matchState.cParams;
+}
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
+ { ZSTD_CCtx_params params = cctx->requestedParams;
+ params.cParams = ZSTD_getCParamsFromCDict(cdict);
+ /* Increase window log to fit the entire dictionary and source if the
+ * source size is known. Limit the increase to 19, which is the
+ * window log for compression level 1 with the largest source size.
+ */
+ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+ U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+ params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
+ }
+ params.fParams = fParams;
+ return ZSTD_compressBegin_internal(cctx,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+ }
+}
+
+/* ZSTD_compressBegin_usingCDict() :
+ * pledgedSrcSize=0 means "unknown"
+ * if pledgedSrcSize>0, it will enable contentSizeFlag */
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
+ return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+/*! ZSTD_compress_usingCDict() :
+ *  Compression using a digested Dictionary.
+ *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ *  Note that compression parameters are decided at CDict creation time
+ *  while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
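+
+/* Illustrative sketch (not part of upstream zstd) : typical CDict lifecycle using
+ * only the public API defined above. dictBuffer/dictSize and the src/dst buffers
+ * are assumed to be provided by the caller.
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
+ *   ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
+ *   size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *                                                 src, srcSize, cdict);
+ *   if (ZSTD_isError(cSize)) { ... }
+ *   // the same cdict can be reused for many compressions before being freed
+ *   ZSTD_freeCCtx(cctx);
+ *   ZSTD_freeCDict(cdict);
+ */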
+
+
+
+/* ******************************************************************
+* Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createCStream");
+ return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{ /* CStream and CCtx are now same object */
+ return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+ return ZSTD_freeCCtx(zcs); /* same object */
+}
+
+
+
+/*====== Initialization ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+ return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
+}
+
+static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
+ const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
+ const ZSTD_CDict* const cdict,
+ ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_resetCStream_internal");
+ /* Finalize the compression parameters */
+ params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
+ /* params are supposed to be fully validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, dictContentType, ZSTD_dtlm_fast,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_buffered) );
+
+ cctx->inToCompress = 0;
+ cctx->inBuffPos = 0;
+    cctx->inBuffTarget = cctx->blockSize
+                       + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require adding a 3-byte null block to end the frame */
+ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->frameEnded = 0;
+ return 0; /* ready to go */
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
+{
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users who want to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ return 0;
+}
+
+/*! ZSTD_initCStream_internal() :
+ * Note : for lib/compress only. Used by zstdmt_compress.c.
+ * Assumption 1 : params are valid
+ * Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ zcs->requestedParams = params;
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if (dict) {
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+ } else {
+ /* Dictionary is cleared if !cdict */
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+ }
+ return 0;
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ zcs->requestedParams.fParams = fParams;
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+ return 0;
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+ return 0;
+}
+
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pss)
+{
+ /* for compatibility with older programs relying on this behavior.
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+ * This line will be removed in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
+ zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+ return 0;
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+ return 0;
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+{
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users who want to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ return 0;
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ return 0;
+}
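+
+/* Illustrative sketch (not part of upstream zstd) : the classic CStream flow built
+ * from the init functions above plus ZSTD_compressStream()/ZSTD_endStream() defined
+ * later in this file. For simplicity it assumes a single caller-provided dst buffer
+ * with dstCapacity >= ZSTD_compressBound(srcSize), so output never needs draining.
+ *
+ *   ZSTD_CStream* const zcs = ZSTD_createCStream();
+ *   ZSTD_initCStream(zcs, 3);
+ *   { ZSTD_inBuffer  input  = { src, srcSize, 0 };
+ *     ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *     while (input.pos < input.size) {
+ *         size_t const hint = ZSTD_compressStream(zcs, &output, &input);
+ *         if (ZSTD_isError(hint)) { ... }
+ *     }
+ *     { size_t remaining;
+ *       do {
+ *           remaining = ZSTD_endStream(zcs, &output);   // 0 means frame fully written
+ *           if (ZSTD_isError(remaining)) { ... }
+ *       } while (remaining != 0);
+ *     }
+ *   }
+ *   ZSTD_freeCStream(zcs);
+ */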
+
+/*====== Compression ======*/
+
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSize;
+ return hintInSize;
+}
+
+static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ if (length) memcpy(dst, src, length);
+ return length;
+}
+
+/** ZSTD_compressStream_generic():
+ *  internal function for all *compressStream*() variants
+ * @return : hint size for next input */
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective const flushMode)
+{
+ const char* const istart = (const char*)input->src;
+ const char* const iend = istart + input->size;
+ const char* ip = istart + input->pos;
+ char* const ostart = (char*)output->dst;
+ char* const oend = ostart + output->size;
+ char* op = ostart + output->pos;
+ U32 someMoreWork = 1;
+
+ /* check expectations */
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ assert(zcs->inBuff != NULL);
+ assert(zcs->inBuffSize > 0);
+ assert(zcs->outBuff != NULL);
+ assert(zcs->outBuffSize > 0);
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+
+ while (someMoreWork) {
+ switch(zcs->streamStage)
+ {
+ case zcss_init:
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
+
+ case zcss_load:
+ if ( (flushMode == ZSTD_e_end)
+ && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */
+ && (zcs->inBuffPos == 0) ) {
+ /* shortcut to compression pass directly into output buffer */
+ size_t const cSize = ZSTD_compressEnd(zcs,
+ op, oend-op, ip, iend-ip);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
+ FORWARD_IF_ERROR(cSize);
+ ip = iend;
+ op += cSize;
+ zcs->frameEnded = 1;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ someMoreWork = 0; break;
+ }
+ /* complete loading into inBuffer */
+ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(
+ zcs->inBuff + zcs->inBuffPos, toLoad,
+ ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ ip += loaded;
+ if ( (flushMode == ZSTD_e_continue)
+ && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+ /* not enough input to fill full block : stop here */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (zcs->inBuffPos == zcs->inToCompress) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
+ }
+ /* compress current block (note : this stage cannot be stopped in the middle) */
+ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
+ { void* cDst;
+ size_t cSize;
+ size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+ size_t oSize = oend-op;
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+ if (oSize >= ZSTD_compressBound(iSize))
+ cDst = op; /* compress into output buffer, to skip flush stage */
+ else
+ cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize);
+ FORWARD_IF_ERROR(cSize);
+ zcs->frameEnded = lastBlock;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
+ if (!lastBlock)
+ assert(zcs->inBuffTarget <= zcs->inBuffSize);
+ zcs->inToCompress = zcs->inBuffPos;
+ if (cDst == op) { /* no need to flush */
+ op += cSize;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed directly in outBuffer");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ }
+ break;
+ }
+ zcs->outBuffContentSize = cSize;
+ zcs->outBuffFlushedSize = 0;
+ zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+ }
+ /* fall-through */
+ case zcss_flush:
+ DEBUGLOG(5, "flush stage");
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, oend-op,
+ zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ if (toFlush!=flushed) {
+ /* flush not fully completed, presumably because dst is too small */
+ assert(op==oend);
+ someMoreWork = 0;
+ break;
+ }
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed on flush");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ break;
+ }
+ zcs->streamStage = zcss_load;
+ break;
+ }
+
+ default: /* impossible */
+ assert(0);
+ }
+ }
+
+ input->pos = ip - istart;
+ output->pos = op - ostart;
+ if (zcs->frameEnded) return 0;
+ return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers >= 1) {
+ assert(cctx->mtctx != NULL);
+ return ZSTDMT_nextInputSizeHint(cctx->mtctx);
+ }
+#endif
+    return ZSTD_nextInputSizeHint(cctx);
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
+ return ZSTD_nextInputSizeHint_MTorST(zcs);
+}
+
+
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
+ /* check conditions */
+ RETURN_ERROR_IF(output->pos > output->size, GENERIC);
+ RETURN_ERROR_IF(input->pos > input->size, GENERIC);
+ assert(cctx!=NULL);
+
+ /* transparent initialization stage */
+ if (cctx->streamStage == zcss_init) {
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
+
+
+#ifdef ZSTD_MULTITHREAD
+ if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
+ params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
+ }
+ if (params.nbWorkers > 0) {
+ /* mt context creation */
+ if (cctx->mtctx == NULL) {
+ DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
+ params.nbWorkers);
+ cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
+ RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
+ }
+ /* mt compression */
+ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
+ FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
+ cctx->mtctx,
+ prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
+ cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
+ cctx->streamStage = zcss_load;
+ cctx->appliedParams.nbWorkers = params.nbWorkers;
+ } else
+#endif
+ { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
+ cctx->cdict,
+ params, cctx->pledgedSrcSizePlusOne-1) );
+ assert(cctx->streamStage == zcss_load);
+ assert(cctx->appliedParams.nbWorkers == 0);
+ } }
+ /* end of transparent initialization stage */
+
+ /* compression stage */
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
+ int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
+ size_t flushMin;
+ assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
+ if (cctx->cParamsChanged) {
+ ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
+ cctx->cParamsChanged = 0;
+ }
+ do {
+ flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+ if ( ZSTD_isError(flushMin)
+ || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ }
+ FORWARD_IF_ERROR(flushMin);
+ } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
+ DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
+ /* Either we don't require maximum forward progress, we've finished the
+ * flush, or we are out of output space.
+ */
+ assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
+ return flushMin;
+ }
+#endif
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
+ DEBUGLOG(5, "completed ZSTD_compressStream2");
+ return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
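+
+/* Illustrative sketch (not part of upstream zstd) : driving ZSTD_compressStream2()
+ * with the recommended buffer sizes. readFromSource()/writeToSink() are hypothetical
+ * caller-supplied I/O helpers, and inBuf/outBuf are assumed allocated with the sizes
+ * returned below.
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
+ *   { size_t const inSize  = ZSTD_CStreamInSize();
+ *     size_t const outSize = ZSTD_CStreamOutSize();
+ *     for (;;) {
+ *         size_t const readSize = readFromSource(inBuf, inSize);
+ *         ZSTD_EndDirective const mode = (readSize < inSize) ? ZSTD_e_end : ZSTD_e_continue;
+ *         ZSTD_inBuffer input = { inBuf, readSize, 0 };
+ *         int finished;
+ *         do {
+ *             ZSTD_outBuffer output = { outBuf, outSize, 0 };
+ *             size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
+ *             if (ZSTD_isError(remaining)) { ... }
+ *             writeToSink(outBuf, output.pos);
+ *             finished = (mode == ZSTD_e_end) ? (remaining == 0) : (input.pos == input.size);
+ *         } while (!finished);
+ *         if (mode == ZSTD_e_end) break;
+ *     }
+ *   }
+ *   ZSTD_freeCCtx(cctx);
+ */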
+
+size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ { size_t oPos = 0;
+ size_t iPos = 0;
+ size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+ dst, dstCapacity, &oPos,
+ src, srcSize, &iPos,
+ ZSTD_e_end);
+ FORWARD_IF_ERROR(result);
+ if (result != 0) { /* compression not completed, due to lack of output space */
+ assert(oPos == dstCapacity);
+ RETURN_ERROR(dstSize_tooSmall);
+ }
+        assert(iPos == srcSize);   /* all input is expected to be consumed */
+ return oPos;
+ }
+}
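+
+/* Illustrative sketch (not part of upstream zstd) : one-shot compression through the
+ * advanced-parameter path. Sizing dst with ZSTD_compressBound() guarantees that
+ * ZSTD_compress2() cannot fail with dstSize_tooSmall; src/srcSize are caller-provided.
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *   { size_t const dstCapacity = ZSTD_compressBound(srcSize);
+ *     void* const dst = malloc(dstCapacity);
+ *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *     if (ZSTD_isError(cSize)) { ... }
+ *   }
+ *   ZSTD_freeCCtx(cctx);
+ */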
+
+/*====== Finalize ======*/
+
+/*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+ FORWARD_IF_ERROR( remainingToFlush );
+ if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
+ /* single thread mode : attempt to calculate remaining to flush more precisely */
+ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+ size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
+ size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
+ return toFlush;
+ }
+}
+
+
+/*-===== Pre-defined compression levels =====-*/
+
+#define ZSTD_MAX_CLEVEL 22
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{ /* "default" - for any srcSize > 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 1, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 1, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
+ { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
+ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
+},
+{ /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, T, strat */
+ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 1, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 1, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, T, strat */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 1, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 1, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, T, strat */
+ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 1, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+};
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
+ size_t const addedSize = srcSizeHint ? 0 : 500;
+ U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN; /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
+ int row = compressionLevel;
+ DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
+ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
+ if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+ if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); /* refine parameters based on srcSize & dictSize */
+ }
+}
+
+/*! ZSTD_getParams() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ ZSTD_parameters params;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
+ DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+ memset(&params, 0, sizeof(params));
+ params.cParams = cParams;
+ params.fParams.contentSizeFlag = 1;
+ return params;
+}
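+
+/* Illustrative sketch (not part of upstream zstd) : how a (level, srcSize) pair maps
+ * into the tables above. For a 100 KB input at level 19, rSize <= 128 KB selects
+ * tableID 2, so parameters start from { 17, 18, 17, 5, 3, 256, ZSTD_btultra2 } before
+ * ZSTD_adjustCParams_internal() shrinks windowLog/chainLog/hashLog to fit the small source.
+ *
+ *   ZSTD_compressionParameters const cp = ZSTD_getCParams(19, 100 KB, 0);
+ *   assert(cp.strategy == ZSTD_btultra2);   // strategy is taken directly from the table row
+ */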
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
new file mode 100644
index 000000000..cc3cbb9da
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "zstd_internal.h"
+#ifdef ZSTD_MULTITHREAD
+# include "zstdmt_compress.h"
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define kSearchStrength 8
+#define HASH_READ_SIZE 8
+#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
+                                       It could be mistaken for a real successor at index "1", if sorted as larger than its predecessor.
+                                       It's not a big deal though : the candidate will just be sorted again.
+                                       Additionally, candidate position 1 will be lost.
+                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+                                       Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
+ U32 CTable[HUF_CTABLE_SIZE_U32(255)];
+ HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+ ZSTD_hufCTables_t huf;
+ ZSTD_fseCTables_t fse;
+} ZSTD_entropyCTables_t;
+
+typedef struct {
+ U32 off;
+ U32 len;
+} ZSTD_match_t;
+
+typedef struct {
+ int price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
+typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+ U32 matchLengthSum; /* nb of matchLength codes */
+ U32 offCodeSum; /* nb of offset codes */
+ U32 litSumBasePrice; /* to compare to log2(litfreq) */
+ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
+ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
+ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
+ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
+ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_literalCompressionMode_e literalCompressionMode;
+} optState_t;
+
+typedef struct {
+ ZSTD_entropyCTables_t entropy;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more data */
+} ZSTD_window_t;
+
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+struct ZSTD_matchState_t {
+ ZSTD_window_t window; /* State for window round buffer management */
+ U32 loadedDictEnd; /* index of end of dictionary */
+ U32 nextToUpdate; /* index from which to continue table update */
+ U32 nextToUpdate3; /* index from which to continue table update */
+ U32 hashLog3; /* dispatch table : larger == faster, more memory */
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+ optState_t opt; /* optimal parser state */
+ const ZSTD_matchState_t * dictMatchState;
+ ZSTD_compressionParameters cParams;
+};
+
+typedef struct {
+ ZSTD_compressedBlockState_t* prevCBlock;
+ ZSTD_compressedBlockState_t* nextCBlock;
+ ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+ ZSTD_window_t window; /* State for the window round buffer management */
+ ldmEntry_t* hashTable;
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ U64 hashPower; /* Used to compute the rolling hash.
+ * Depends on ldmParams.minMatchLength */
+} ldmState_t;
+
+typedef struct {
+ U32 enableLdm; /* 1 if enable long distance matching */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
+ U32 hashRateLog; /* Log number of entries to skip */
+ U32 windowLog; /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
+ U32 offset;
+ U32 litLength;
+ U32 matchLength;
+} rawSeq;
+
+typedef struct {
+ rawSeq* seq; /* The start of the sequences */
+ size_t pos; /* The position where reading stopped. <= size. */
+ size_t size; /* The number of sequences. <= capacity. */
+ size_t capacity; /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ int forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
+
+ ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_literalCompressionMode_e literalCompressionMode;
+
+ /* Multithreading: used to pass parameters to mtctx */
+ int nbWorkers;
+ size_t jobSize;
+ int overlapLog;
+ int rsyncable;
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
+ /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
+struct ZSTD_CCtx_s {
+ ZSTD_compressionStage_e stage;
+ int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
+ U32 dictID;
+
+ int workSpaceOversizedDuration;
+ void* workSpace;
+ size_t workSpaceSize;
+ size_t blockSize;
+ unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ unsigned long long consumedSrcSize;
+ unsigned long long producedCSize;
+ XXH64_state_t xxhState;
+ ZSTD_customMem customMem;
+ size_t staticSize;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ ldmState_t ldmState; /* long distance matching state */
+ rawSeq* ldmSequences; /* Storage for the ldm output sequences */
+ size_t maxNbLdmSequences;
+ rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ ZSTD_blockState_t blockState;
+ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
+ /* Dictionary */
+ ZSTD_localDict localDict;
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+#ifdef ZSTD_MULTITHREAD
+ ZSTDMT_CCtx* mtctx;
+#endif
+};
+
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
+
+
+typedef size_t (*ZSTD_blockCompressor) (
+ ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+ static const U32 LL_deltaCode = 19;
+ return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ *        because that's the format in which it's stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+ static const U32 ML_deltaCode = 36;
+ return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
+
+/*! ZSTD_storeSeq() :
+ * Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ * `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
+ * `mlBase` : matchLength - MINMATCH
+*/
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
+{
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
+ static const BYTE* g_start = NULL;
+ if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
+ { U32 const pos = (U32)((const BYTE*)literals - g_start);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
+ pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
+ }
+#endif
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+ /* copy Literals */
+ assert(seqStorePtr->maxNbLit <= 128 KB);
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
+ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
+ assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+ seqStorePtr->longLengthID = 1;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+ /* match Length */
+ if (mlBase>0xFFFF) {
+ assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+ seqStorePtr->longLengthID = 2;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].matchLength = (U16)mlBase;
+
+ seqStorePtr->sequences++;
+}
+
+
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r=0;
+ _BitScanForward( &r, (U32)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ if (pIn < pInLoopLimit) {
+ { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (diff) return ZSTD_NbCommonBytes(diff); }
+ pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ } }
+ if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
+
+/** ZSTD_count_2segments() :
+ *  can count match length with `ip` & `match` in 2 different segments.
+ *  convention : on reaching mEnd, the match count continues, starting again from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+ const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+ DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+ DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
+
+
+/*-*************************************
+ * Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
+
+/** ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+ U64 power = 1;
+ while (exponent) {
+ if (exponent & 1) power *= base;
+ exponent >>= 1;
+ base *= base;
+ }
+ return power;
+}
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/** ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+ BYTE const* istart = (BYTE const*)buf;
+ size_t pos;
+ for (pos = 0; pos < size; ++pos) {
+ hash *= prime8bytes;
+ hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ }
+ return hash;
+}
+
+/** ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+ return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/** ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of length bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+ return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/** ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+ hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+ hash *= prime8bytes;
+ hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ return hash;
+}
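+
+/* Illustrative sketch (not part of upstream zstd) : the rotate primitive keeps the
+ * hash in sync with a window sliding one byte at a time, so for any buffer buf of
+ * at least wSize+1 bytes the following holds (all arithmetic wraps modulo 2^64) :
+ *
+ *   U64 const pp   = ZSTD_rollingHash_primePower(wSize);
+ *   U64       hash = ZSTD_rollingHash_compute(buf, wSize);
+ *   hash = ZSTD_rollingHash_rotate(hash, buf[0], buf[wSize], pp);
+ *   assert(hash == ZSTD_rollingHash_compute(buf+1, wSize));
+ */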
+
+/*-*************************************
+* Round buffer management
+***************************************/
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX \
+ ( ((U32)-1) /* Maximum ending current index */ \
+ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
+
+/**
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+ size_t const endT = (size_t)(window->nextSrc - window->base);
+ U32 const end = (U32)endT;
+
+ window->lowLimit = end;
+ window->dictLimit = end;
+}
+
+/**
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+ return window.lowLimit < window.dictLimit;
+}
+
+/**
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+ return ZSTD_window_hasExtDict(ms->window) ?
+ ZSTD_extDict :
+ ms->dictMatchState != NULL ?
+ ZSTD_dictMatchState :
+ ZSTD_noDict;
+}
+
+/**
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ void const* srcEnd)
+{
+ U32 const current = (U32)((BYTE const*)srcEnd - window.base);
+ return current > ZSTD_CURRENT_MAX;
+}
+
+/**
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ * NOTE: (maxDist & cycleMask) must be zero.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+{
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29) - (1<<chainLog)
+ * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29
+ *
+ * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+ * After correction, current is less than (1<<chainLog + 1<<windowLog).
+ * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+ * In 32-bit mode we are safe, because (chainLog <= 29), so
+ * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+ * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
+ U32 const cycleMask = (1U << cycleLog) - 1;
+ U32 const current = (U32)((BYTE const*)src - window->base);
+ U32 const newCurrent = (current & cycleMask) + maxDist;
+ U32 const correction = current - newCurrent;
+ assert((maxDist & cycleMask) == 0);
+ assert(current > newCurrent);
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+
+ window->base += correction;
+ window->dictBase += correction;
+ window->lowLimit -= correction;
+ window->dictLimit -= correction;
+
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+ window->lowLimit);
+ return correction;
+}
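+
+/* Worked example (illustrative, not part of upstream zstd) :
+ * with cycleLog==20 (cycleMask==0xFFFFF), maxDist==1<<20 and current==0x68001234 :
+ *   newCurrent = (0x68001234 & 0xFFFFF) + 0x100000 = 0x101234
+ *   correction = 0x68001234 - 0x101234            = 0x67F00000
+ * correction is a multiple of (1<<cycleLog), so subtracting it from every stored
+ * index preserves the low cycleLog bits, and indices up to maxDist in the past
+ * remain representable.
+ */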
+
+/**
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
+ * This allows a simple check that index >= lowLimit to see if index is valid.
+ * This must be called before a block compression call, with srcEnd as the block
+ * source end.
+ *
+ * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
+ * This is because dictionaries are allowed to be referenced as long as the last
+ * byte of the dictionary is in the window, but once they are out of range,
+ * they cannot be referenced. If loadedDictEndPtr is NULL, we use
+ * loadedDictEnd == 0.
+ *
+ * In normal dict mode, the dict is between lowLimit and dictLimit. In
+ * dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary
+ * is below them. forceWindow and dictMatchState are therefore incompatible.
+ */
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+ void const* srcEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
+ U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+ DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist);
+ if (blockEndIdx > maxDist + loadedDictEnd) {
+ U32 const newLowLimit = blockEndIdx - maxDist;
+ if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+ if (window->dictLimit < window->lowLimit) {
+ DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
+ window->dictLimit = window->lowLimit;
+ }
+ if (loadedDictEndPtr)
+ *loadedDictEndPtr = 0;
+ if (dictMatchStatePtr)
+ *dictMatchStatePtr = NULL;
+ }
+}
+
+/**
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+ void const* src, size_t srcSize)
+{
+ BYTE const* const ip = (BYTE const*)src;
+ U32 contiguous = 1;
+ DEBUGLOG(5, "ZSTD_window_update");
+ /* Check if blocks follow each other */
+ if (src != window->nextSrc) {
+ /* not contiguous */
+ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+ DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
+ window->lowLimit = window->dictLimit;
+ assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
+ window->dictLimit = (U32)distanceFromBase;
+ window->dictBase = window->base;
+ window->base = ip - distanceFromBase;
+ // ms->nextToUpdate = window->dictLimit;
+ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
+ contiguous = 0;
+ }
+ window->nextSrc = ip + srcSize;
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ( (ip+srcSize > window->dictBase + window->lowLimit)
+ & (ip < window->dictBase + window->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ window->lowLimit = lowLimitMax;
+ DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
+ }
+ return contiguous;
+}
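+
+/* Illustrative note (not part of upstream zstd): the invariants maintained by
+ * ZSTD_window_enforceMaxDist() and ZSTD_window_update() are what let match
+ * finders validate a candidate index with simple comparisons. A minimal sketch,
+ * where `window` and `matchIndex` are hypothetical locals:
+ *
+ *   if (matchIndex < window.lowLimit) {
+ *       // candidate fell out of the valid window : reject it
+ *   } else if (matchIndex < window.dictLimit) {
+ *       // candidate lives in the extDict segment : read it via window.dictBase
+ *   } else {
+ *       // candidate lives in the current prefix : read it via window.base
+ *   }
+ */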
+
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+ U32 const fp_accuracy = 8;
+ U32 const fp_multiplier = (1 << fp_accuracy);
+ U32 const newStat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(newStat);
+ U32 const BWeight = hb * fp_multiplier;
+ U32 const FWeight = (newStat << fp_accuracy) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + fp_accuracy < 31);
+ return (double)weight / fp_multiplier;
+}
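+
+/* Worked example (illustrative, not part of upstream zstd): with fp_accuracy=8,
+ * ZSTD_fWeight(3) computes newStat=4, hb=ZSTD_highbit32(4)=2, BWeight=2*256=512,
+ * FWeight=(4<<8)>>2=256, weight=768, hence 768/256 = 3.0 : a fixed-point
+ * approximation of 1 + log2(newStat), as used by ZSTD_debugTable() below. */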
+
+/* display a table content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+ unsigned u, sum;
+ for (u=0, sum=0; u<=max; u++) sum += table[u];
+ DEBUGLOG(2, "total nb elts: %u", sum);
+ for (u=0; u<=max; u++) {
+ DEBUGLOG(2, "%2u: %5u (%.2f)",
+ u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+ }
+}
+
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
+ * cParams are built depending on compressionLevel, src size hints,
+ * LDM and manually set compression parameters.
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
+
+/*! ZSTD_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
+/*! ZSTD_getCParamsFromCDict() :
+ * as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_CCtx_params params);
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * seqs must parse a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.c b/vendor/github.com/DataDog/zstd/zstd_ddict.c
new file mode 100644
index 000000000..0af3d23bf
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ddict.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of ZSTD_DDict object */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include <string.h> /* memcpy, memmove, memset */
+#include "cpu.h" /* bmi2 */
+#include "mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+# include "zstd_legacy.h"
+#endif
+
+
+
+/*-*******************************************************
+* Types
+*********************************************************/
+struct ZSTD_DDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictSize;
+ ZSTD_entropyDTables_t entropy;
+ U32 dictID;
+ U32 entropyPresent;
+ ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_copyDDictParameters");
+ assert(dctx != NULL);
+ assert(ddict != NULL);
+ dctx->dictID = ddict->dictID;
+ dctx->prefixStart = ddict->dictContent;
+ dctx->virtualStart = ddict->dictContent;
+ dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+ dctx->previousDstEnd = dctx->dictEnd;
+ if (ddict->entropyPresent) {
+ dctx->litEntropy = 1;
+ dctx->fseEntropy = 1;
+ dctx->LLTptr = ddict->entropy.LLTable;
+ dctx->MLTptr = ddict->entropy.MLTable;
+ dctx->OFTptr = ddict->entropy.OFTable;
+ dctx->HUFptr = ddict->entropy.hufTable;
+ dctx->entropy.rep[0] = ddict->entropy.rep[0];
+ dctx->entropy.rep[1] = ddict->entropy.rep[1];
+ dctx->entropy.rep[2] = ddict->entropy.rep[2];
+ } else {
+ dctx->litEntropy = 0;
+ dctx->fseEntropy = 0;
+ }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+ ZSTD_dictContentType_e dictContentType)
+{
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+ if (ddict->dictSize < 8) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+ &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+ dictionary_corrupted);
+ ddict->entropyPresent = 1;
+ return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+ ddict->dictBuffer = NULL;
+ ddict->dictContent = dict;
+ if (!dict) dictSize = 0;
+ } else {
+ void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+ ddict->dictBuffer = internalBuffer;
+ ddict->dictContent = internalBuffer;
+ if (!internalBuffer) return ERROR(memory_allocation);
+ memcpy(internalBuffer, dict, dictSize);
+ }
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+
+ /* parse dictionary content */
+ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
+
+ return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem)
+{
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+ if (ddict == NULL) return NULL;
+ ddict->cMem = customMem;
+ { size_t const initResult = ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType);
+ if (ZSTD_isError(initResult)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+ } }
+ return ddict;
+ }
+}
+
+/*! ZSTD_createDDict() :
+* Create a digested dictionary, to start decompression without startup delay.
+* `dict` content is copied inside DDict.
+* Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
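+
+/* Illustrative usage sketch (not part of upstream zstd), relying only on the
+ * public API declared in zstd.h; `dictBuf`, `cSrc`, `dst` and their sizes are
+ * hypothetical caller-provided buffers:
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictLen);   // copies dictBuf
+ *   ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
+ *   if ((ddict != NULL) && (dctx != NULL)) {
+ *       size_t const r = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                                   cSrc, cSrcSize, ddict);
+ *       if (ZSTD_isError(r)) { ... handle error ... }
+ *   }
+ *   ZSTD_freeDCtx(dctx);     // both free functions accept NULL
+ *   ZSTD_freeDDict(ddict);
+ */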
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, to start decompression without startup delay.
+ * Dictionary content is simply referenced, it will be accessed during decompression.
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* sBuffer, size_t sBufferSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ size_t const neededSpace = sizeof(ZSTD_DDict)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+ ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+ assert(sBuffer != NULL);
+ assert(dict != NULL);
+ if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
+ if (sBufferSize < neededSpace) return NULL;
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ memcpy(ddict+1, dict, dictSize); /* local copy */
+ dict = ddict+1;
+ }
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType) ))
+ return NULL;
+ return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_free(ddict->dictBuffer, cMem);
+ ZSTD_free(ddict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ * Estimate amount of memory that will be needed to create a dictionary for decompression.
+ * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
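+
+/* Illustrative sketch (not part of upstream zstd): sizing a workspace for
+ * ZSTD_initStaticDDict() with ZSTD_estimateDDictSize(); `workspace` and
+ * `dictBuf` are hypothetical caller-provided buffers:
+ *
+ *   size_t const need = ZSTD_estimateDDictSize(dictLen, ZSTD_dlm_byCopy);
+ *   // workspace must be 8-byte aligned and hold at least `need` bytes
+ *   const ZSTD_DDict* const ddict = ZSTD_initStaticDDict(workspace, need,
+ *                                           dictBuf, dictLen,
+ *                                           ZSTD_dlm_byCopy, ZSTD_dct_auto);
+ *   if (ddict == NULL) { ... workspace too small or misaligned ... }
+ */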
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0;
+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.h b/vendor/github.com/DataDog/zstd/zstd_ddict.h
new file mode 100644
index 000000000..0479d11bb
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ddict.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DDICT_H
+#define ZSTD_DDICT_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include <stddef.h> /* size_t */
+#include "zstd.h" /* ZSTD_DDict, and several public functions */
+
+
+/*-*******************************************************
+ * Interface
+ *********************************************************/
+
+/* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+#endif /* ZSTD_DDICT_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress.c b/vendor/github.com/DataDog/zstd/zstd_decompress.c
new file mode 100644
index 000000000..675596f5a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress.c
@@ -0,0 +1,1765 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how the default decompression function ZSTD_decompress() allocates its context:
+ * on the stack (0), or on the heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+* LEGACY_SUPPORT :
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 0
+#endif
+
+/*!
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
+#endif
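+
+/* Illustrative sketch (not part of upstream zstd): an application expecting
+ * frames with unusually large windows can raise the limit explicitly, using the
+ * function named in the note above (`dctx` is a hypothetical decompression
+ * context):
+ *
+ *   ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 30);   // accept windows up to 1 GiB
+ */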
+
+/*!
+ * NO_FORWARD_PROGRESS_MAX :
+ * maximum allowed nb of calls to ZSTD_decompressStream()
+ * without any forward progress
+ * (defined as: no byte read from input, and no byte flushed to output)
+ * before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include <string.h> /* memcpy, memmove, memset */
+#include "cpu.h" /* bmi2 */
+#include "mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h" /* blockProperties_t */
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+# include "zstd_legacy.h"
+#endif
+
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+ size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
+ ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
+ ZSTD_FRAMEHEADERSIZE_PREFIX;
+ ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+ dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
+ dctx->staticSize = 0;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->dictEnd = NULL;
+ dctx->ddictIsCold = 0;
+ dctx->dictUses = ZSTD_dont_use;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
+ dctx->legacyContext = NULL;
+ dctx->previousLegacyVersion = 0;
+ dctx->noForwardProgress = 0;
+ dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ DEBUGLOG(3, "ZSTD_createDCtx");
+ return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ dctx->ddict = NULL;
+ dctx->dictUses = ZSTD_dont_use;
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
+ { ZSTD_customMem const cMem = dctx->customMem;
+ ZSTD_clearDict(dctx);
+ ZSTD_free(dctx->inBuff, cMem);
+ dctx->inBuff = NULL;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (dctx->legacyContext)
+ ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
+#endif
+ ZSTD_free(dctx, cMem);
+ return 0;
+ }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+ * Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(buffer, size)) return 1;
+#endif
+ return 0;
+}
+
+/** ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
+ }
+}
+
+/** ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/** ZSTD_getFrameHeader_advanced() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+
+ memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers do not understand that zfhPtr is only read if the return value is zero, since those are two different signals */
+ if (srcSize < minInputSize) return minInputSize;
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame */
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
+ memset(zfhPtr, 0, sizeof(*zfhPtr));
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
+ zfhPtr->frameType = ZSTD_skippableFrame;
+ return 0;
+ }
+ RETURN_ERROR(prefix_unknown);
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
+
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const singleSegment = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U64 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+ "reserved bits, must be zero");
+
+ if (!singleSegment) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
+ windowSize = (1ULL << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
+ }
+ switch(dictIDSizeCode)
+ {
+ default: assert(0); /* impossible */
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default: assert(0); /* impossible */
+ case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
+
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
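+
+/* Illustrative sketch (not part of upstream zstd): driving ZSTD_getFrameHeader(),
+ * which distinguishes "header filled", "need more input" and "error".
+ * `buf`/`bufSize` stand for a hypothetical, possibly partial, input buffer:
+ *
+ *   ZSTD_frameHeader zfh;
+ *   size_t const r = ZSTD_getFrameHeader(&zfh, buf, bufSize);
+ *   if (ZSTD_isError(r))  { ... not a valid zstd frame header ... }
+ *   else if (r > 0)       { ... provide at least r bytes, then call again ... }
+ *   else                  { ... zfh.windowSize, zfh.frameContentSize, etc. are usable ... }
+ */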
+
+
+/** ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
+ return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
+ }
+#endif
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
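+
+/* Illustrative sketch (not part of upstream zstd): using the reported content
+ * size to allocate an exact destination buffer before one-shot decompression.
+ * `cSrc`/`cSrcSize` are hypothetical, and malloc() is assumed available:
+ *
+ *   unsigned long long const rSize = ZSTD_getFrameContentSize(cSrc, cSrcSize);
+ *   if (rSize == ZSTD_CONTENTSIZE_ERROR)   { ... not a valid zstd frame ... }
+ *   if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) { ... fall back to streaming decompression ... }
+ *   {   void* const dst = malloc((size_t)rSize);
+ *       size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, cSrc, cSrcSize);
+ *       if (ZSTD_isError(dSize)) { ... handle error ... }
+ *   }
+ */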
+
+static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+{
+ size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+ U32 sizeU32;
+
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
+
+ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+ RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+ frameParameter_unsupported);
+
+ return skippableHeaderSize + sizeU32;
+}
+
+/** ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
+ while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
+ U32 const magicNumber = MEM_readLE32(src);
+
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize))
+ return skippableSize;
+ if (srcSize < skippableSize) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
+/** ZSTD_getDecompressedSize() :
+ * compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - frame content is empty
+ - decompressed size field is not present in frame header
+ - frame header unknown / not supported
+ - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
+
+
+/** ZSTD_decodeFrameHeader() :
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+ size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Skip the dictID check in fuzzing mode, because it makes the search
+ * harder.
+ */
+ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+ dictionary_wrong);
+#endif
+ if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+ return 0;
+}
+
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ frameSizeInfo.compressedSize = ret;
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ return frameSizeInfo;
+}
+
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize))
+ return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
+#endif
+
+ if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ return frameSizeInfo;
+ } else {
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const ipstart = ip;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ }
+
+ ip += zfh.headerSize;
+ remainingSize -= zfh.headerSize;
+
+ /* Iterate over each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize))
+ return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+ ip += ZSTD_blockHeaderSize + cBlockSize;
+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+ nbBlocks++;
+
+ if (blockProperties.lastBlock) break;
+ }
+
+ /* Final frame content checksum */
+ if (zfh.checksumFlag) {
+ if (remainingSize < 4)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ ip += 4;
+ }
+
+ frameSizeInfo.compressedSize = ip - ipstart;
+ frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+ ? zfh.frameContentSize
+ : nbBlocks * zfh.blockSizeMax;
+ return frameSizeInfo;
+ }
+}
+
+/** ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
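+
+/* Illustrative sketch (not part of upstream zstd): splitting a buffer that holds
+ * several concatenated frames by advancing over the compressed size of each.
+ * `ptr`/`remaining` are hypothetical locals over the caller's buffer:
+ *
+ *   while (remaining > 0) {
+ *       size_t const frameSize = ZSTD_findFrameCompressedSize(ptr, remaining);
+ *       if (ZSTD_isError(frameSize)) { ... corrupted or truncated input ... }
+ *       // [ptr, ptr + frameSize) is one complete frame (zstd or skippable)
+ *       ptr = (const char*)ptr + frameSize;
+ *       remaining -= frameSize;
+ *   }
+ */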
+
+
+/** ZSTD_decompressBound() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame or a skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ZSTD_CONTENTSIZE_ERROR;
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ bound += decompressedBound;
+ }
+ return bound;
+}
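+
+/* Illustrative sketch (not part of upstream zstd): ZSTD_decompressBound() is
+ * handy when frames were produced without a content-size field (e.g. by the
+ * streaming compressor with unknown pledged size). `cSrc`/`cSrcSize` are
+ * hypothetical:
+ *
+ *   unsigned long long const bound = ZSTD_decompressBound(cSrc, cSrcSize);
+ *   if (bound == ZSTD_CONTENTSIZE_ERROR) { ... invalid input ... }
+ *   // a destination buffer of `bound` bytes is guaranteed to be large enough:
+ *   // ZSTD_decompress(dst, (size_t)bound, cSrc, cSrcSize);
+ */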
+
+
+/*-*************************************************************
+ * Frame decoding
+ ***************************************************************/
+
+
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+/** ZSTD_insertBlock() :
+ insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ ZSTD_checkContinuity(dctx, blockStart);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_copyRawBlock");
+ if (dst == NULL) {
+ if (srcSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null);
+ }
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+ BYTE b,
+ size_t regenSize)
+{
+ if (dst == NULL) {
+ if (regenSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null);
+ }
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
+ memset(dst, b, regenSize);
+ return regenSize;
+}
+
+
+/*! ZSTD_decompressFrame() :
+ * @dctx must be properly initialized
+ * will update *srcPtr and *srcSizePtr,
+ * to make *srcPtr progress by one frame. */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
+{
+ const BYTE* ip = (const BYTE*)(*srcPtr);
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t remainingSrcSize = *srcSizePtr;
+
+ DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
+
+ /* check */
+ RETURN_ERROR_IF(
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
+ srcSize_wrong);
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+ srcSize_wrong);
+ FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSrcSize -= ZSTD_blockHeaderSize;
+ RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);
+ break;
+ case bt_reserved :
+ default:
+ RETURN_ERROR(corruption_detected);
+ }
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ if (dctx->fParams.checksumFlag)
+ XXH64_update(&dctx->xxhState, op, decodedSize);
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSrcSize -= cBlockSize;
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+ corruption_detected);
+ }
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
+ U32 checkRead;
+ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
+ checkRead = MEM_readLE32(ip);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
+ ip += 4;
+ remainingSrcSize -= 4;
+ }
+
+ /* Allow caller to get size read */
+ *srcPtr = ip;
+ *srcSizePtr = remainingSrcSize;
+ return op-ostart;
+}
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
+ int moreThan1Frame = 0;
+
+ DEBUGLOG(5, "ZSTD_decompressMultiFrame");
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
+ dict = ZSTD_DDict_dictContent(ddict);
+ dictSize = ZSTD_DDict_dictSize(ddict);
+ }
+
+ while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ size_t decodedSize;
+ size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+ if (ZSTD_isError(frameSize)) return frameSize;
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
+ "legacy support is not compatible with static dctx");
+
+ decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+
+ assert(decodedSize <= dstCapacity);
+ dst = (BYTE*)dst + decodedSize;
+ dstCapacity -= decodedSize;
+
+ src = (const BYTE*)src + frameSize;
+ srcSize -= frameSize;
+
+ continue;
+ }
+#endif
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize))
+ return skippableSize;
+ RETURN_ERROR_IF(srcSize < skippableSize, srcSize_wrong);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ } }
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+ }
+ ZSTD_checkContinuity(dctx, dst);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ RETURN_ERROR_IF(
+ (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1),
+ srcSize_wrong,
+ "at least one frame successfully completed, but following "
+ "bytes are garbage: it's more likely to be a srcSize error, "
+ "specifying more bytes than compressed size of frame(s). This "
+ "error message replaces ERROR(prefix_unknown), which would be "
+ "confusing, as the first header is actually correct. Note that "
+ "one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic "
+ "bytes. But this is _much_ less likely than a srcSize field "
+ "error.");
+ if (ZSTD_isError(res)) return res;
+ assert(res <= dstCapacity);
+ dst = (BYTE*)dst + res;
+ dstCapacity -= res;
+ }
+ moreThan1Frame = 1;
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
+
+ return (BYTE*)dst - (BYTE*)dststart;
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+ switch (dctx->dictUses) {
+ default:
+ assert(0 /* Impossible */);
+ /* fall-through */
+ case ZSTD_dont_use:
+ ZSTD_clearDict(dctx);
+ return NULL;
+ case ZSTD_use_indefinitely:
+ return dctx->ddict;
+ case ZSTD_use_once:
+ dctx->dictUses = ZSTD_dont_use;
+ return dctx->ddict;
+ }
+}
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
+}
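+
+/* Illustrative sketch (not part of upstream zstd): reusing a single context
+ * across many one-shot calls avoids repeated allocation and initialization.
+ * The arrays and loop variables are hypothetical:
+ *
+ *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *   for (i = 0; i < nbInputs; i++) {
+ *       size_t const r = ZSTD_decompressDCtx(dctx, dst[i], dstCapacity[i],
+ *                                            src[i], srcSize[i]);
+ *       if (ZSTD_isError(r)) { ... handle error ... }
+ *   }
+ *   ZSTD_freeDCtx(dctx);
+ */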
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+ size_t regenSize;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ RETURN_ERROR_IF(dctx==NULL, memory_allocation);
+ regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTD_DCtx dctx;
+ ZSTD_initDCtx_internal(&dctx);
+ return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
+
+/*-**************************************
+* Advanced Streaming Decompression API
+* Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+ switch(dctx->stage)
+ {
+ default: /* should not happen */
+ assert(0);
+ case ZSTDds_getFrameHeaderSize:
+ case ZSTDds_decodeFrameHeader:
+ return ZSTDnit_frameHeader;
+ case ZSTDds_decodeBlockHeader:
+ return ZSTDnit_blockHeader;
+ case ZSTDds_decompressBlock:
+ return ZSTDnit_block;
+ case ZSTDds_decompressLastBlock:
+ return ZSTDnit_lastBlock;
+ case ZSTDds_checkChecksum:
+ return ZSTDnit_checksum;
+ case ZSTDds_decodeSkippableHeader:
+ case ZSTDds_skipFrame:
+ return ZSTDnit_skippableFrame;
+ }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/** ZSTD_decompressContinue() :
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
+ /* Sanity check */
+ RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
+ if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
+ case ZSTDds_decodeFrameHeader:
+ assert(src != NULL);
+ memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->rleSize = bp.origSize;
+ if (cBlockSize) {
+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+ return 0;
+ }
+ /* empty block */
+ if (bp.lastBlock) {
+ if (dctx->fParams.checksumFlag) {
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* end of frame */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ }
+ return 0;
+ }
+
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+ break;
+ case bt_rle :
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
+ break;
+ case bt_reserved : /* should never happen */
+ default:
+ RETURN_ERROR(corruption_detected);
+ }
+ if (ZSTD_isError(rSize)) return rSize;
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
+ dctx->decodedSize += rSize;
+ if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+
+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
+ RETURN_ERROR_IF(
+ dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && dctx->decodedSize != dctx->fParams.frameContentSize,
+ corruption_detected);
+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* ends here */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->previousDstEnd = (char*)dst + rSize;
+ }
+ return rSize;
+ }
+
+ case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
+ { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong);
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
+ case ZSTDds_skipFrame:
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC); /* some compiler require default to do something */
+ }
+}
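+
+/* Illustrative sketch (not part of upstream zstd): the bufferless loop driven by
+ * ZSTD_nextSrcSizeToDecompress() / ZSTD_decompressContinue(). `readInput()` is a
+ * hypothetical helper delivering exactly `toRead` bytes into `inBuf`; `op`/`oend`
+ * walk a caller-provided output buffer:
+ *
+ *   size_t const initErr = ZSTD_decompressBegin(dctx);
+ *   if (ZSTD_isError(initErr)) { ... handle error ... }
+ *   while (1) {
+ *       size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *       if (toRead == 0) break;                        // frame fully decoded
+ *       readInput(inBuf, toRead);                      // must supply exactly toRead bytes
+ *       {   size_t const gen = ZSTD_decompressContinue(dctx, op, oend - op, inBuf, toRead);
+ *           if (ZSTD_isError(gen)) { ... handle error ... }
+ *           op += gen;                                 // gen can be 0 (header stages)
+ *       }
+ *   }
+ */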
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+ return 0;
+}
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t
+ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
+ assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
+ dictPtr += 8; /* skip header = magic + dictID */
+
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+ ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+ { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
+ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+#ifdef HUF_FORCE_DECOMPRESS_X1
+ /* in minimal huffman, we always use X1 variants */
+ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+#else
+ size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+#endif
+ RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->OFTable,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
+ offcodeLog);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->MLTable,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
+ matchlengthLog);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->LLTable,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
+ litlengthLog);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
+ dictionary_corrupted);
+ entropy->rep[i] = rep;
+ } }
+
+ return dictPtr - (const BYTE*)dict;
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
+ RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+ dctx->litEntropy = dctx->fseEntropy = 1;
+
+ /* reference dictionary content */
+ return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->prefixStart = NULL;
+ dctx->virtualStart = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
+ if (dict && dictSize)
+ RETURN_ERROR_IF(
+ ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+ dictionary_corrupted);
+ return 0;
+}
+
+
+/* ====== ZSTD_DDict ====== */
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+ assert(dctx != NULL);
+ if (ddict) {
+ const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
+ size_t const dictSize = ZSTD_DDict_dictSize(ddict);
+ const void* const dictEnd = dictStart + dictSize;
+ dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
+ DEBUGLOG(4, "DDict is %s",
+ dctx->ddictIsCold ? "~cold~" : "hot!");
+ }
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
+ if (ddict) { /* NULL ddict is equivalent to no dictionary */
+ ZSTD_copyDDictParameters(dctx, ddict);
+ }
+ return 0;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ * The needed dictionary is then hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ if (ZSTD_isError(hError)) return 0;
+ return zfp.dictID;
+}
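+
+/* Usage sketch (illustrative, not part of the library) : an application
+ * holding several dictionaries can route a frame to the right one by
+ * comparing dictIDs; selectDDict() is a hypothetical lookup helper.
+ *
+ *   unsigned const id = ZSTD_getDictID_fromFrame(src, srcSize);
+ *   const ZSTD_DDict* const ddict = id ? selectDDict(id) : NULL;
+ *   size_t const r = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                               src, srcSize, ddict);
+ *   // ZSTD_isError(r) reports corruption, wrong dictionary, etc.
+ */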
+
+
+/*! ZSTD_decompress_usingDDict() :
+* Decompression using a pre-digested dictionary.
+* Uses the dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict)
+{
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
+}
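+
+/* Typical lifecycle (illustrative sketch) : the DDict is created once and
+ * reused across many frames, which is where the "without significant
+ * overhead" benefit comes from.
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictSize);
+ *   ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
+ *   size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                                   src, srcSize, ddict);
+ *   // check ZSTD_isError(dSize); reuse dctx/ddict for further frames, then :
+ *   ZSTD_freeDCtx(dctx);
+ *   ZSTD_freeDDict(ddict);
+ */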
+
+
+/*=====================================
+* Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
+ return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
+ if (dict && dictSize >= 8) {
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
+ dctx->ddict = dctx->ddictLocal;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
+ dctx->dictUses = ZSTD_use_once;
+ return 0;
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
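+
+/* Example (illustrative sketch) : a prefix is referenced (not copied) for the
+ * next decompression session only, and must outlive it.
+ *
+ *   ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
+ *   ZSTD_DCtx_refPrefix(dctx, prefixBuffer, prefixSize);
+ *   // the next frame decompressed with dctx uses prefixBuffer as a raw-content
+ *   // dictionary, after which the reference is dropped (ZSTD_use_once)
+ */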
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX,
+ *          or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+ DEBUGLOG(4, "ZSTD_initDStream");
+ return ZSTD_initDStream_usingDDict(zds, NULL);
+}
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
+}
+
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
+ if (ddict) {
+ dctx->ddict = ddict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ return 0;
+}
+
+/* ZSTD_DCtx_setMaxWindowSize() :
+ * note : no direct equivalence in ZSTD_DCtx_setParameter,
+ * since this version sets windowSize, and the other sets windowLog */
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
+ size_t const min = (size_t)1 << bounds.lowerBound;
+ size_t const max = (size_t)1 << bounds.upperBound;
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
+ RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);
+}
+
+ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+ case ZSTD_d_format:
+ bounds.lowerBound = (int)ZSTD_f_zstd1;
+ bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ return bounds;
+ default:;
+ }
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+}
+
+/* ZSTD_dParam_withinBounds:
+ * @return 1 if value is within dParam bounds,
+ * 0 otherwise */
+static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+#define CHECK_DBOUNDS(p,v) { \
+ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
+}
+
+size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
+ CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
+ dctx->maxWindowSize = ((size_t)1) << value;
+ return 0;
+ case ZSTD_d_format:
+ CHECK_DBOUNDS(ZSTD_d_format, value);
+ dctx->format = (ZSTD_format_e)value;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported);
+}
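+
+/* Example (illustrative sketch) : capping the window size a decoder will
+ * accept, e.g. to bound memory usage when decompressing untrusted input.
+ *
+ *   size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
+ *   // ZSTD_isError(err) : wrong stage, or value out of bounds
+ *
+ * A frame whose window exceeds 2^27 then fails with
+ * frameParameter_windowTooLarge instead of growing the decoding buffers. */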
+
+size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ }
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+ frameParameter_windowTooLarge);
+ return minRBSize;
+}
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
+ RETURN_ERROR_IF(err>0, srcSize_wrong);
+ RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+ frameParameter_windowTooLarge);
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
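+
+/* Example (illustrative sketch) : sizing a caller-provided workspace for a
+ * static DStream from a frame header; `frameStart` and `headerSize` stand for
+ * the beginning of the compressed frame and the number of bytes available.
+ *
+ *   size_t const need = ZSTD_estimateDStreamSize_fromFrame(frameStart, headerSize);
+ *   if (!ZSTD_isError(need)) {
+ *       void* const workspace = malloc(need);
+ *       ZSTD_DStream* const zds = ZSTD_initStaticDStream(workspace, need);
+ *       // zds == NULL if the workspace is too small or misaligned
+ *   }
+ */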
+
+
+/* ***** Decompression ***** */
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const char* const istart = (const char*)(input->src) + input->pos;
+ const char* const iend = (const char*)(input->src) + input->size;
+ const char* ip = istart;
+ char* const ostart = (char*)(output->dst) + output->pos;
+ char* const oend = (char*)(output->dst) + output->size;
+ char* op = ostart;
+ U32 someMoreWork = 1;
+
+ DEBUGLOG(5, "ZSTD_decompressStream");
+ RETURN_ERROR_IF(
+ input->pos > input->size,
+ srcSize_wrong,
+ "forbidden. in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ RETURN_ERROR_IF(
+ output->pos > output->size,
+ dstSize_tooSmall,
+ "forbidden. out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+
+ while (someMoreWork) {
+ switch(zds->streamStage)
+ {
+ case zdss_init :
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ zds->streamStage = zdss_loadHeader;
+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+ zds->legacyVersion = 0;
+ zds->hostageByte = 0;
+ /* fall-through */
+
+ case zdss_loadHeader :
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+ if (zds->legacyVersion) {
+ RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+ "legacy support is incompatible with static dctx");
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init;
+ return hint;
+ } }
+#endif
+ { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+ U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
+ if (legacyVersion) {
+ ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
+ const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
+ size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
+ DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
+ RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+ "legacy support is incompatible with static dctx");
+ FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
+ zds->previousLegacyVersion, legacyVersion,
+ dict, dictSize));
+ zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
+ return hint;
+ } }
+#endif
+ return hSize; /* error */
+ }
+ if (hSize != 0) { /* need more input */
+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
+ memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ zds->lhSize += remainingInput;
+ }
+ input->pos = input->size;
+ return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ assert(ip != NULL);
+ memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* check for single-pass mode opportunity */
+ if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ } }
+
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
+
+ if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge);
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
+ if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+ DEBUGLOG(4, "inBuff : from %u to %u",
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
+ DEBUGLOG(4, "outBuff : from %u to %u",
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+ if (zds->staticSize) { /* static DCtx */
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+ assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
+ RETURN_ERROR_IF(
+ bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+ memory_allocation);
+ } else {
+ ZSTD_free(zds->inBuff, zds->customMem);
+ zds->inBuffSize = 0;
+ zds->outBuffSize = 0;
+ zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
+ RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
+ }
+ zds->inBuffSize = neededInBuffSize;
+ zds->outBuff = zds->inBuff + zds->inBuffSize;
+ zds->outBuffSize = neededOutBuffSize;
+ } }
+ zds->streamStage = zdss_read;
+ /* fall-through */
+
+ case zdss_read:
+ DEBUGLOG(5, "stage zdss_read");
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ if (neededInSize==0) { /* end of frame */
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
+ ip, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize && !isSkipFrame) break; /* this was just a header */
+ zds->outEnd = zds->outStart + decodedSize;
+ zds->streamStage = zdss_flush;
+ break;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
+ /* fall-through */
+
+ case zdss_load:
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t loadedSize;
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
+ RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+ corruption_detected,
+ "should never happen");
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
+ }
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ { size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
+ zds->inBuff, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ zds->inPos = 0; /* input is consumed */
+ if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */
+ zds->outEnd = zds->outStart + decodedSize;
+ } }
+ zds->streamStage = zdss_flush;
+ /* fall-through */
+
+ case zdss_flush:
+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ if (flushedSize == toFlushSize) { /* flush completed */
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
+ zds->outStart = zds->outEnd = 0;
+ }
+ break;
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC); /* some compilers require default to do something */
+ } }
+
+ /* result */
+ input->pos = (size_t)(ip - (const char*)(input->src));
+ output->pos = (size_t)(op - (char*)(output->dst));
+ if ((ip==istart) && (op==ostart)) { /* no forward progress */
+ zds->noForwardProgress ++;
+ if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+ RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
+ RETURN_ERROR_IF(ip==iend, srcSize_wrong);
+ assert(0);
+ }
+ } else {
+ zds->noForwardProgress = 0;
+ }
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
+ input->pos++; /* release hostage */
+ } /* zds->hostageByte */
+ return 0;
+ } /* zds->outEnd == zds->outStart */
+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
+ zds->hostageByte=1;
+ }
+ return 1;
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+ nextSrcSizeHint -= zds->inPos; /* part already loaded*/
+ return nextSrcSizeHint;
+ }
+}
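+
+/* Typical streaming loop (illustrative sketch) : feed chunks of compressed
+ * data and drain output until input is exhausted; readInput()/writeOutput()
+ * are hypothetical application callbacks.
+ *
+ *   ZSTD_DStream* const zds = ZSTD_createDStream();
+ *   void* const inBuff  = malloc(ZSTD_DStreamInSize());
+ *   void* const outBuff = malloc(ZSTD_DStreamOutSize());
+ *   size_t read;
+ *   ZSTD_initDStream(zds);
+ *   while ((read = readInput(inBuff, ZSTD_DStreamInSize())) != 0) {
+ *       ZSTD_inBuffer input = { inBuff, read, 0 };
+ *       while (input.pos < input.size) {
+ *           ZSTD_outBuffer output = { outBuff, ZSTD_DStreamOutSize(), 0 };
+ *           size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+ *           // ZSTD_isError(ret) : corrupted data; ret == 0 : frame finished
+ *           writeOutput(outBuff, output.pos);
+ *       }
+ *   }
+ *   ZSTD_freeDStream(zds);
+ */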
+
+size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.c b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
new file mode 100644
index 000000000..a2a7eedcf
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
@@ -0,0 +1,1311 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_decompress_block :
+ * this module takes care of decompressing _compressed_ blocks */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include <string.h> /* memcpy, memmove, memset */
+#include "compiler.h" /* prefetch */
+#include "cpu.h" /* bmi2 */
+#include "mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"
+
+/*_*******************************************************
+* Macros
+**********************************************************/
+
+/* These two optional macros force the use of one or the other of the two
+ * ZSTD_decompressSequences implementations. You can't force both directions
+ * at the same time.
+ */
+#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
+#endif
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+ * Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr)
+{
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
+
+ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+ bpPtr->origSize = cSize; /* only useful for RLE */
+ if (bpPtr->blockType == bt_rle) return 1;
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
+ return cSize;
+ }
+}
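+
+/* For reference : the 3-byte little-endian block header read above packs
+ *   bit  0     : lastBlock flag
+ *   bits 1-2   : blockType (bt_raw, bt_rle, bt_compressed, bt_reserved)
+ *   bits 3-23  : block size (origSize for RLE blocks) */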
+
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize);
+/*! ZSTD_decodeLiteralsBlock() :
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ {
+ case set_repeat:
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
+ /* fall-through */
+
+ case set_compressed:
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ size_t hufSuccess;
+ switch(lhlCode)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ singleStream = !lhlCode;
+ lhSize = 3;
+ litSize = (lhc >> 4) & 0x3FF;
+ litCSize = (lhc >> 14) & 0x3FF;
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize = 4;
+ litSize = (lhc >> 4) & 0x3FFF;
+ litCSize = lhc >> 18;
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize = 5;
+ litSize = (lhc >> 4) & 0x3FFFF;
+ litCSize = (lhc >> 22) + (istart[4] << 10);
+ break;
+ }
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
+
+ /* prefetch huffman table if cold */
+ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+ PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+ }
+
+ if (litEncType==set_repeat) {
+ if (singleStream) {
+ hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, dctx->bmi2);
+ } else {
+ hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, dctx->bmi2);
+ }
+ } else {
+ if (singleStream) {
+#if defined(HUF_FORCE_DECOMPRESS_X2)
+ hufSuccess = HUF_decompress1X_DCtx_wksp(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace));
+#else
+ hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), dctx->bmi2);
+#endif
+ } else {
+ hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), dctx->bmi2);
+ }
+ }
+
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
+ memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
+ break;
+ }
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ RETURN_ERROR(corruption_detected, "impossible");
+ }
+ }
+}
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables using default distributions as defined in the specification :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method :
+ * - start from default distributions, present in /lib/common/zstd_internal.h
+ * - generate tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify output, reported below, test with fuzzer to ensure it's correct */
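+
+/* For reference (illustrative sketch) : the Literal Lengths table below is the
+ * result of a call equivalent to
+ *
+ *   ZSTD_buildFSETable(wksp, LL_defaultNorm, MaxLL,
+ *                      LL_base, LL_bits, LL_DEFAULTNORMLOG);
+ *
+ * where `wksp` is a hypothetical workspace of (1<<LL_DEFAULTNORMLOG)+1
+ * ZSTD_seqSymbol entries; the OF and ML tables follow the same pattern. */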
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 4, 0}, { 16, 0, 4, 0},
+ { 32, 0, 5, 1}, { 0, 0, 5, 3},
+ { 0, 0, 5, 4}, { 0, 0, 5, 6},
+ { 0, 0, 5, 7}, { 0, 0, 5, 9},
+ { 0, 0, 5, 10}, { 0, 0, 5, 12},
+ { 0, 0, 6, 14}, { 0, 1, 5, 16},
+ { 0, 1, 5, 20}, { 0, 1, 5, 22},
+ { 0, 2, 5, 28}, { 0, 3, 5, 32},
+ { 0, 4, 5, 48}, { 32, 6, 5, 64},
+ { 0, 7, 5, 128}, { 0, 8, 6, 256},
+ { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
+ { 32, 0, 4, 0}, { 0, 0, 4, 1},
+ { 0, 0, 5, 2}, { 32, 0, 5, 4},
+ { 0, 0, 5, 5}, { 32, 0, 5, 7},
+ { 0, 0, 5, 8}, { 32, 0, 5, 10},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 32, 1, 5, 16}, { 0, 1, 5, 18},
+ { 32, 1, 5, 22}, { 0, 2, 5, 24},
+ { 32, 3, 5, 32}, { 0, 3, 5, 40},
+ { 0, 6, 4, 64}, { 16, 6, 4, 64},
+ { 32, 7, 5, 128}, { 0, 9, 6, 512},
+ { 0, 11, 6, 2048}, { 48, 0, 4, 0},
+ { 16, 0, 4, 1}, { 32, 0, 5, 2},
+ { 32, 0, 5, 3}, { 32, 0, 5, 5},
+ { 32, 0, 5, 6}, { 32, 0, 5, 8},
+ { 32, 0, 5, 9}, { 32, 0, 5, 11},
+ { 32, 0, 5, 12}, { 0, 0, 6, 15},
+ { 32, 1, 5, 18}, { 32, 1, 5, 20},
+ { 32, 2, 5, 24}, { 32, 2, 5, 28},
+ { 32, 3, 5, 40}, { 32, 4, 5, 48},
+ { 0, 16, 6,65536}, { 0, 15, 6,32768},
+ { 0, 14, 6,16384}, { 0, 13, 6, 8192},
+}; /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 5, 0}, { 0, 6, 4, 61},
+ { 0, 9, 5, 509}, { 0, 15, 5,32765},
+ { 0, 21, 5,2097149}, { 0, 3, 5, 5},
+ { 0, 7, 4, 125}, { 0, 12, 5, 4093},
+ { 0, 18, 5,262141}, { 0, 23, 5,8388605},
+ { 0, 5, 5, 29}, { 0, 8, 4, 253},
+ { 0, 14, 5,16381}, { 0, 20, 5,1048573},
+ { 0, 2, 5, 1}, { 16, 7, 4, 125},
+ { 0, 11, 5, 2045}, { 0, 17, 5,131069},
+ { 0, 22, 5,4194301}, { 0, 4, 5, 13},
+ { 16, 8, 4, 253}, { 0, 13, 5, 8189},
+ { 0, 19, 5,524285}, { 0, 1, 5, 1},
+ { 16, 6, 4, 61}, { 0, 10, 5, 1021},
+ { 0, 16, 5,65533}, { 0, 28, 5,268435453},
+ { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
+ { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
+}; /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 6, 3}, { 0, 0, 4, 4},
+ { 32, 0, 5, 5}, { 0, 0, 5, 6},
+ { 0, 0, 5, 8}, { 0, 0, 5, 9},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 0, 0, 6, 16}, { 0, 0, 6, 19},
+ { 0, 0, 6, 22}, { 0, 0, 6, 25},
+ { 0, 0, 6, 28}, { 0, 0, 6, 31},
+ { 0, 0, 6, 34}, { 0, 1, 6, 37},
+ { 0, 1, 6, 41}, { 0, 2, 6, 47},
+ { 0, 3, 6, 59}, { 0, 4, 6, 83},
+ { 0, 7, 6, 131}, { 0, 9, 6, 515},
+ { 16, 0, 4, 4}, { 0, 0, 4, 5},
+ { 32, 0, 5, 6}, { 0, 0, 5, 7},
+ { 32, 0, 5, 9}, { 0, 0, 5, 10},
+ { 0, 0, 6, 12}, { 0, 0, 6, 15},
+ { 0, 0, 6, 18}, { 0, 0, 6, 21},
+ { 0, 0, 6, 24}, { 0, 0, 6, 27},
+ { 0, 0, 6, 30}, { 0, 0, 6, 33},
+ { 0, 1, 6, 35}, { 0, 1, 6, 39},
+ { 0, 2, 6, 43}, { 0, 3, 6, 51},
+ { 0, 4, 6, 67}, { 0, 5, 6, 99},
+ { 0, 8, 6, 259}, { 32, 0, 4, 4},
+ { 48, 0, 4, 4}, { 16, 0, 4, 5},
+ { 32, 0, 5, 7}, { 32, 0, 5, 8},
+ { 32, 0, 5, 10}, { 32, 0, 5, 11},
+ { 0, 0, 6, 14}, { 0, 0, 6, 17},
+ { 0, 0, 6, 20}, { 0, 0, 6, 23},
+ { 0, 0, 6, 26}, { 0, 0, 6, 29},
+ { 0, 0, 6, 32}, { 0, 16, 6,65539},
+ { 0, 15, 6,32771}, { 0, 14, 6,16387},
+ { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
+ { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
+}; /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
+{
+ void* ptr = dt;
+ ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+ ZSTD_seqSymbol* const cell = dt + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->nbBits = 0;
+ cell->nextState = 0;
+ assert(nbAddBits < 255);
+ cell->nbAdditionalBits = (BYTE)nbAddBits;
+ cell->baseValue = baseValue;
+}
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * cannot fail if input is valid =>
+ * all inputs are presumed validated at this stage */
+void
+ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog)
+{
+ ZSTD_seqSymbol* const tableDecode = dt+1;
+ U16 symbolNext[MaxSeq+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ assert(maxSymbolValue <= MaxSeq);
+ assert(tableLog <= MaxFSELog);
+
+ /* Init, lay down lowprob symbols */
+ { ZSTD_seqSymbol_header DTableH;
+ DTableH.tableLog = tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].baseValue = s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].baseValue = s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ U32 const symbol = tableDecode[u].baseValue;
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ assert(nbAdditionalBits[symbol] < 255);
+ tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
+ tableDecode[u].baseValue = baseValue[symbol];
+ } }
+}
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ * or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+ symbolEncodingType_e type, unsigned max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+ int ddictIsCold, int nbSeq)
+{
+ switch(type)
+ {
+ case set_rle :
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong);
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
+ { U32 const symbol = *(const BYTE*)src;
+ U32 const baseline = baseValue[symbol];
+ U32 const nbBits = nbAdditionalBits[symbol];
+ ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+ }
+ *DTablePtr = DTableSpace;
+ return 1;
+ case set_basic :
+ *DTablePtr = defaultTable;
+ return 0;
+ case set_repeat:
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
+ /* prefetch FSE table if used */
+ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+ const void* const pStart = *DTablePtr;
+ size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+ PREFETCH_AREA(pStart, pSize);
+ }
+ return 0;
+ case set_compressed :
+ { unsigned tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
+ *DTablePtr = DTableSpace;
+ return headerSize;
+ }
+ default :
+ assert(0);
+ RETURN_ERROR(GENERIC, "impossible");
+ }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+ int nbSeq;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+ /* check */
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
+
+ /* SeqHead */
+ nbSeq = *ip++;
+ if (!nbSeq) {
+ *nbSeqPtr=0;
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
+ return 1;
+ }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+ } else {
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+
+ /* FSE table descriptors */
+ RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip,
+ LL_base, LL_bits,
+ LL_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
+ ip += llhSize;
+ }
+
+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip,
+ OF_base, OF_bits,
+ OF_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
+ ip += ofhSize;
+ }
+
+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip,
+ ML_base, ML_bits,
+ ML_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
+ ip += mlhSize;
+ }
+ }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+ const BYTE* match;
+} seq_t;
+
+typedef struct {
+ size_t state;
+ const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ ZSTD_fseState stateLL;
+ ZSTD_fseState stateOffb;
+ ZSTD_fseState stateML;
+ size_t prevOffset[ZSTD_REP_NUM];
+ const BYTE* prefixStart;
+ const BYTE* dictEnd;
+ size_t pos;
+} seqState_t;
+
+
+/* ZSTD_execSequenceLast7():
+ * exceptional case : decompress a match starting within last 7 bytes of output buffer.
+ * requires more careful checks, to ensure there is no overflow.
+ * performance does not matter though.
+ * note : this case is supposed to never be generated "naturally" by the reference encoder,
+ * since in most cases it needs at least 8 bytes to look for a match.
+ * but it's allowed by the specification. */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
+
+ /* copy literals */
+ while (op < oLitEnd) *op++ = *(*litPtr)++;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ } }
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
+ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* copy Literals */
+ ZSTD_copy8(op, *litPtr);
+ if (sequence.litLength > 8)
+ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ if (op > oend_w || sequence.matchLength < MINMATCH) {
+ U32 i;
+ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+ return sequenceLength;
+ }
+ } }
+ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_w) {
+ ZSTD_wildcopy(op, match, oend_w - op);
+ match += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequenceLong(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = sequence.match;
+
+ /* check */
+ RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
+ if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
+
+ /* copy Literals */
+ ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
+ if (sequence.litLength > 8)
+ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ if (op > oend_w || sequence.matchLength < MINMATCH) {
+ U32 i;
+ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+ return sequenceLength;
+ }
+ } }
+ assert(op <= oend_w);
+ assert(sequence.matchLength >= MINMATCH);
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_w) {
+ ZSTD_wildcopy(op, match, oend_w - op);
+ match += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+ const void* ptr = dt;
+ const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+ (U32)DStatePtr->state, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
+{
+ ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * possible offset. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of extra bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
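+
+/* With the values used in this version (ZSTD_WINDOWLOG_MAX_32 == 30,
+ * STREAM_ACCUMULATOR_MIN_32 == 25), this evaluates to 5, matching the
+ * ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5) in the sequence
+ * decoders below. */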
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+ seq_t seq;
+ U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+ U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+ U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+ U32 const totalBits = llBits+mlBits+ofBits;
+ U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+ U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+ U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+ /* sequence */
+ { size_t offset;
+ if (!ofBits)
+ offset = 0;
+ else {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ }
+
+ if (ofBits <= 1) {
+ offset += (llBase==0);
+ if (offset) {
+ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } else { /* offset == 0 */
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = mlBase
+ + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ seq.litLength = llBase
+ + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+ /* ANS state update */
+ ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
+ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
+
+ return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body");
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected);
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+ nbSeq--;
+ { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ } }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected);
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
+{
+ seq_t seq;
+ U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+ U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+ U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+ U32 const totalBits = llBits+mlBits+ofBits;
+ U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+ U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+ U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+ /* sequence */
+ { size_t offset;
+ if (!ofBits)
+ offset = 0;
+ else {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets) {
+ U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ }
+
+ if (ofBits <= 1) {
+ offset += (llBase==0);
+ if (offset) {
+ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } else {
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ { size_t const pos = seqState->pos + seq.litLength;
+ const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
+        seq.match = matchBase + pos - seq.offset;   /* note : this operation can overflow when seq.offset is too large, which can only happen when the input is corrupted.
+                                                     * No consequence though : no memory access occurs ; an overly large offset is detected in ZSTD_execSequenceLong() */
+ seqState->pos = pos + seq.matchLength;
+ }
+
+ /* ANS state update */
+ ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
+ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
+
+ return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+ /* Regen sequences */
+ if (nbSeq) {
+#define STORED_SEQS 4
+#define STORED_SEQS_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
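+        /* Sequences are decoded up to ADVANCED_SEQS positions ahead of execution
+         * and kept in a small ring buffer, so that each match location can be
+         * prefetched well before the copy that needs it. */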
+ seq_t sequences[STORED_SEQS];
+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+ seqState_t seqState;
+ int seqNb;
+ dctx->fseEntropy = 1;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ seqState.prefixStart = prefixStart;
+ seqState.pos = (size_t)(op-prefixStart);
+ seqState.dictEnd = dictEnd;
+ assert(iend >= ip);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected);
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+ sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+ PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
+
+ /* decode and decompress */
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if DYNAMIC_BMI2
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+#endif /* DYNAMIC_BMI2 */
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset);
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static size_t
+ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+#endif
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+/* ZSTD_decompressSequencesLong() :
+ * decompression function triggered when a minimum share of offsets is considered "long",
+ * aka out of cache.
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
+ * This function will try to mitigate main memory latency through the use of prefetching */
+static size_t
+ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+#endif
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ * compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u, total = 0;
+ DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
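+    /* count cells whose offset code needs more than 22 extra bits,
+     * i.e. codes able to produce offsets in the > (1<<23) range */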
+ for (u=0; u<max; u++) {
+ if (table[u].nbAdditionalBits > 22) total += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+
+ return total;
+}
+#endif
+
+
+size_t
+ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * In block mode, window size is not known, so we have to be conservative.
+ * (note: but it could be evaluated from current-lowLimit)
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
+
+ /* Decode literals section */
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+
+ /* Build Decoding Tables */
+ {
+ /* These macros control at build-time which decompressor implementation
+ * we use. If neither is defined, we do some inspection and dispatch at
+ * runtime.
+ */
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ int usePrefetchDecoder = dctx->ddictIsCold;
+#endif
+ int nbSeq;
+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ srcSize -= seqHSize;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if ( !usePrefetchDecoder
+ && (!frame || (dctx->fParams.windowSize > (1<<24)))
+ && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
+ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (shareLongOffsets >= minShare);
+ }
+#endif
+
+ dctx->ddictIsCold = 0;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if (usePrefetchDecoder)
+#endif
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+ /* else */
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+#endif
+ }
+}
+
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTD_checkContinuity(dctx, dst);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.h b/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
new file mode 100644
index 000000000..7e9296041
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DEC_BLOCK_H
+#define ZSTD_DEC_BLOCK_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include <stddef.h> /* size_t */
+#include "zstd.h" /* DCtx, and some public functions */
+#include "zstd_internal.h" /* blockProperties_t, and some public functions */
+#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */
+
+
+/* === Prototypes === */
+
+/* note: prototypes already published within `zstd.h` :
+ * ZSTD_decompressBlock()
+ */
+
+/* note: prototypes already published within `zstd_internal.h` :
+ * ZSTD_getcBlockSize()
+ * ZSTD_decodeSeqHeaders()
+ */
+
+
+/* ZSTD_decompressBlock_internal() :
+ * decompress block, starting at `src`,
+ * into destination buffer `dst`.
+ * @return : decompressed block size,
+ * or an error code (which can be tested using ZSTD_isError())
+ */
+size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame);
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * this function must be called with valid parameters only
+ * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
+ * in which case it cannot fail.
+ * Internal use only.
+ */
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog);
+
+
+#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h b/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
new file mode 100644
index 000000000..ccbdfa090
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* zstd_decompress_internal:
+ * objects and definitions shared within lib/decompress modules */
+
+#ifndef ZSTD_DECOMPRESS_INTERNAL_H
+#define ZSTD_DECOMPRESS_INTERNAL_H
+
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "mem.h" /* BYTE, U16, U32 */
+#include "zstd_internal.h" /* ZSTD_seqSymbol */
+
+
+
+/*-*******************************************************
+ * Constants
+ *********************************************************/
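+/* Base values and extra-bit counts of the literal-length, offset and
+ * match-length codes, as defined by the zstd format specification */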
+static const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40,
+ 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+static const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static const U32 OF_bits[MaxOff+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31 };
+
+static const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59,
+ 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+
+/*-*******************************************************
+ * Decompression types
+ *********************************************************/
+typedef struct {
+    U32 fastMode;
+    U32 tableLog;
+} ZSTD_seqSymbol_header;
+
+typedef struct {
+    U16  nextState;
+    BYTE nbAdditionalBits;
+    BYTE nbBits;
+    U32  baseValue;
+} ZSTD_seqSymbol;
+
+#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
+
+typedef struct {
+ ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
+ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
+ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_entropyDTables_t;
+
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+typedef enum {
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
+    ZSTD_dont_use = 0,            /* Do not use the dictionary (if one exists, free it) */
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
+struct ZSTD_DCtx_s
+{
+ const ZSTD_seqSymbol* LLTptr;
+ const ZSTD_seqSymbol* MLTptr;
+ const ZSTD_seqSymbol* OFTptr;
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyDTables_t entropy;
+ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
+ const void* previousDstEnd; /* detect continuity */
+ const void* prefixStart; /* start of current segment */
+ const void* virtualStart; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameHeader fParams;
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+ ZSTD_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ XXH64_state_t xxhState;
+ size_t headerSize;
+ ZSTD_format_e format;
+ const BYTE* litPtr;
+ ZSTD_customMem customMem;
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+
+ /* dictionary */
+ ZSTD_DDict* ddictLocal;
+ const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+ U32 dictID;
+ int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+ ZSTD_dictUses_e dictUses;
+
+ /* streaming */
+ ZSTD_dStreamStage streamStage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ size_t maxWindowSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t lhSize;
+ void* legacyContext;
+ U32 previousLegacyVersion;
+ U32 legacyVersion;
+ U32 hostageByte;
+ int noForwardProgress;
+
+ /* workspace */
+ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+
+/*-*******************************************************
+ * Shared internal functions
+ *********************************************************/
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize);
+
+/*! ZSTD_checkContinuity() :
+ * check if next `dst` follows previous position, where decompression ended.
+ * If yes, do nothing (continue on current segment).
+ * If not, classify previous segment as "external dictionary", and start a new segment.
+ * This function cannot fail. */
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst);
+
+
+#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
new file mode 100644
index 000000000..47faf6d64
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const current = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0)
+ hashSmall[smHash] = current + i;
+ if (i == 0 || hashLarge[lgHash] == 0)
+ hashLarge[lgHash] = current + i;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ }
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams =
+ dictMode == ZSTD_dictMatchState ?
+ &dms->cParams : NULL;
+ const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ?
+ dms->hashTable : NULL;
+ const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
+ dms->chainTable : NULL;
+ const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictStartIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ?
+ dictCParams->hashLog : hBitsL;
+ const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
+ dictCParams->chainLog : hBitsS;
+ const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictStart);
+
+ assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
+ /* init */
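+    /* with no history at all (empty prefix, no dictionary), position 0 cannot
+     * match anything earlier, so start one byte in */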
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const maxRep = (U32)(ip - prefixLowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ /* Main Search Loop */
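+    /* each position is hashed into both the "long" table (8-byte hash) and the
+     * "small" table (mls-byte hash) ; a long match is preferred, and when only a
+     * short match is found, a long match at ip+1 is tried before accepting it */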
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ U32 offset;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const current = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashLong[h2] = hashSmall[h] = current; /* update hash tables */
+
+ /* check dictMatchState repcode */
+ if (dictMode == ZSTD_dictMatchState
+ && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ goto _match_stored;
+ }
+
+ /* check noDict repcode */
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ goto _match_stored;
+ }
+
+ if (matchIndexL > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchLong) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState long match */
+ U32 const dictMatchIndexL = dictHashLong[dictHL];
+ const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+ assert(dictMatchL < dictEnd);
+
+ if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+ mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+ offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ if (matchIndexS > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState short match */
+ U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ match = dictBase + dictMatchIndexS;
+ matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+ if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ }
+
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+
+_search_next_long:
+
+ {
+ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = current + 1;
+
+ /* check prefix long +1 match */
+ if (matchIndexL3 > prefixLowestIndex) {
+ if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dict long +1 match */
+ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+ assert(dictMatchL3 < dictEnd);
+ if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+ ip++;
+ offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+ }
+
+ /* if no long +1 match, explore the short match we found */
+ if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+ offset = (U32)(current - matchIndexS);
+ while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip - match);
+ while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+
+ /* fall-through */
+
+_match_found:
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
+ && repIndex2 < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+ }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const U32 dictStartIndex = ms->window.lowLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
+ /* Search Loop */
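+    /* indices below prefixStartIndex point into the external dictionary segment ;
+     * ZSTD_count_2segments() handles matches that cross from dict into prefix */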
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
+
+ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ & (repIndex > dictStartIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
+ offset = current - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = current + 1;
+ if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
+ ip++;
+ offset = current+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ offset = current - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else {
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+ } }
+
+ /* found a match : store it */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ & (repIndex2 > dictStartIndex))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.h b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
new file mode 100644
index 000000000..4fa31acfc
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "mem.h" /* U32 */
+#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_errors.h b/vendor/github.com/DataDog/zstd/zstd_errors.h
new file mode 100644
index 000000000..92a343389
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_errors.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*===== dependency =====*/
+#include <stddef.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDERRORLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZSTDERRORLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required, but it allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum rather than on its value whenever possible.
+ *          This is the only supported way to use the error list with versions < v1.3.1
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+  ZSTD_error_workSpace_tooSmall = 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_dstBuffer_null = 74,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+ convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+    which can then be compared against the enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c
new file mode 100644
index 000000000..ed997b441
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const current = (U32)(ip - base);
+ size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+ hashTable[hash0] = current;
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hash] == 0) { /* not yet filled */
+ hashTable[hash] = current + p;
+ } } } }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip0 += (ip0 == prefixStart);
+ ip1 = ip0 + 1;
+ {
+ U32 const maxRep = (U32)(ip0 - prefixStart);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
+ size_t mLength;
+ BYTE const* ip2 = ip0 + 2;
+ size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+ U32 const val0 = MEM_read32(ip0);
+ size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+ U32 const val1 = MEM_read32(ip1);
+ U32 const current0 = (U32)(ip0-base);
+ U32 const current1 = (U32)(ip1-base);
+ U32 const matchIndex0 = hashTable[h0];
+ U32 const matchIndex1 = hashTable[h1];
+ BYTE const* repMatch = ip2-offset_1;
+ const BYTE* match0 = base + matchIndex0;
+ const BYTE* match1 = base + matchIndex1;
+ U32 offcode;
+ hashTable[h0] = current0; /* update hash table */
+ hashTable[h1] = current1; /* update hash table */
+
+ assert(ip0 + 1 == ip1);
+
+ if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
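+            /* repcode found at ip0+2 : extend it one byte backwards when the
+             * preceding bytes also match, then emit it with offset code 0 */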
+ mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+ ip0 = ip2 - mLength;
+ match0 = repMatch - mLength;
+ offcode = 0;
+ goto _match;
+ }
+ if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+ /* found a regular match */
+ goto _offset;
+ }
+ if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+ /* found a regular match after one literal */
+ ip0 = ip1;
+ match0 = match1;
+ goto _offset;
+ }
+ {
+ size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+ assert(step >= 2);
+ ip0 += step;
+ ip1 += step;
+ continue;
+ }
+_offset: /* Requires: ip0, match0 */
+ /* Compute the offset code */
+ offset_2 = offset_1;
+ offset_1 = (U32)(ip0-match0);
+ offcode = offset_1 + ZSTD_REP_MOVE;
+ mLength = 0;
+ /* Count the backwards match length */
+ while (((ip0>anchor) & (match0>prefixStart))
+ && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+ /* Count the forward length */
+ mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+ ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+ /* match found */
+ ip0 += mLength;
+ anchor = ip0;
+ ip1 = ip0 + 1;
+
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ while ( (ip0 <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ip1 = ip0 + 1;
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState == NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
+ const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
+ const U32 dictHLog = dictCParams->hashLog;
+
+ /* otherwise, we would get index underflow when translating a dict index
+ * into a local index */
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+ U32 const current = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashTable[h] = current; /* update hash table */
+
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else if ( (matchIndex <= prefixStartIndex) ) {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+ } else if (MEM_read32(match) != MEM_read32(ip)) {
+ /* it's not a match, and we're not going to check the dictionary */
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a regular match */
+ U32 const offset = (U32)(ip-match);
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ while (((ip>anchor) & (match>prefixStart))
+ && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ assert(base+current+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState != NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
+
+
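+/* ZSTD_compressBlock_fast_extDict_generic() :
+ * fast-strategy variant for the case where the valid window spans two buffers :
+ * indices below prefixStartIndex map into the "extDict" segment (dictBase),
+ * indices at or above it map into the current prefix (base).
+ * Matches may start in either segment ; their length is extended across the
+ * boundary with ZSTD_count_2segments(). */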
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 dictStartIndex = ms->window.lowLimit;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because repcode check at (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+ const U32 matchIndex = hashTable[h];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ size_t mLength;
+ hashTable[h] = current; /* update hash table */
+ assert(offset_1 <= current +1); /* check repIndex */
+
+ if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ if ( (matchIndex < dictStartIndex) ||
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ }
+ { const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset = current - matchIndex;
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ } }
+
+ /* found a match : store it */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.h b/vendor/github.com/DataDog/zstd/zstd_fast.h
new file mode 100644
index 000000000..b74a88c57
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "mem.h" /* U32 */
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h
new file mode 100644
index 000000000..31f756ab5
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_internal.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* This module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least two of them
+ * and which benefit from being inlined. */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "compiler.h"
+#include "mem.h"
+#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+#endif
+#include "xxhash.h" /* XXH_reset, update, digest */
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+
+/*-*************************************
+* shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+/**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information. In order to do that
+ * (particularly, printing the conditional that failed), this can't just wrap
+ * RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+/**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0);
+
+/**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0);
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1<<12)
+
+#define ZSTD_REP_NUM 3 /* number of repcodes */
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
+static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
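+/* Offset codes handed to ZSTD_storeSeq() : values 0..ZSTD_REP_NUM-1 select one
+ * of the recent offsets (repcodes), while a real offset is stored as
+ * (offset + ZSTD_REP_MOVE). repStartValue is the repcode history in effect at
+ * the start of a frame, before any sequence has been emitted. */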
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be initialized using another `static const` variable */
+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1 };
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1 };
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1 };
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy() :
+ * custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
+
+MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = (BYTE*)dstEnd;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
+
+
+/*-*******************************************
+* Private declarations
+*********************************************/
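+/* A seqDef describes one sequence : a run of literals followed by a match.
+ * `offset` holds the offset code forwarded by ZSTD_storeSeq(), plus one ;
+ * `matchLength` is stored minus MINMATCH ; lengths which do not fit on 16 bits
+ * are flagged through longLengthID / longLengthPos in seqStore_t below. */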
+typedef struct seqDef_s {
+ U32 offset;
+ U16 litLength;
+ U16 matchLength;
+} seqDef;
+
+typedef struct {
+ seqDef* sequencesStart;
+ seqDef* sequences;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+ U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+ U32 longLengthPos;
+} seqStore_t;
+
+/**
+ * Contains the compressed frame size and an upper-bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ *       Similarly, before using `decompressedBound`, check for errors using
+ *       `decompressedBound != ZSTD_CONTENTSIZE_ERROR`.
+ */
+typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo; /* decompress & legacy */
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_free(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse(&r, val);
+ return (unsigned)r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
+ return 31 - __builtin_clz(val);
+# else /* Software version */
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+} blockProperties_t; /* declared here for decompress and fullbench */
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+/*! ZSTD_decodeSeqHeaders() :
+ * decode sequence header from src */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c
new file mode 100644
index 000000000..53f998a43
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.c
@@ -0,0 +1,1106 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+
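+/* The "DUBT" match finder keeps one binary tree per hash bucket, but defers
+ * the costly tree insertion : ZSTD_updateDUBT() only links new positions into
+ * the bucket as an unsorted chain (tagged with ZSTD_DUBT_UNSORTED_MARK), and
+ * ZSTD_DUBT_findBestMatch() batch-sorts the pending candidates through
+ * ZSTD_insertDUBT1() when the bucket is actually searched. */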
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ if (idx != target)
+ DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+ idx, target, ms->window.dictLimit);
+ assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
+ (void)iend;
+
+ assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
+ for ( ; idx < target ; idx++) {
+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
+ U32 const matchIndex = hashTable[h];
+
+ U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+ U32* const sortMarkPtr = nextCandidatePtr + 1;
+
+ DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+ hashTable[h] = idx; /* Update Hash Table */
+ *nextCandidatePtr = matchIndex; /* update BT like a chain */
+ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+ }
+ ms->nextToUpdate = target;
+}
+
+
+/** ZSTD_insertDUBT1() :
+ * sort one already inserted but unsorted position
+ * assumption : current >= btlow == (current - btmask)
+ * doesn't fail */
+static void
+ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
+ U32 current, const BYTE* inputEnd,
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
+ const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = ms->window.lowLimit;
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+ current, dictLimit, windowLow);
+ assert(current >= btLow);
+ assert(ip < iend); /* condition for ZSTD_count */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < current);
+ /* note : all candidates are now supposed sorted,
+ * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+ * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
+
+ if ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit) /* both in current segment*/
+ || (current < dictLimit) /* both in extDict */) {
+ const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit)) ?
+ base : dictBase;
+ assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
+ || (current < dictLimit) );
+ match = mBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* preparation for next read of match[matchLength] */
+ }
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+ current, matchIndex, (U32)matchLength);
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+ matchIndex, btLow, nextPtr[1]);
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+ matchIndex, btLow, nextPtr[0]);
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+}
+
+
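+/* ZSTD_DUBT_findBetterDictMatch() :
+ * continues the binary-tree search inside an attached dictMatchState once the
+ * local window has been searched. The dictionary's tree is only read here ;
+ * dictIndexDelta translates dictionary indices into the current window's
+ * index space, so the resulting offset is expressed relative to ip. */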
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ size_t bestLength,
+ U32 nbCompares,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+ const U32 * const dictHashTable = dms->hashTable;
+ U32 const hashLog = dmsCParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 dictMatchIndex = dictHashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ U32 const current = (U32)(ip-base);
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+ U32 const dictLowLimit = dms->window.lowLimit;
+ U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+ U32* const dictBt = dms->chainTable;
+ U32 const btLog = dmsCParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+ (void)dictMode;
+ assert(dictMode == ZSTD_dictMatchState);
+
+ while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+ U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dictBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dictHighLimit)
+ match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ U32 matchIndex = dictMatchIndex + dictIndexDelta;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+ current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ }
+ if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+
+}
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ U32 const current = (U32)(ip-base);
+ U32 const windowLow = ms->window.lowLimit;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= current) ? 0 : current - btMask;
+ U32 const unsortLimit = MAX(btLow, windowLow);
+
+ U32* nextCandidate = bt + 2*(matchIndex&btMask);
+ U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ U32 nbCompares = 1U << cParams->searchLog;
+ U32 nbCandidates = nbCompares;
+ U32 previousCandidate = 0;
+
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
+ assert(ip <= iend-8); /* required for h calculation */
+
+ /* reach end of unsorted candidates list */
+ while ( (matchIndex > unsortLimit)
+ && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+ && (nbCandidates > 1) ) {
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+ matchIndex);
+        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move back up to the original position */
+ previousCandidate = matchIndex;
+ matchIndex = *nextCandidate;
+ nextCandidate = bt + 2*(matchIndex&btMask);
+ unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ nbCandidates --;
+ }
+
+ /* nullify last candidate if it's still unsorted
+ * simplification, detrimental to compression ratio, beneficial for speed */
+ if ( (matchIndex > unsortLimit)
+ && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+ matchIndex);
+ *nextCandidate = *unsortedMark = 0;
+ }
+
+ /* batch sort stacked candidates */
+ matchIndex = previousCandidate;
+ while (matchIndex) { /* will end on matchIndex == 0 */
+ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+ U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+ ZSTD_insertDUBT1(ms, matchIndex, iend,
+ nbCandidates, unsortLimit, dictMode);
+ matchIndex = nextCandidateIdx;
+ nbCandidates++;
+ }
+
+ /* find longest match */
+ { size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = bt + 2*(current&btMask) + 1;
+ U32 matchEndIdx = current + 8 + 1;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ matchIndex = hashTable[h];
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (dictMode == ZSTD_dictMatchState) {
+ nbCompares = 0; /* in addition to avoiding checking any
+ * further in this loop, make sure we
+ * skip checking in the dictionary. */
+ }
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+                smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ bestLength = ZSTD_DUBT_findBetterDictMatch(
+ ms, ip, iend,
+ offsetPtr, bestLength, nbCompares,
+ mls, dictMode);
+ }
+
+ assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+ }
+}
+
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls /* template */,
+ const ZSTD_dictMode_e dictMode)
+{
+ DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateDUBT(ms, ip, iLimit, mls);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+}
+
+
+static size_t
+ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ }
+}
+
+
+static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ }
+}
+
+
+static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+ }
+}
+
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
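+/* chainTable[i & mask] stores the previous position which hashed into the same
+ * bucket as position i, so each bucket forms a singly-linked list reaching at
+ * most chainSize positions back ; hashTable[h] holds the head of the list. */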
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls)
+{
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+}
+
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch_generic (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 lowLimit = ms->window.lowLimit;
+ const U32 current = (U32)(ip-base);
+ const U32 minChain = current > chainSize ? current - chainSize : 0;
+ U32 nbAttempts = 1U << cParams->searchLog;
+ size_t ml=4-1;
+
+ /* HC4 match finder */
+ U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+
+ for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32* const dmsChainTable = dms->chainTable;
+ const U32 dmsChainSize = (1 << dms->cParams.chainLog);
+ const U32 dmsChainMask = dmsChainSize - 1;
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+ const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+ matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+ for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= dmsMinChain) break;
+ matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+ }
+ }
+
+ return ml;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+ }
+}
+
+
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
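+/* The lazy parser : at each position, first check the repcodes, then ask
+ * searchMax() (hash chain when searchMethod==0, binary tree when ==1) for the
+ * best match. With depth >= 1, also evaluate the next one or two positions and
+ * keep the later match only if its estimated gain (match length weighed
+ * against log2(offset) via ZSTD_highbit32()) beats the current candidate. */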
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth,
+ ZSTD_dictMode_e const dictMode)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+
+ typedef size_t (*searchMax_f)(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+ searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
+ (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+ (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
+ U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+ ms->nextToUpdate3 = ms->nextToUpdate;
+ if (dictMode == ZSTD_noDict) {
+ U32 const maxRep = (U32)(ip - prefixLowest);
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+
+ /* check repCode */
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+ }
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+        /* NOTE:
+         * Evaluating start[-offset+ZSTD_REP_MOVE-1] would be undefined behavior:
+         * (-offset+ZSTD_REP_MOVE-1) is unsigned, so adding it to start
+         * overflows the pointer.
+         */
+ /* catch up */
+ if (offset) {
+ if (dictMode == ZSTD_noDict) {
+ while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
+ && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ }
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex = current2 - offset_2;
+ const BYTE* repMatch = dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex :
+ base + repIndex;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( ((ip <= ilimit) & (offset_2>0))
+ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1 ? offset_1 : savedOffset;
+ rep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 lowestIndex = ms->window.lowLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + lowestIndex;
+
+ typedef size_t (*searchMax_f)(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+
+ /* init */
+ ms->nextToUpdate3 = ms->nextToUpdate;
+ ip += (ip == prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+ U32 current = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 repIndex = (U32)(current+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (offset) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repIndex = (U32)((ip-base) - offset_2);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
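
The lazy loop above decides whether to defer emitting a match by comparing integer "gains": a candidate found one byte later must beat the current best by more than the cost of the extra literal plus an offset-cost term approximated with ZSTD_highbit32. A minimal standalone sketch of the depth-1 comparison, using hypothetical lengths and offsets that are not taken from the patch:

/* Worked example of the depth-1 gain test in the loop above (illustration only). */
#include <stdio.h>

/* stand-in for ZSTD_highbit32 : position of the highest set bit */
static unsigned highbit32(unsigned v) { unsigned r = 0; while (v >>= 1) r++; return r; }

int main(void)
{
    /* current best: matchLength = 5 at offset 1000 ; candidate at ip+1: length 6 at offset 32 */
    int const gain1 = (int)(5*4 - highbit32(1000+1) + 4);   /* 20 - 9 + 4 = 15 */
    int const gain2 = (int)(6*4 - highbit32(32+1));         /* 24 - 5     = 19 */
    printf("gain1=%d gain2=%d -> %s\n", gain1, gain2,
           (gain2 > gain1) ? "prefer the later, closer match" : "keep the current match");
    return 0;
}

Because the offset term only grows logarithmically, a one-byte-longer match normally wins unless its offset is dramatically larger.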
+
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
+}
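
The four wrappers above differ only in the (searchMethod, depth) pair they forward to the generic function; per the searchMax selection near its top, searchMethod 1 picks the binary-tree matcher and 0 the hash-chain matcher, while depth counts the extra lazy probes. Restated compactly (no new behavior, just a summary of the calls above):

/* extDict block compressors, as wired above:
 *   greedy  : hash chain,  depth 0   (accept the first qualifying match)
 *   lazy    : hash chain,  depth 1   (re-probe once at ip+1)
 *   lazy2   : hash chain,  depth 2   (re-probe at ip+1 and ip+2)
 *   btlazy2 : binary tree, depth 2
 */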
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.h b/vendor/github.com/DataDog/zstd/zstd_lazy.h
new file mode 100644
index 000000000..bb1763069
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LAZY_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c
new file mode 100644
index 000000000..784d20f3a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include "zstd_ldm.h"
+
+#include "debug.h"
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+#define LDM_HASH_CHAR_OFFSET 10
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams)
+{
+ params->windowLog = cParams->windowLog;
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+ if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (cParams->strategy >= ZSTD_btopt) {
+ /* Get out of the way of the optimal parser */
+ U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
+ assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
+ assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
+ params->minMatchLength = minMatch;
+ }
+ if (params->hashLog == 0) {
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
+ if (params->hashRateLog == 0) {
+ params->hashRateLog = params->windowLog < params->hashLog
+ ? 0
+ : params->windowLog - params->hashLog;
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
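
To make the defaulting above concrete, here is one possible fill-in for a hypothetical configuration (windowLog = 27, strategy below ZSTD_btopt, every LDM field left at 0); the exact value of ZSTD_HASHLOG_MIN comes from the public header and is only assumed not to bind here:

/* Illustrative defaulting, hypothetical inputs (not taken from the patch):
 *   windowLog      = 27                               (copied from cParams)
 *   bucketSizeLog  = LDM_BUCKET_SIZE_LOG              = 3
 *   minMatchLength = LDM_MIN_MATCH_LENGTH             = 64
 *   hashLog        = MAX(ZSTD_HASHLOG_MIN, 27 - 7)    = 20  (assuming ZSTD_HASHLOG_MIN <= 20)
 *   hashRateLog    = 27 - 20                          = 7
 *   bucketSizeLog  = MIN(3, 20)                       = 3
 * ZSTD_ldm_getTableSize() below then reports (1<<17) bucket-offset bytes plus
 * (1<<20) * sizeof(ldmEntry_t) bytes for the hash table itself.
 */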
+
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+ size_t const ldmHSize = ((size_t)1) << params.hashLog;
+ size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+ size_t const ldmBucketSize =
+ ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+ size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
+ return params.enableLdm ? totalSize : 0;
+}
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
+ return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
+}
+
+/** ZSTD_ldm_getSmallHash() :
+ * numBits should be <= 32
+ * If numBits==0, returns 0.
+ * @return : the most significant numBits of value. */
+static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
+{
+ assert(numBits <= 32);
+ return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
+}
+
+/** ZSTD_ldm_getChecksum() :
+ * numBitsToDiscard should be <= 32
+ * @return : the next most significant 32 bits after numBitsToDiscard */
+static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
+{
+ assert(numBitsToDiscard <= 32);
+ return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
+}
+
+/** ZSTD_ldm_getTag() :
+ * Given the hash, returns the most significant numTagBits bits
+ * after (32 + hbits) bits.
+ *
+ * If there are not enough bits remaining, return the last
+ * numTagBits bits. */
+static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
+{
+ assert(numTagBits < 32 && hbits <= 32);
+ if (32 - hbits < numTagBits) {
+ return hash & (((U32)1 << numTagBits) - 1);
+ } else {
+ return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
+ }
+}
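
The three helpers above slice a single 64-bit rolling hash into independent fields. The layout below is spelled out for one hypothetical configuration (hBits = 20, hashRateLog = 7), chosen only for illustration:

/* Hypothetical field layout for hBits = 20, hashRateLog = 7 (illustration only).
 * For a 64-bit rolling hash H:
 *   small hash = H >> 44                  -> bits 63..44 (top 20 bits)
 *   checksum   = (H >> 12) & 0xFFFFFFFF   -> bits 43..12 (next 32 bits)
 *   tag        = (H >> 5) & 0x7F          -> bits 11..5  (next 7 bits)
 * An entry is inserted only when the tag equals (1u<<7)-1, so on average roughly
 * one position in 2^hashRateLog is indexed (see ZSTD_ldm_makeEntryAndInsertByTag below).
 */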
+
+/** ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/** ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
+ BYTE* const bucketOffsets = ldmState->bucketOffsets;
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
+ bucketOffsets[hash]++;
+ bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
+}
+
+/** ZSTD_ldm_makeEntryAndInsertByTag() :
+ *
+ * Gets the small hash, checksum, and tag from the rollingHash.
+ *
+ * If the tag matches (1 << ldmParams.hashRateLog)-1, then
+ * creates an ldmEntry from the offset, and inserts it into the hash table.
+ *
+ * hBits is the length of the small hash, which is the most significant hBits
+ * of rollingHash. The checksum is the next 32 most significant bits, followed
+ * by ldmParams.hashRateLog bits that make up the tag. */
+static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
+ U64 const rollingHash,
+ U32 const hBits,
+ U32 const offset,
+ ldmParams_t const ldmParams)
+{
+ U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
+ U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
+ if (tag == tagMask) {
+ U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
+ U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+ ldmEntry_t entry;
+ entry.offset = offset;
+ entry.checksum = checksum;
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
+ }
+}
+
+/** ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pBase)
+{
+ size_t matchLength = 0;
+ while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
+/** ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+ void const* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+
+ switch(ms->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
+/** ZSTD_ldm_fillLdmHashTable() :
+ *
+ * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
+ * lastHash is the rolling hash that corresponds to lastHashed.
+ *
+ * Returns the rolling hash corresponding to position iend-1. */
+static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
+ U64 lastHash, const BYTE* lastHashed,
+ const BYTE* iend, const BYTE* base,
+ U32 hBits, ldmParams_t const ldmParams)
+{
+ U64 rollingHash = lastHash;
+ const BYTE* cur = lastHashed + 1;
+
+ while (cur < iend) {
+ rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
+ cur[ldmParams.minMatchLength-1],
+ state->hashPower);
+ ZSTD_ldm_makeEntryAndInsertByTag(state,
+ rollingHash, hBits,
+ (U32)(cur - base), ldmParams);
+ ++cur;
+ }
+ return rollingHash;
+}
+
+
+/** ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets ms->nextToUpdate to a position closer to anchor if it is far away
+ * (after a long match, only update tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
+ U32 const current = (U32)(anchor - ms->window.base);
+ if (current > ms->nextToUpdate + 1024) {
+ ms->nextToUpdate =
+ current - MIN(512, current - ms->nextToUpdate - 1024);
+ }
+}
+
+static size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ /* LDM parameters */
+ int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+ U32 const minMatchLength = params->minMatchLength;
+ U64 const hashPower = ldmState->hashPower;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ U32 const ldmBucketSize = 1U << params->bucketSizeLog;
+ U32 const hashRateLog = params->hashRateLog;
+ U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
+ /* Prefix and extDict parameters */
+ U32 const dictLimit = ldmState->window.dictLimit;
+ U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+ BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+ BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+ BYTE const* const lowPrefixPtr = base + dictLimit;
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
+ /* Input positions */
+ BYTE const* anchor = istart;
+ BYTE const* ip = istart;
+ /* Rolling hash */
+ BYTE const* lastHashed = NULL;
+ U64 rollingHash = 0;
+
+ while (ip <= ilimit) {
+ size_t mLength;
+ U32 const current = (U32)(ip - base);
+ size_t forwardMatchLength = 0, backwardMatchLength = 0;
+ ldmEntry_t* bestEntry = NULL;
+ if (ip != istart) {
+ rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
+ lastHashed[minMatchLength],
+ hashPower);
+ } else {
+ rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
+ }
+ lastHashed = ip;
+
+ /* Do not insert and do not look for a match */
+ if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
+ ip++;
+ continue;
+ }
+
+ /* Get the best entry and compute the match lengths */
+ {
+ ldmEntry_t* const bucket =
+ ZSTD_ldm_getBucket(ldmState,
+ ZSTD_ldm_getSmallHash(rollingHash, hBits),
+ *params);
+ ldmEntry_t* cur;
+ size_t bestMatchLength = 0;
+ U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+
+ for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+ if (extDict) {
+ BYTE const* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ BYTE const* const pMatch = curMatchBase + cur->offset;
+ BYTE const* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ BYTE const* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+
+ curForwardMatchLength = ZSTD_count_2segments(
+ ip, pMatch, iend,
+ matchEnd, lowPrefixPtr);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
+ ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
+ lowMatchPtr);
+ curTotalMatchLength = curForwardMatchLength +
+ curBackwardMatchLength;
+ } else { /* !extDict */
+ BYTE const* const pMatch = base + cur->offset;
+ curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
+ ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
+ lowPrefixPtr);
+ curTotalMatchLength = curForwardMatchLength +
+ curBackwardMatchLength;
+ }
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+ }
+
+ /* No match found -- continue searching */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
+ hBits, current,
+ *params);
+ ip++;
+ continue;
+ }
+
+ /* Match found */
+ mLength = forwardMatchLength + backwardMatchLength;
+ ip -= backwardMatchLength;
+
+ {
+ /* Store the sequence:
+ * ip = current - backwardMatchLength
+ * The match is at (bestEntry->offset - backwardMatchLength)
+ */
+ U32 const matchIndex = bestEntry->offset;
+ U32 const offset = current - matchIndex;
+ rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+ /* Out of sequence storage */
+ if (rawSeqStore->size == rawSeqStore->capacity)
+ return ERROR(dstSize_tooSmall);
+ seq->litLength = (U32)(ip - anchor);
+ seq->matchLength = (U32)mLength;
+ seq->offset = offset;
+ rawSeqStore->size++;
+ }
+
+ /* Insert the current entry into the hash table */
+ ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
+ (U32)(lastHashed - base),
+ *params);
+
+ assert(ip + backwardMatchLength == lastHashed);
+
+        /* Fill the hash table from lastHashed+1 to ip+mLength */
+ /* Heuristic: don't need to fill the entire table at end of block */
+ if (ip + mLength <= ilimit) {
+ rollingHash = ZSTD_ldm_fillLdmHashTable(
+ ldmState, rollingHash, lastHashed,
+ ip + mLength, base, hBits, *params);
+ lastHashed = ip + mLength - 1;
+ }
+ ip += mLength;
+ anchor = ip;
+ }
+ return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ U32 const maxDist = 1U << params->windowLog;
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ size_t const kMaxChunkSize = 1 << 20;
+ size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+ size_t chunk;
+ size_t leftoverSize = 0;
+
+ assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+ /* Check that ZSTD_window_update() has been called for this chunk prior
+ * to passing it to this function.
+ */
+ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+ /* The input could be very large (in zstdmt), so it must be broken up into
+ * chunks to enforce the maximum distance and handle overflow correction.
+ */
+ assert(sequences->pos <= sequences->size);
+ assert(sequences->size <= sequences->capacity);
+ for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+ BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+ size_t const remaining = (size_t)(iend - chunkStart);
+ BYTE const *const chunkEnd =
+ (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+ size_t const chunkSize = chunkEnd - chunkStart;
+ size_t newLeftoverSize;
+ size_t const prevSize = sequences->size;
+
+ assert(chunkStart < iend);
+ /* 1. Perform overflow correction if necessary. */
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+ U32 const ldmHSize = 1U << params->hashLog;
+ U32 const correction = ZSTD_window_correctOverflow(
+ &ldmState->window, /* cycleLog */ 0, maxDist, src);
+ ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+ }
+ /* 2. We enforce the maximum offset allowed.
+ *
+ * kMaxChunkSize should be small enough that we don't lose too much of
+ * the window through early invalidation.
+ * TODO: * Test the chunk size.
+         *       * Try invalidation after the sequence generation and test the
+         *         offset against maxDist directly.
+ */
+ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
+ /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+ newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+ ldmState, sequences, params, chunkStart, chunkSize);
+ if (ZSTD_isError(newLeftoverSize))
+ return newLeftoverSize;
+ /* 4. We add the leftover literals from previous iterations to the first
+ * newly generated sequence, or add the `newLeftoverSize` if none are
+ * generated.
+ */
+ /* Prepend the leftover literals from the last call */
+ if (prevSize < sequences->size) {
+ sequences->seq[prevSize].litLength += (U32)leftoverSize;
+ leftoverSize = newLeftoverSize;
+ } else {
+ assert(newLeftoverSize == chunkSize);
+ leftoverSize += chunkSize;
+ }
+ }
+ return 0;
+}
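
The leftover-literal bookkeeping above is easiest to follow on a toy trace with hypothetical per-chunk results (kMaxChunkSize = 1 MB as defined above, srcSize = 3 MB):

/* Hypothetical trace (illustration only):
 *   chunk 0: no sequence found            -> newLeftoverSize == chunkSize, leftoverSize = 1 MB
 *   chunk 1: first sequence has litLength = 4000
 *            -> that sequence's litLength grows by the carried 1 MB,
 *               and leftoverSize restarts at this chunk's newLeftoverSize
 *   chunk 2: no sequence found            -> leftoverSize grows again; the trailing data is
 *            simply not covered by any LDM sequence and is later compressed by the
 *            secondary block compressor inside ZSTD_ldm_blockCompress().
 */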
+
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
+ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+ if (srcSize <= seq->litLength) {
+ /* Skip past srcSize literals */
+ seq->litLength -= (U32)srcSize;
+ return;
+ }
+ srcSize -= seq->litLength;
+ seq->litLength = 0;
+ if (srcSize < seq->matchLength) {
+ /* Skip past the first srcSize of the match */
+ seq->matchLength -= (U32)srcSize;
+ if (seq->matchLength < minMatch) {
+ /* The match is too short, omit it */
+ if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+ seq[1].litLength += seq[0].matchLength;
+ }
+ rawSeqStore->pos++;
+ }
+ return;
+ }
+ srcSize -= seq->matchLength;
+ seq->matchLength = 0;
+ rawSeqStore->pos++;
+ }
+}
+
+/**
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+ U32 const remaining, U32 const minMatch)
+{
+ rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+ assert(sequence.offset > 0);
+ /* Likely: No partial sequence */
+ if (remaining >= sequence.litLength + sequence.matchLength) {
+ rawSeqStore->pos++;
+ return sequence;
+ }
+ /* Cut the sequence short (offset == 0 ==> rest is literals). */
+ if (remaining <= sequence.litLength) {
+ sequence.offset = 0;
+ } else if (remaining < sequence.litLength + sequence.matchLength) {
+ sequence.matchLength = remaining - sequence.litLength;
+ if (sequence.matchLength < minMatch) {
+ sequence.offset = 0;
+ }
+ }
+ /* Skip past `remaining` bytes for the future sequences. */
+ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+ return sequence;
+}
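
A quick worked case for the splitting rule above, with hypothetical numbers:

/* Hypothetical split (illustration only), minMatch = 3:
 *   stored sequence : litLength = 100, matchLength = 200
 *   remaining bytes in the block = 150
 *   -> returned copy : litLength = 100, matchLength = 150 - 100 = 50 (offset kept, 50 >= minMatch)
 *   -> ZSTD_ldm_skipSequences(rawSeqStore, 150, 3) then leaves the stored sequence as
 *      litLength = 0, matchLength = 200 - 50 = 150 for the next block.
 */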
+
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ unsigned const minMatch = cParams->minMatch;
+ ZSTD_blockCompressor const blockCompressor =
+ ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ /* Input positions */
+ BYTE const* ip = istart;
+
+ DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+ assert(rawSeqStore->pos <= rawSeqStore->size);
+ assert(rawSeqStore->size <= rawSeqStore->capacity);
+ /* Loop through each sequence and apply the block compressor to the lits */
+ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+ int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+
+ assert(sequence.offset <= (1U << cParams->windowLog));
+ assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Run the block compressor */
+ DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
+ {
+ size_t const newLitLength =
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
+ ip += sequence.litLength;
+ /* Update the repcodes */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ rep[i] = rep[i-1];
+ rep[0] = sequence.offset;
+ /* Store the sequence */
+ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
+ sequence.offset + ZSTD_REP_MOVE,
+ sequence.matchLength - MINMATCH);
+ ip += sequence.matchLength;
+ }
+ }
+ /* Fill the tables for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Compress the last literals */
+ return blockCompressor(ms, seqStore, rep, ip, iend - ip);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.h b/vendor/github.com/DataDog/zstd/zstd_ldm.h
new file mode 100644
index 000000000..a47846128
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
+#include "zstd.h" /* ZSTD_CCtx, size_t */
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
+
+/**
+ * ZSTD_ldm_generateSequences():
+ *
+ * Generates long distance matching sequences in `sequences`, which parse a prefix
+ * of the source. `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ * sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize);
+
+/**
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences. Returns the length of the last literals.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+/**
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
+ * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+ U32 const minMatch);
+
+
+/** ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables or 0 if LDM is
+ * disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/** ZSTD_ldm_getMaxNbSeq() :
+ * Return an upper bound on the number of sequences that can be produced by
+ * the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/** ZSTD_ldm_adjustParameters() :
+ * If the params->hashRateLog is not set, set it to its default value based on
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not).
+ *
+ * Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTD_LDM_H */
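
The contract described in the comments above amounts to a fixed per-block call order. The sketch below is a hypothetical driver, not code from this patch; it assumes the zstd internal headers are available, that `rawSeqStore` was sized with ZSTD_ldm_getMaxNbSeq(), and that the caller has already advanced the window over `src` (the ZSTD_window_update() precondition noted above).

/* Hypothetical per-block driver (illustration only). */
static size_t compressBlockWithLdm(ldmState_t* ldms, ldmParams_t const* params,
                                   rawSeqStore_t* rawSeqStore,
                                   ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                   U32 rep[ZSTD_REP_NUM],
                                   void const* src, size_t srcSize)
{
    /* 1. Find long-distance sequences covering a prefix of src. */
    size_t const err = ZSTD_ldm_generateSequences(ldms, rawSeqStore, params, src, srcSize);
    if (ZSTD_isError(err)) return err;
    /* 2. Emit them, letting the regular block compressor fill the literal gaps.
     *    Like any block compressor, this returns the size of the last literals. */
    return ZSTD_ldm_blockCompress(rawSeqStore, ms, seqStore, rep, src, srcSize);
}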
diff --git a/vendor/github.com/DataDog/zstd/zstd_legacy.h b/vendor/github.com/DataDog/zstd/zstd_legacy.h
new file mode 100644
index 000000000..e5b383ee4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_legacy.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LEGACY_H
+#define ZSTD_LEGACY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include "mem.h" /* MEM_STATIC */
+#include "error_private.h" /* ERROR */
+#include "zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */
+
+#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
+# undef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 8
+#endif
+
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+# include "zstd_v01.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+# include "zstd_v02.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+# include "zstd_v03.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+# include "zstd_v04.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+# include "zstd_v05.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+# include "zstd_v06.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+# include "zstd_v07.h"
+#endif
+
+/** ZSTD_isLegacy() :
+ @return : > 0 if supported by legacy decoder. 0 otherwise.
+ return value is the version.
+*/
+MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
+{
+ U32 magicNumberLE;
+ if (srcSize<4) return 0;
+ magicNumberLE = MEM_readLE32(src);
+ switch(magicNumberLE)
+ {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+ case ZSTDv01_magicNumberLE:return 1;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+ case ZSTDv02_magicNumber : return 2;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+ case ZSTDv03_magicNumber : return 3;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case ZSTDv04_magicNumber : return 4;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case ZSTDv05_MAGICNUMBER : return 5;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case ZSTDv06_MAGICNUMBER : return 6;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case ZSTDv07_MAGICNUMBER : return 7;
+#endif
+ default : return 0;
+ }
+}
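
A typical use of this probe is a front-end dispatch before the regular decoder. The helper name below is hypothetical; it relies only on ZSTD_isLegacy() above, ZSTD_decompressLegacy() defined further down in this header, and the public ZSTD_decompress():

/* Hypothetical dispatch helper (illustration only). */
static size_t decompressMaybeLegacy(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    if (ZSTD_isLegacy(src, srcSize)) {
        /* v0.1 - v0.7 frame : route to the matching legacy decoder (no dictionary here). */
        return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0);
    }
    /* Current-format frame : use the regular decoder. */
    return ZSTD_decompress(dst, dstCapacity, src, srcSize);
}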
+
+
+MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
+{
+ U32 const version = ZSTD_isLegacy(src, srcSize);
+ if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ if (version==5) {
+ ZSTDv05_parameters fParams;
+ size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
+ if (frResult != 0) return 0;
+ return fParams.srcSize;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ if (version==6) {
+ ZSTDv06_frameParams fParams;
+ size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
+ if (frResult != 0) return 0;
+ return fParams.frameContentSize;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ if (version==7) {
+ ZSTDv07_frameParams fParams;
+ size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
+ if (frResult != 0) return 0;
+ return fParams.frameContentSize;
+ }
+#endif
+ return 0; /* should not be possible */
+}
+
+
+MEM_STATIC size_t ZSTD_decompressLegacy(
+ void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize,
+ const void* dict,size_t dictSize)
+{
+ U32 const version = ZSTD_isLegacy(src, compressedSize);
+ (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
+ switch(version)
+ {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+ case 1 :
+ return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+ case 2 :
+ return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+ case 3 :
+ return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 :
+ return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 :
+ { size_t result;
+ ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
+ if (zd==NULL) return ERROR(memory_allocation);
+ result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+ ZSTDv05_freeDCtx(zd);
+ return result;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 :
+ { size_t result;
+ ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
+ if (zd==NULL) return ERROR(memory_allocation);
+ result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+ ZSTDv06_freeDCtx(zd);
+ return result;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 :
+ { size_t result;
+ ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
+ if (zd==NULL) return ERROR(memory_allocation);
+ result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+ ZSTDv07_freeDCtx(zd);
+ return result;
+ }
+#endif
+ default :
+ return ERROR(prefix_unknown);
+ }
+}
+
+MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ U32 const version = ZSTD_isLegacy(src, srcSize);
+ switch(version)
+ {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+ case 1 :
+ ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+ case 2 :
+ ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+ case 3 :
+ ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 :
+ ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 :
+ ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 :
+ ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 :
+ ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,
+ &frameSizeInfo.compressedSize,
+ &frameSizeInfo.decompressedBound);
+ break;
+#endif
+ default :
+ frameSizeInfo.compressedSize = ERROR(prefix_unknown);
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ break;
+ }
+ return frameSizeInfo;
+}
+
+MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
+
+MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
+{
+ switch(version)
+ {
+ default :
+ case 1 :
+ case 2 :
+ case 3 :
+ (void)legacyContext;
+ return ERROR(version_unsupported);
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
+#endif
+ }
+}
+
+
+MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
+ const void* dict, size_t dictSize)
+{
+ DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
+ if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
+ switch(newVersion)
+ {
+ default :
+ case 1 :
+ case 2 :
+ case 3 :
+ (void)dict; (void)dictSize;
+ return 0;
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 :
+ {
+ ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
+ if (dctx==NULL) return ERROR(memory_allocation);
+ ZBUFFv04_decompressInit(dctx);
+ ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
+ *legacyContext = dctx;
+ return 0;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 :
+ {
+ ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
+ if (dctx==NULL) return ERROR(memory_allocation);
+ ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
+ *legacyContext = dctx;
+ return 0;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 :
+ {
+ ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
+ if (dctx==NULL) return ERROR(memory_allocation);
+ ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
+ *legacyContext = dctx;
+ return 0;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 :
+ {
+ ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
+ if (dctx==NULL) return ERROR(memory_allocation);
+ ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
+ *legacyContext = dctx;
+ return 0;
+ }
+#endif
+ }
+}
+
+
+
+MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
+ ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
+ switch(version)
+ {
+ default :
+ case 1 :
+ case 2 :
+ case 3 :
+ (void)legacyContext; (void)output; (void)input;
+ return ERROR(version_unsupported);
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 :
+ {
+ ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
+ const void* src = (const char*)input->src + input->pos;
+ size_t readSize = input->size - input->pos;
+ void* dst = (char*)output->dst + output->pos;
+ size_t decodedSize = output->size - output->pos;
+ size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+ output->pos += decodedSize;
+ input->pos += readSize;
+ return hintSize;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 :
+ {
+ ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
+ const void* src = (const char*)input->src + input->pos;
+ size_t readSize = input->size - input->pos;
+ void* dst = (char*)output->dst + output->pos;
+ size_t decodedSize = output->size - output->pos;
+ size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+ output->pos += decodedSize;
+ input->pos += readSize;
+ return hintSize;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 :
+ {
+ ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
+ const void* src = (const char*)input->src + input->pos;
+ size_t readSize = input->size - input->pos;
+ void* dst = (char*)output->dst + output->pos;
+ size_t decodedSize = output->size - output->pos;
+ size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+ output->pos += decodedSize;
+ input->pos += readSize;
+ return hintSize;
+ }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 :
+ {
+ ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
+ const void* src = (const char*)input->src + input->pos;
+ size_t readSize = input->size - input->pos;
+ void* dst = (char*)output->dst + output->pos;
+ size_t decodedSize = output->size - output->pos;
+ size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+ output->pos += decodedSize;
+ input->pos += readSize;
+ return hintSize;
+ }
+#endif
+ }
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LEGACY_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c
new file mode 100644
index 000000000..efb69d326
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.c
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "hist.h"
+#include "zstd_opt.h"
+
+
+#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
+#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */
+#define ZSTD_MAX_PRICE (1<<30)
+
+#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
+
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
+
+#if 0 /* approximation at bit level */
+# define BITCOST_ACCURACY 0
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else /* opt==approx, ultra==accurate */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
+{
+ return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+ U32 const stat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(stat);
+ U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + BITCOST_ACCURACY < 31);
+ return weight;
+}
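
A small numeric check of the two weight functions above (statistics are hypothetical; BITCOST_MULTIPLIER is 256 per the active branch above):

/* Worked example (illustration only):
 *   rawStat = 5  -> stat = 6, hb = ZSTD_highbit32(6) = 2
 *     ZSTD_bitWeight(5)  = 2*256                = 512    (2.00 "bits")
 *     ZSTD_fracWeight(5) = 2*256 + (6*256 >> 2) = 896    (3.50 "bits")
 *   rawStat = 7  -> stat = 8, hb = 3
 *     ZSTD_fracWeight(7) = 3*256 + (8*256 >> 3) = 1024   (4.00 "bits")
 * Within [2^hb, 2^(hb+1)) the fractional weight rises linearly from hb+1 toward hb+2,
 * i.e. it is a piecewise-linear stand-in for log2; the constant offset is shared by all
 * symbols and largely cancels once prices are formed as differences of weights below.
 */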
+
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+ return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
+
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+ return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+ optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+ optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
+}
+
+
+/* ZSTD_downscaleStat() :
+ * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
+ * return the resulting sum of elements */
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+{
+ U32 s, sum=0;
+ DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
+ assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+ sum += table[s];
+ }
+ return sum;
+}
+
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ * take hints from dictionary if there is one
+ * or init from zero, using src for literals stats, or flat 1 for match symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+ const BYTE* const src, size_t const srcSize,
+ int const optLevel)
+{
+ int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
+ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+ optPtr->priceType = zop_dynamic;
+
+ if (optPtr->litLengthSum == 0) { /* first block : init */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
+ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ optPtr->priceType = zop_predef;
+ }
+
+ assert(optPtr->symbolCosts != NULL);
+ if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+ /* huffman table presumed generated by dictionary */
+ optPtr->priceType = zop_dynamic;
+
+ if (compressedLiterals) {
+ unsigned lit;
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
+ for (lit=0; lit<=MaxLit; lit++) {
+ U32 const scaleLog = 11; /* scale to 2K */
+ U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+ assert(bitCost <= scaleLog);
+ optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litSum += optPtr->litFreq[lit];
+ } }
+
+ { unsigned ll;
+ FSE_CState_t llstate;
+ FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+ optPtr->litLengthSum = 0;
+ for (ll=0; ll<=MaxLL; ll++) {
+ U32 const scaleLog = 10; /* scale to 1K */
+ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+ assert(bitCost < scaleLog);
+ optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+ } }
+
+ { unsigned ml;
+ FSE_CState_t mlstate;
+ FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+ optPtr->matchLengthSum = 0;
+ for (ml=0; ml<=MaxML; ml++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+ assert(bitCost < scaleLog);
+ optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+ } }
+
+ { unsigned of;
+ FSE_CState_t ofstate;
+ FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+ optPtr->offCodeSum = 0;
+ for (of=0; of<=MaxOff; of++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+ assert(bitCost < scaleLog);
+ optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->offCodeSum += optPtr->offCodeFreq[of];
+ } }
+
+ } else { /* not a dictionary */
+
+ assert(optPtr->litFreq != NULL);
+ if (compressedLiterals) {
+ unsigned lit = MaxLit;
+ HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ }
+
+ { unsigned ll;
+ for (ll=0; ll<=MaxLL; ll++)
+ optPtr->litLengthFreq[ll] = 1;
+ }
+ optPtr->litLengthSum = MaxLL+1;
+
+ { unsigned ml;
+ for (ml=0; ml<=MaxML; ml++)
+ optPtr->matchLengthFreq[ml] = 1;
+ }
+ optPtr->matchLengthSum = MaxML+1;
+
+ { unsigned of;
+ for (of=0; of<=MaxOff; of++)
+ optPtr->offCodeFreq[of] = 1;
+ }
+ optPtr->offCodeSum = MaxOff+1;
+
+ }
+
+ } else { /* new block : re-use previous statistics, scaled down */
+
+ if (compressedLiterals)
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+ }
+
+ ZSTD_setBasePrices(optPtr, optLevel);
+}
+
+/* ZSTD_rawLiteralsCost() :
+ * price of literals (only) in the specified segment (whose length can be 0).
+ * does not include price of literalLength symbol */
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+{
+ if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */
+
+ if (optPtr->priceType == zop_predef)
+        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
+
+ /* dynamic statistics */
+ { U32 price = litLength * optPtr->litSumBasePrice;
+ U32 u;
+ for (u=0; u < litLength; u++) {
+ assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
+ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ }
+ return price;
+ }
+}
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
+{
+ if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+
+ /* dynamic statistics */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + optPtr->litLengthSumBasePrice
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+ }
+}
+
+/* ZSTD_litLengthContribution() :
+ * @return ( cost(litlength) - cost(0) )
+ * this value can then be added to rawLiteralsCost()
+ * to provide a cost which is directly comparable to a match ending at same position */
+static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
+{
+ if (optPtr->priceType >= zop_predef) return WEIGHT(litLength, optLevel);
+
+ /* dynamic statistics */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ int const contribution = (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + WEIGHT(optPtr->litLengthFreq[0], optLevel) /* note: log2litLengthSum cancel out */
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+#if 1
+ return contribution;
+#else
+ return MAX(0, contribution); /* sometimes better, sometimes not ... */
+#endif
+ }
+}
+
+/* ZSTD_literalsContribution() :
+ * creates a fake cost for the literals part of a sequence
+ * which can be compared to the ending cost of a match
+ * should a new match start at this position */
+static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+{
+ int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+ + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
+ return contribution;
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offset,
+ U32 const matchLength,
+ const optState_t* const optPtr,
+ int const optLevel)
+{
+ U32 price;
+ U32 const offCode = ZSTD_highbit32(offset+1);
+ U32 const mlBase = matchLength - MINMATCH;
+ assert(matchLength >= MINMATCH);
+
+ if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
+ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+
+ /* dynamic statistics */
+ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+ if ((optLevel<2) /*static*/ && offCode >= 20)
+ price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
+
+ /* match Length */
+ { U32 const mlCode = ZSTD_MLcode(mlBase);
+ price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
+ }
+
+    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
+
+ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+ return price;
+}
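
The long-distance handicap near the end of the offset pricing is easiest to see with hypothetical offsets (BITCOST_MULTIPLIER = 256 as defined above; the handicap applies only when optLevel < 2):

/* Worked example (illustration only):
 *   offset = (1<<19) - 1 -> offCode = ZSTD_highbit32(offset+1) = 19 -> no handicap
 *   offset = (1<<22) - 1 -> offCode = 22 -> extra (22-19)*2*256 = 1536, i.e. 6 "bits"
 * At the lower optimization level a long-distance match therefore needs to be
 * noticeably longer before it is preferred, which keeps decompression cache-friendly.
 */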
+
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
+static void ZSTD_updateStats(optState_t* const optPtr,
+ U32 litLength, const BYTE* literals,
+ U32 offsetCode, U32 matchLength)
+{
+ /* literals */
+ if (ZSTD_compressedLiterals(optPtr)) {
+ U32 u;
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ }
+
+ /* literal Length */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
+ /* match offset code (0-2=>repCode; 3+=>offset+2) */
+ { U32 const offCode = ZSTD_highbit32(offsetCode+1);
+ assert(offCode <= MaxOff);
+ optPtr->offCodeFreq[offCode]++;
+ optPtr->offCodeSum++;
+ }
+
+ /* match Length */
+ { U32 const mlBase = matchLength - MINMATCH;
+ U32 const mlCode = ZSTD_MLcode(mlBase);
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must be at least 4 bytes before end of buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
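
Why the shifts above are safe for 3-byte comparisons: both endianness branches keep exactly the first three bytes of memPtr in the 32-bit result (just at different positions), so two positions compare equal if and only if their first three bytes match. A byte-level view, writing the bytes at memPtr as b0 b1 b2 b3:

/* Byte-level view (illustration only), memPtr = { b0, b1, b2, b3 }:
 *   little-endian : MEM_read32 = b0 | b1<<8 | b2<<16 | b3<<24
 *                   << 8       = b0<<8 | b1<<16 | b2<<24          (b3 discarded)
 *   big-endian    : MEM_read32 = b0<<24 | b1<<16 | b2<<8 | b3
 *                   >> 8       = b0<<16 | b1<<8 | b2              (b3 discarded)
 * Only b0, b1, b2 survive either way, so the value is meaningful purely for equality
 * comparisons of 3-byte prefixes, never as a portable numeric hash.
 */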
+
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+ const BYTE* const base = ms->window.base;
+ U32 idx = ms->nextToUpdate3;
+ U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
+ size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+ assert(hashLog3 > 0);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
+ return hashTable3[hash3];
+}
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/** ZSTD_insertBt1() : add one or multiple positions to tree.
+ * ip : assumed <= iend-8 .
+ * @return : nb of positions added */
+static U32 ZSTD_insertBt1(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ U32 const mls, const int extDict)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 current = (U32)(ip-base);
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = ms->window.lowLimit;
+ U32 matchEndIdx = current+8+1;
+ size_t bestLength = 8;
+ U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
+
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = current; /* Update Hash Table */
+
+ assert(windowLow > 0);
+ while (nbCompares-- && (matchIndex >= windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < current);
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+
+ if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ assert(matchEndIdx > current + 8);
+ return matchEndIdx - (current + 8);
+}
+
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+ DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
+
+ while(idx < target)
+ idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+ ms->nextToUpdate = target;
+}
+
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+ U32 rep[ZSTD_REP_NUM],
+ U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ ZSTD_match_t* matches,
+ const U32 lengthToBeat,
+ U32 const mls /* template */)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ const BYTE* const base = ms->window.base;
+ U32 const current = (U32)(ip-base);
+ U32 const hashLog = cParams->hashLog;
+ U32 const minMatch = (mls==3) ? 3 : 4;
+ U32* const hashTable = ms->hashTable;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ U32 const dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32 const btLow = btMask >= current ? 0 : current - btMask;
+ U32 const windowLow = ms->window.lowLimit;
+ U32 const matchLow = windowLow ? windowLow : 1;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = bt + 2*(current&btMask) + 1;
+ U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+ U32 nbCompares = 1U << cParams->searchLog;
+
+ const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_compressionParameters* const dmsCParams =
+ dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+ const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+ const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+ U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+ U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+ U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+ U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+ U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+ U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+ U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
+ size_t bestLength = lengthToBeat-1;
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
+
+ /* check repCode */
+ assert(ll0 <= 1); /* necessarily 1 or 0 */
+ { U32 const lastR = ZSTD_REP_NUM + ll0;
+ U32 repCode;
+ for (repCode = ll0; repCode < lastR; repCode++) {
+ U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ U32 const repIndex = current - repOffset;
+ U32 repLen = 0;
+ assert(current >= dictLimit);
+ if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */
+ if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
+ repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+ }
+ } else { /* repIndex < dictLimit || repIndex >= current */
+ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+ dmsBase + repIndex - dmsIndexDelta :
+ dictBase + repIndex;
+ assert(current >= windowLow);
+ if ( dictMode == ZSTD_extDict
+ && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */
+ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ }
+ if (dictMode == ZSTD_dictMatchState
+ && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */
+ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
+ } }
+ /* save longer solution */
+ if (repLen > bestLength) {
+ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+ repCode, ll0, repOffset, repLen);
+ bestLength = repLen;
+ matches[mnum].off = repCode - ll0;
+ matches[mnum].len = (U32)repLen;
+ mnum++;
+ if ( (repLen > sufficient_len)
+ | (ip+repLen == iLimit) ) { /* best possible */
+ return mnum;
+ } } } }
+
+ /* HC3 match finder */
+ if ((mls == 3) /*static*/ && (bestLength < mls)) {
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
+ if ((matchIndex3 >= matchLow)
+ & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+ size_t mlen;
+ if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
+ const BYTE* const match = base + matchIndex3;
+ mlen = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex3;
+ mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+ }
+
+ /* save best solution */
+ if (mlen >= mls /* == 3 > bestLength */) {
+ DEBUGLOG(8, "found small match with hlog3, of length %u",
+ (U32)mlen);
+ bestLength = mlen;
+ assert(current > matchIndex3);
+ assert(mnum==0); /* no prior solution */
+ matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
+ matches[0].len = (U32)mlen;
+ mnum = 1;
+ if ( (mlen > sufficient_len) |
+ (ip+mlen == iLimit) ) { /* best possible length */
+ ms->nextToUpdate = current+1; /* skip insertion */
+ return 1;
+ }
+ }
+ }
+ /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+ }
+
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex >= matchLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+ assert(current > matchIndex);
+
+ if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* prepare for match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+ assert(matchEndIdx > matchIndex);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+ break; /* drop, to preserve bt consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
+ } else {
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+ U32 dictMatchIndex = dms->hashTable[dmsH];
+ const U32* const dmsBt = dms->chainTable;
+ commonLengthSmaller = commonLengthLarger = 0;
+ while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+ const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dmsBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dmsHighLimit)
+ match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ matchIndex = dictMatchIndex + dmsIndexDelta;
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
+ if (match[matchLength] < ip[matchLength]) {
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+ }
+
+ assert(matchEndIdx > current+8);
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ return mnum;
+}
+
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
+ U32 rep[ZSTD_REP_NUM], U32 const ll0,
+ ZSTD_match_t* matches, U32 const lengthToBeat)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const matchLengthSearch = cParams->minMatch;
+ DEBUGLOG(8, "ZSTD_BtGetAllMatches");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
+ switch(matchLengthSearch)
+ {
+ case 3 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 3);
+ default :
+ case 4 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 4);
+ case 5 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 5);
+ case 7 :
+ case 6 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 6);
+ }
+}
+
+
+/*-*******************************
+* Optimal parser
+*********************************/
+typedef struct repcodes_s {
+ U32 rep[3];
+} repcodes_t;
+
+static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+{
+ repcodes_t newReps;
+ if (offset >= ZSTD_REP_NUM) { /* full offset */
+ newReps.rep[2] = rep[1];
+ newReps.rep[1] = rep[0];
+ newReps.rep[0] = offset - ZSTD_REP_MOVE;
+ } else { /* repcode */
+ U32 const repCode = offset + ll0;
+ if (repCode > 0) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+ newReps.rep[1] = rep[0];
+ newReps.rep[0] = currentOffset;
+ } else { /* repCode == 0 */
+ memcpy(&newReps, rep, sizeof(newReps));
+ }
+ }
+ return newReps;
+}
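+
+/* Illustration (note added for clarity, not from upstream sources) : with rep history {8, 4, 2},
+ * a match stored as a full offset (offset >= ZSTD_REP_NUM, real offset = offset - ZSTD_REP_MOVE)
+ * rotates the history to {realOffset, 8, 4};
+ * repcode 1 with a non-zero literal length (ll0==0) reorders it to {4, 8, 2};
+ * and repCode==ZSTD_REP_NUM (repcode field 2 with ll0==1) uses rep[0]-1, giving {7, 8, 4}. */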
+
+
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
+{
+ return sol.litlen + sol.mlen;
+}
+
+#if 0 /* debug */
+
+static void
+listStats(const U32* table, int lastEltID)
+{
+ int const nbElts = lastEltID + 1;
+ int enb;
+ for (enb=0; enb < nbElts; enb++) {
+ (void)table;
+ //RAWLOG(2, "%3i:%3i, ", enb, table[enb]);
+ RAWLOG(2, "%4i,", table[enb]);
+ }
+ RAWLOG(2, " \n");
+}
+
+#endif
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const int optLevel,
+ const ZSTD_dictMode_e dictMode)
+{
+ optState_t* const optStatePtr = &ms->opt;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
+ ZSTD_optimal_t lastSequence;
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+ (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+ assert(optLevel <= 2);
+ ms->nextToUpdate3 = ms->nextToUpdate;
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
+ ip += (ip==prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, last_pos = 0;
+
+ /* find first match */
+ { U32 const litlen = (U32)(ip - anchor);
+ U32 const ll0 = !litlen;
+ U32 const nbMatches = ZSTD_BtGetAllMatches(ms, ip, iend, dictMode, rep, ll0, matches, minMatch);
+ if (!nbMatches) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 0; /* means is_a_literal */
+ opt[0].litlen = litlen;
+ opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
+
+ /* large match -> immediate encoding */
+ { U32 const maxML = matches[nbMatches-1].len;
+ U32 const maxOffset = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
+
+ if (maxML > sufficient_len) {
+ lastSequence.litlen = litlen;
+ lastSequence.mlen = maxML;
+ lastSequence.off = maxOffset;
+ DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ maxML, sufficient_len);
+ cur = 0;
+ last_pos = ZSTD_totalLen(lastSequence);
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
+ { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 pos;
+ U32 matchNb;
+ for (pos = 1; pos < minMatch; pos++) {
+ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ }
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const end = matches[matchNb].len;
+ repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
+ for ( ; pos <= end ; pos++ ) {
+ U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+ U32 const sequencePrice = literalsPrice + matchPrice;
+ DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+ pos, ZSTD_fCost(sequencePrice));
+ opt[pos].mlen = pos;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = sequencePrice;
+ ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
+ memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
+ } }
+ last_pos = pos-1;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+ assert(cur < ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+
+ /* Fix current position with one literal if cheaper */
+ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ int const price = opt[cur-1].price
+ + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+ - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+ opt[cur].mlen = 0;
+ opt[cur].off = 0;
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
+ memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
+ } else {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+ opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ }
+ }
+
+ /* last match must start at a minimum distance of 8 from oend */
+ if (inr > ilimit) continue;
+
+ if (cur == last_pos) break;
+
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+ DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
+
+ { U32 const ll0 = (opt[cur].mlen != 0);
+ U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+ U32 const previousPrice = opt[cur].price;
+ U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 const nbMatches = ZSTD_BtGetAllMatches(ms, inr, iend, dictMode, opt[cur].rep, ll0, matches, minMatch);
+ U32 matchNb;
+ if (!nbMatches) {
+ DEBUGLOG(7, "rPos:%u : no match found", cur);
+ continue;
+ }
+
+ { U32 const maxML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+ inr-istart, cur, nbMatches, maxML);
+
+ if ( (maxML > sufficient_len)
+ || (cur + maxML >= ZSTD_OPT_NUM) ) {
+ lastSequence.mlen = maxML;
+ lastSequence.off = matches[nbMatches-1].off;
+ lastSequence.litlen = litlen;
+ cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
+ last_pos = cur + ZSTD_totalLen(lastSequence);
+ if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ goto _shortestPath;
+ } }
+
+ /* set prices using matches found at position == cur */
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
+ U32 const lastML = matches[matchNb].len;
+ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+ U32 mlen;
+
+ DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, litlen);
+
+ for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
+ U32 const pos = cur + mlen;
+ int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = price;
+ ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
+ memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
+ } else {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
+ }
+ } } }
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
+ lastSequence = opt[last_pos];
+ cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+ assert(cur < ZSTD_OPT_NUM); /* control overflow*/
+
+_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+ assert(opt[0].mlen == 0);
+
+ { U32 const storeEnd = cur + 1;
+ U32 storeStart = storeEnd;
+ U32 seqPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+ assert(storeEnd < ZSTD_OPT_NUM);
+ DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+ opt[storeEnd] = lastSequence;
+ while (seqPos > 0) {
+ U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ storeStart--;
+ DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+ opt[storeStart] = opt[seqPos];
+ seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ }
+
+ /* save sequences */
+ DEBUGLOG(6, "sending selected sequences into seqStore")
+ { U32 storePos;
+ for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+ U32 const llen = opt[storePos].litlen;
+ U32 const mlen = opt[storePos].mlen;
+ U32 const offCode = opt[storePos].off;
+ U32 const advance = llen + mlen;
+ DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+ if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
+ assert(storePos == storeEnd); /* must be last sequence */
+ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
+ continue; /* will finish */
+ }
+
+ /* repcodes update : like ZSTD_updateRep(), but update in place */
+ if (offCode >= ZSTD_REP_NUM) { /* full offset */
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = offCode - ZSTD_REP_MOVE;
+ } else { /* repcode */
+ U32 const repCode = offCode + (llen==0);
+ if (repCode) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ if (repCode >= 2) rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = currentOffset;
+ } }
+
+ assert(anchor + llen <= iend);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
+ anchor += advance;
+ ip = anchor;
+ } }
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+
+ } /* while (ip < ilimit) */
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+}
+
+
+/* used in 2-pass strategy */
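+/* Each count c becomes (c << (ZSTD_FREQ_DIV+bonus)) - 1 (note added for clarity, not from upstream),
+ * so the statistics gathered by the first pass enter the second pass with a much heavier weight. */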
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
+{
+ U32 s, sum=0;
+ assert(ZSTD_FREQ_DIV+bonus >= 0);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] <<= ZSTD_FREQ_DIV+bonus;
+ table[s]--;
+ sum += table[s];
+ }
+ return sum;
+}
+
+/* used in 2-pass strategy */
+MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+ optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+}
+
+/* ZSTD_initStats_ultra():
+ * make a first compression pass, just to seed stats with more accurate starting values.
+ * only works on first block, with no dictionary and no ldm.
+ * this function cannot error, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ memcpy(tmpRep, rep, sizeof(tmpRep));
+
+ DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+ assert(ms->opt.litLengthSum == 0); /* first block */
+ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
+ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
+ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */
+
+ ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/
+
+ /* invalidate first scan from history */
+ ZSTD_resetSeqStore(seqStore);
+ ms->window.base -= srcSize;
+ ms->window.dictLimit += (U32)srcSize;
+ ms->window.lowLimit = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->nextToUpdate3 = ms->window.dictLimit;
+
+ /* reinforce weight of collected statistics */
+ ZSTD_upscaleStats(&ms->opt);
+}
+
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 const current = (U32)((const BYTE*)src - ms->window.base);
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+ /* 2-pass strategy:
+ * this strategy makes a first pass over the first block to collect statistics
+ * and seed the next round's statistics with them.
+ * After the 1st pass, the function forgets everything and starts a new block.
+ * Consequently, this can only work if no data has been previously loaded into the tables,
+ * aka, no dictionary, no prefix, no ldm preprocessing.
+ * The compression ratio gain is generally small (~0.5% on the first block),
+ * the cost is 2x cpu time on the first block. */
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ if ( (ms->opt.litLengthSum==0) /* first block */
+ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
+ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
+ && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ ) {
+ ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+ }
+
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
+}
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is specific to the first block only (no prefix) */
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.h b/vendor/github.com/DataDog/zstd/zstd_opt.h
new file mode 100644
index 000000000..094f74766
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is specific to the first block only (no prefix) */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_OPT_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go
new file mode 100644
index 000000000..233035352
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_stream.go
@@ -0,0 +1,294 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#define ZBUFF_DISABLE_DEPRECATE_WARNINGS
+#include "zstd.h"
+#include "zbuff.h"
+*/
+import "C"
+import (
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "unsafe"
+)
+
+var errShortRead = errors.New("short read")
+
+// Writer is an io.WriteCloser that zstd-compresses its input.
+type Writer struct {
+ CompressionLevel int
+
+ ctx *C.ZSTD_CCtx
+ dict []byte
+ dstBuffer []byte
+ firstError error
+ underlyingWriter io.Writer
+}
+
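+// resize returns a []byte of length newSize, reusing in's backing array when
+// it is large enough and growing it (zero-filling the new tail) otherwise.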
+func resize(in []byte, newSize int) []byte {
+ if in == nil {
+ return make([]byte, newSize)
+ }
+ if newSize <= cap(in) {
+ return in[:newSize]
+ }
+ toAdd := newSize - len(in)
+ return append(in, make([]byte, toAdd)...)
+}
+
+// NewWriter creates a new Writer with default compression options. Writes to
+// the writer will be written in compressed form to w.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterLevelDict(w, DefaultCompression, nil)
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming default compression.
+//
+// The level can be DefaultCompression or any integer value between BestSpeed
+// and BestCompression inclusive.
+func NewWriterLevel(w io.Writer, level int) *Writer {
+ return NewWriterLevelDict(w, level, nil)
+}
+
+// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
+// compress with. If the dictionary is empty or nil it is ignored. The dictionary
+// should not be modified until the writer is closed.
+func NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer {
+ var err error
+ ctx := C.ZSTD_createCCtx()
+
+ if len(dict) == 0 { // an empty or nil dictionary is ignored, as documented
+ err = getError(int(C.ZSTD_compressBegin(ctx,
+ C.int(level))))
+ } else {
+ err = getError(int(C.ZSTD_compressBegin_usingDict(
+ ctx,
+ unsafe.Pointer(&dict[0]),
+ C.size_t(len(dict)),
+ C.int(level))))
+ }
+
+ return &Writer{
+ CompressionLevel: level,
+ ctx: ctx,
+ dict: dict,
+ dstBuffer: make([]byte, CompressBound(1024)),
+ firstError: err,
+ underlyingWriter: w,
+ }
+}
+
+// Write writes a compressed form of p to the underlying io.Writer.
+func (w *Writer) Write(p []byte) (int, error) {
+ if w.firstError != nil {
+ return 0, w.firstError
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ // Check if dstBuffer is large enough
+ if len(w.dstBuffer) < CompressBound(len(p)) {
+ w.dstBuffer = make([]byte, CompressBound(len(p)))
+ }
+
+ retCode := C.ZSTD_compressContinue(
+ w.ctx,
+ unsafe.Pointer(&w.dstBuffer[0]),
+ C.size_t(len(w.dstBuffer)),
+ unsafe.Pointer(&p[0]),
+ C.size_t(len(p)))
+
+ if err := getError(int(retCode)); err != nil {
+ return 0, err
+ }
+ written := int(retCode)
+
+ // Write the compressed data to the underlying writer
+ _, err := w.underlyingWriter.Write(w.dstBuffer[:written])
+
+ // Same behaviour as zlib: we can't know how much data was actually written,
+ // only whether there was an error
+ if err != nil {
+ return 0, err
+ }
+ return len(p), err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer and freeing objects, but does not close the underlying io.Writer.
+func (w *Writer) Close() error {
+ retCode := C.ZSTD_compressEnd(
+ w.ctx,
+ unsafe.Pointer(&w.dstBuffer[0]),
+ C.size_t(len(w.dstBuffer)),
+ unsafe.Pointer(nil),
+ C.size_t(0))
+
+ if err := getError(int(retCode)); err != nil {
+ return err
+ }
+ written := int(retCode)
+ retCode = C.ZSTD_freeCCtx(w.ctx) // Free the compression context before writing out the final frame bytes
+
+ if err := getError(int(retCode)); err != nil {
+ return err
+ }
+
+ _, err := w.underlyingWriter.Write(w.dstBuffer[:written])
+ if err != nil {
+ return err
+ }
+ return nil
+}
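+
+// compressStream is an illustrative sketch, not part of the upstream API: it
+// shows the intended NewWriter/Write/Close sequence. The helper name is
+// hypothetical and error handling is kept minimal on purpose.
+func compressStream(w io.Writer, data []byte) error {
+	zw := NewWriter(w)
+	if _, err := zw.Write(data); err != nil {
+		zw.Close() // best effort; the Write error takes precedence
+		return err
+	}
+	return zw.Close() // flushes the final frame bytes
+}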
+
+// reader is an io.ReadCloser that decompresses when read from.
+type reader struct {
+ ctx *C.ZBUFF_DCtx
+ compressionBuffer []byte
+ compressionLeft int
+ decompressionBuffer []byte
+ decompOff int
+ decompSize int
+ dict []byte
+ firstError error
+ recommendedSrcSize int
+ underlyingReader io.Reader
+}
+
+// NewReader creates a new io.ReadCloser. Reads from the returned ReadCloser
+// read and decompress data from r. It is the caller's responsibility to call
+// Close on the ReadCloser when done. If this is not done, underlying objects
+// in the zstd library will not be freed.
+func NewReader(r io.Reader) io.ReadCloser {
+ return NewReaderDict(r, nil)
+}
+
+// NewReaderDict is like NewReader but uses a preset dictionary. NewReaderDict
+// ignores the dictionary if it is nil.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ var err error
+ ctx := C.ZBUFF_createDCtx()
+ if len(dict) == 0 {
+ err = getError(int(C.ZBUFF_decompressInit(ctx)))
+ } else {
+ err = getError(int(C.ZBUFF_decompressInitDictionary(
+ ctx,
+ unsafe.Pointer(&dict[0]),
+ C.size_t(len(dict)))))
+ }
+ cSize := int(C.ZBUFF_recommendedDInSize())
+ dSize := int(C.ZBUFF_recommendedDOutSize())
+ if cSize <= 0 {
+ panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize))
+ }
+ if dSize <= 0 {
+ panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize))
+ }
+
+ compressionBuffer := make([]byte, cSize)
+ decompressionBuffer := make([]byte, dSize)
+ return &reader{
+ ctx: ctx,
+ dict: dict,
+ compressionBuffer: compressionBuffer,
+ decompressionBuffer: decompressionBuffer,
+ firstError: err,
+ recommendedSrcSize: cSize,
+ underlyingReader: r,
+ }
+}
+
+// Close frees the allocated C objects
+func (r *reader) Close() error {
+ return getError(int(C.ZBUFF_freeDCtx(r.ctx)))
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+
+ // If we already have enough bytes, return
+ if r.decompSize-r.decompOff >= len(p) {
+ copy(p, r.decompressionBuffer[r.decompOff:])
+ r.decompOff += len(p)
+ return len(p), nil
+ }
+
+ copy(p, r.decompressionBuffer[r.decompOff:r.decompSize])
+ got := r.decompSize - r.decompOff
+ r.decompSize = 0
+ r.decompOff = 0
+
+ for got < len(p) {
+ // Populate src
+ src := r.compressionBuffer
+ reader := r.underlyingReader
+ n, err := TryReadFull(reader, src[r.compressionLeft:])
+ if err != nil && err != errShortRead { // Handle underlying reader errors first
+ return 0, fmt.Errorf("failed to read from underlying reader: %s", err)
+ } else if n == 0 && r.compressionLeft == 0 {
+ return got, io.EOF
+ }
+ src = src[:r.compressionLeft+n]
+
+ // C code
+ cSrcSize := C.size_t(len(src))
+ cDstSize := C.size_t(len(r.decompressionBuffer))
+ retCode := int(C.ZBUFF_decompressContinue(
+ r.ctx,
+ unsafe.Pointer(&r.decompressionBuffer[0]),
+ &cDstSize,
+ unsafe.Pointer(&src[0]),
+ &cSrcSize))
+
+ // Keep src alive here even though it is reused below; that later use might be removed at some point
+ runtime.KeepAlive(src)
+ if err = getError(retCode); err != nil {
+ return 0, fmt.Errorf("failed to decompress: %s", err)
+ }
+
+ // Put any unconsumed input back at the start of the compression buffer
+ if int(cSrcSize) < len(src) {
+ left := src[int(cSrcSize):]
+ copy(r.compressionBuffer, left)
+ }
+ r.compressionLeft = len(src) - int(cSrcSize)
+ r.decompSize = int(cDstSize)
+ r.decompOff = copy(p[got:], r.decompressionBuffer[:r.decompSize])
+ got += r.decompOff
+
+ // Resize buffers
+ nsize := retCode // Hint for next src buffer size
+ if nsize <= 0 {
+ // Reset to recommended size
+ nsize = r.recommendedSrcSize
+ }
+ if nsize < r.compressionLeft {
+ nsize = r.compressionLeft
+ }
+ r.compressionBuffer = resize(r.compressionBuffer, nsize)
+ }
+ return got, nil
+}
+
+// TryReadFull reads into buf just as io.ReadFull does.
+// Here the source may legitimately end early, so we do not return ErrUnexpectedEOF as ReadAtLeast does;
+// we return errShortRead instead, to distinguish short reads from failures.
+// We cannot use ReadFull/ReadAtLeast because they mask Reader errors, such as network failures,
+// and cause a panic instead of returning an error.
+func TryReadFull(r io.Reader, buf []byte) (n int, err error) {
+ for n < len(buf) && err == nil {
+ var nn int
+ nn, err = r.Read(buf[n:])
+ n += nn
+ }
+ if n == len(buf) && err == io.EOF {
+ err = nil // EOF at the end is somewhat expected
+ } else if err == io.EOF {
+ err = errShortRead
+ }
+ return
+}
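+
+// decompressStream is an illustrative sketch, not part of the upstream API:
+// it shows how the ReadCloser returned by NewReader is consumed. The helper
+// name and the 64 KiB chunk size are arbitrary choices for the example.
+func decompressStream(r io.Reader) ([]byte, error) {
+	zr := NewReader(r)
+	defer zr.Close()
+	var out []byte
+	buf := make([]byte, 1<<16)
+	for {
+		n, err := zr.Read(buf)
+		out = append(out, buf[:n]...) // bytes may be returned even alongside io.EOF
+		if err == io.EOF {
+			return out, nil
+		}
+		if err != nil {
+			return out, err
+		}
+	}
+}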
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c
new file mode 100644
index 000000000..cad2b99b4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.c
@@ -0,0 +1,2158 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/******************************************
+* Includes
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include "zstd_v01.h"
+#include "error_private.h"
+
+
+/******************************************
+* Static allocation
+******************************************/
+/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
+
+/* You can statically allocate Huff0 DTable as a table of unsigned short using below macro */
+#define HUF_DTABLE_SIZE_U16(maxTableLog) (1 + (1<<maxTableLog))
+#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \
+ unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }
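+/* usage sketch (note added for clarity, not from upstream) :
+ * HUF_CREATE_STATIC_DTABLE(hufTable, 12); declares `unsigned short hufTable[1 + (1<<12)]`
+ * with hufTable[0] initialized to the maxTableLog of 12 */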
+
+
+/******************************************
+* Error Management
+******************************************/
+#define FSE_LIST_ERRORS(ITEM) \
+ ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \
+ ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \
+ ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\
+ ITEM(FSE_ERROR_corruptionDetected) \
+ ITEM(FSE_ERROR_maxCode)
+
+#define FSE_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+
+/******************************************
+* FSE symbol compression API
+******************************************/
+/*
+ This API consists of small unitary functions, which highly benefit from being inlined.
+ You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.
+ Visual seems to do it automatically.
+ For gcc or clang, you'll need to add -flto flag at compilation and linking stages.
+ If none of these solutions is applicable, include "fse.c" directly.
+*/
+
+typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+
+typedef struct
+{
+ size_t bitContainer;
+ int bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} FSE_CStream_t;
+
+typedef struct
+{
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} FSE_DStream_t;
+
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+typedef enum { FSE_DStream_unfinished = 0,
+ FSE_DStream_endOfBuffer = 1,
+ FSE_DStream_completed = 2,
+ FSE_DStream_tooFar = 3 } FSE_DStream_status; /* result of FSE_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! */
+
+
+/****************************************************************
+* Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+* Byte symbol type
+****************************************************************/
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+
+
+/****************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+
+#ifndef MEM_ACCESS_MODULE
+#define MEM_ACCESS_MODULE
+/****************************************************************
+* Basic Types
+*****************************************************************/
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# include <stdint.h>
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef int64_t S64;
+#else
+typedef unsigned char BYTE;
+typedef unsigned short U16;
+typedef signed short S16;
+typedef unsigned int U32;
+typedef signed int S32;
+typedef unsigned long long U64;
+typedef signed long long S64;
+#endif
+
+#endif /* MEM_ACCESS_MODULE */
+
+/****************************************************************
+* Memory I/O
+*****************************************************************/
+/* FSE_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ * It can generate buggy code on targets generating assembly depending on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define FSE_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define FSE_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+
+static unsigned FSE_32bits(void)
+{
+ return sizeof(void*)==4;
+}
+
+static unsigned FSE_isLittleEndian(void)
+{
+ const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)
+
+static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+static U16 FSE_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 FSE_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U64 FSE_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+#endif // FSE_FORCE_MEMORY_ACCESS
+
+static U16 FSE_readLE16(const void* memPtr)
+{
+ if (FSE_isLittleEndian())
+ return FSE_read16(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+static U32 FSE_readLE32(const void* memPtr)
+{
+ if (FSE_isLittleEndian())
+ return FSE_read32(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+
+static U64 FSE_readLE64(const void* memPtr)
+{
+ if (FSE_isLittleEndian())
+ return FSE_read64(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+ }
+}
+
+static size_t FSE_readLEST(const void* memPtr)
+{
+ if (FSE_32bits())
+ return (size_t)FSE_readLE32(memPtr);
+ else
+ return (size_t)FSE_readLE64(memPtr);
+}
+
+
+
+/****************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/****************************************************************
+* Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/****************************************************************
+* Complex types
+****************************************************************/
+typedef struct
+{
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+/****************************************************************
+* Internal functions
+****************************************************************/
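+/* FSE_highbit32() : returns the position of the highest set bit,
+ * e.g. 1 -> 0, 32 -> 5, 0x80000000 -> 31 ; val must be non-zero (note added for clarity, not from upstream) */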
+FORCE_INLINE unsigned FSE_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
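+/* Informative note (illustrative, not normative) :
+ * FSE_highbit32() returns the position of the highest set bit,
+ * e.g. FSE_highbit32(1) == 0, FSE_highbit32(32768) == 15.
+ * FSE_buildDTable() below relies on this to derive nbBits from a state's next-state counter. */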
+
+
+/****************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1;   /* skip the 32-bit header : dt is a U32 table, and FSE_DTableHeader fits in one cell */
+ const U32 tableSize = 1 << tableLog;
+ const U32 tableMask = tableSize-1;
+ const U32 step = FSE_tableStep(tableSize);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+ U32 position = 0;
+ U32 highThreshold = tableSize-1;
+ const S16 largeLimit= (S16)(1 << (tableLog-1));
+ U32 noLarge = 1;
+ U32 s;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;
+ if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;
+
+ /* Init, lay down lowprob symbols */
+ DTableH[0].tableLog = (U16)tableLog;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ if (normalizedCounter[s]==-1)
+ {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ }
+ else
+ {
+ if (normalizedCounter[s] >= largeLimit) noLarge=0;
+ symbolNext[s] = normalizedCounter[s];
+ }
+ }
+
+ /* Spread symbols */
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++)
+ {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ }
+ }
+
+ if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+ /* Build Decoding table */
+ {
+ U32 i;
+ for (i=0; i<tableSize; i++)
+ {
+ FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );
+ tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+ }
+ }
+
+ DTableH->fastMode = (U16)noLarge;
+ return 0;
+}
+
+
+/******************************************
+* FSE byte symbol
+******************************************/
+#ifndef FSE_COMMONDEFS_ONLY
+
+static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); }
+
+static short FSE_abs(short a)
+{
+ return a<0? -a : a;
+}
+
+
+/****************************************************************
+* Header bitstream management
+****************************************************************/
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;
+ bitStream = FSE_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr))
+ {
+ if (previous0)
+ {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF)
+ {
+ n0+=24;
+ if (ip < iend-5)
+ {
+ ip+=2;
+ bitStream = FSE_readLE32(ip) >> bitCount;
+ }
+ else
+ {
+ bitStream >>= 16;
+ bitCount+=16;
+ }
+ }
+ while ((bitStream & 3) == 3)
+ {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = FSE_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ {
+ const short max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max)
+ {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ }
+ else
+ {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSE_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold)
+ {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ {
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ }
+ else
+ {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = FSE_readLE32(ip) >> (bitCount & 31);
+ }
+ }
+ }
+ if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+ return ip-istart;
+}
+
+
+/*********************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+
+/* FSE_initDStream
+ * Initialize an FSE_DStream_t.
+ * srcBuffer must point at the beginning of an FSE block.
+ * The function result is the size of the FSE block (== srcSize).
+ * If srcSize is too small, the function will return an error code.
+ */
+static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;
+
+ if (srcSize >= sizeof(size_t))
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
+ bitD->bitContainer = FSE_readLEST(bitD->ptr);
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */
+ bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
+ }
+ else
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+ /* fallthrough */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+ /* fallthrough */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+ /* fallthrough */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+ /* fallthrough */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+ /* fallthrough */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
+ /* fallthrough */
+ default:;
+ }
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */
+ bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
+ bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+
+/*!FSE_lookBits
+ * Provides next n bits from the bitContainer.
+ * bitContainer is not modified (bits are still present for next read/look)
+ * On 32-bits, maxNbBits==25
+ * On 64-bits, maxNbBits==57
+ * return : value extracted.
+ */
+static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+
+/*!FSE_readBits
+ * Read next n bits from the bitContainer.
+ * On 32-bits, don't read more than maxNbBits==25
+ * On 64-bits, don't read more than maxNbBits==57
+ * Use the fast variant *only* if n >= 1.
+ * return : value extracted.
+ */
+static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = FSE_lookBits(bitD, nbBits);
+ FSE_skipBits(bitD, nbBits);
+ return value;
+}
+
+static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */
+{
+ size_t value = FSE_lookBitsFast(bitD, nbBits);
+ FSE_skipBits(bitD, nbBits);
+ return value;
+}
+
+static unsigned FSE_reloadDStream(FSE_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return FSE_DStream_tooFar;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+ {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = FSE_readLEST(bitD->ptr);
+ return FSE_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start)
+ {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;
+ return FSE_DStream_completed;
+ }
+ {
+ U32 nbBytes = bitD->bitsConsumed >> 3;
+ U32 result = FSE_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start)
+ {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = FSE_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
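+/* Illustrative usage sketch (mirrors how FSE_decompress_usingDTable_generic() below drives
+ * the bit-reader; variable names such as cSrc, cSrcSize and nbBits are placeholders) :
+ *
+ *     FSE_DStream_t bitD;
+ *     size_t const errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);
+ *     if (FSE_isError(errorCode)) return errorCode;
+ *     while (FSE_reloadDStream(&bitD) == FSE_DStream_unfinished)
+ *     {
+ *         size_t const bits = FSE_readBits(&bitD, nbBits);   // nbBits <= 25 (32-bit) / 57 (64-bit)
+ *         // ... consume `bits` ...
+ *     }
+ *     // FSE_endOfDStream(&bitD) then tells whether the stream was consumed exactly
+ */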
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);
+ FSE_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = FSE_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = FSE_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/* FSE_endOfDStream
+   Tells whether bitD has reached the end of the bitStream */
+
+static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)
+{
+ return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));
+}
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ FSE_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+ size_t errorCode;
+
+ /* Init */
+    errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+ if (FSE_isError(errorCode)) return errorCode;
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)
+ {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ FSE_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ FSE_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */
+ while (1)
+ {
+ if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state1);
+
+ if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state2);
+ }
+
+ /* end ? */
+ if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+ return op-ostart;
+
+ if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */
+
+ return (size_t)-FSE_ERROR_corruptionDetected;
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */
+
+ /* select fast mode (static) */
+ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzers seem unable to understand that this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ size_t errorCode;
+
+ if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */
+
+ /* normal FSE decoding mode */
+ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSE_isError(errorCode)) return errorCode;
+
+ /* always return, even if it is an error code */
+ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
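+/* Illustrative usage sketch (this is how HUF_readDTable() below decodes its FSE-compressed
+ * Huffman weights; buffer names are placeholders) :
+ *
+ *     size_t const dSize = FSE_decompress(dstBuf, dstCapacity, cSrc, cSrcSize);
+ *     if (FSE_isError(dSize)) return dSize;     // error code
+ *     // otherwise dSize is the number of regenerated bytes in dstBuf
+ */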
+
+
+
+/* *******************************************************
+* Huff0 : Huffman block compression
+*********************************************************/
+#define HUF_MAX_SYMBOL_VALUE 255
+#define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */
+#define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */
+#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+# error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+typedef struct HUF_CElt_s {
+ U16 val;
+ BYTE nbBits;
+} HUF_CElt ;
+
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+
+/* *******************************************************
+* Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct {
+ BYTE byte;
+ BYTE nbBits;
+} HUF_DElt;
+
+static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 weightTotal;
+ U32 maxBits;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+ U32 n;
+ U32 nextRankStart;
+ void* ptr = DTable+1;
+ HUF_DElt* const dt = (HUF_DElt*)ptr;
+
+ if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+ iSize = ip[0];
+
+ FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* should not be necessary, but some analyzers complain ... */
+ if (iSize >= 128) /* special header */
+ {
+ if (iSize >= (242)) /* RLE */
+ {
+ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, sizeof(huffWeight));
+ iSize = 0;
+ }
+ else /* Incompressible */
+ {
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+ ip += 1;
+ for (n=0; n<oSize; n+=2)
+ {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ }
+ }
+ }
+ else /* header compressed with FSE (normal case) */
+ {
+ if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+ oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankVal, 0, sizeof(rankVal));
+ weightTotal = 0;
+ for (n=0; n<oSize; n++)
+ {
+ if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;
+ rankVal[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ }
+ if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ maxBits = FSE_highbit32(weightTotal) + 1;
+ if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */
+ DTable[0] = (U16)maxBits;
+ {
+ U32 total = 1 << maxBits;
+ U32 rest = total - weightTotal;
+ U32 verif = 1 << FSE_highbit32(rest);
+ U32 lastWeight = FSE_highbit32(rest) + 1;
+ if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankVal[lastWeight]++;
+ }
+
+ /* check tree construction validity */
+ if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<=maxBits; n++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<=oSize; n++)
+ {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUF_DElt D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize+1;
+}
+
+
+static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)
+{
+ const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ FSE_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+static size_t HUF_decompress_usingDTable( /* -3% slower when non static */
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-15;
+
+ const void* ptr = DTable;
+ const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+ U32 reloadStatus;
+
+ /* Init */
+
+ const U16* jumpTable = (const U16*)cSrc;
+ const size_t length1 = FSE_readLE16(jumpTable);
+ const size_t length2 = FSE_readLE16(jumpTable+1);
+ const size_t length3 = FSE_readLE16(jumpTable+2);
+    const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   /* coherency checked just below */
+ const char* const start1 = (const char*)(cSrc) + 6;
+ const char* const start2 = start1 + length1;
+ const char* const start3 = start2 + length2;
+ const char* const start4 = start3 + length3;
+ FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
+
+ if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+
+ errorCode = FSE_initDStream(&bitD1, start1, length1);
+ if (FSE_isError(errorCode)) return errorCode;
+ errorCode = FSE_initDStream(&bitD2, start2, length2);
+ if (FSE_isError(errorCode)) return errorCode;
+ errorCode = FSE_initDStream(&bitD3, start3, length3);
+ if (FSE_isError(errorCode)) return errorCode;
+ errorCode = FSE_initDStream(&bitD4, start4, length4);
+ if (FSE_isError(errorCode)) return errorCode;
+
+ reloadStatus=FSE_reloadDStream(&bitD2);
+
+ /* 16 symbols per loop */
+ for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit); /* D2-3-4 are supposed to be synchronized and finish together */
+ op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
+ {
+#define HUF_DECODE_SYMBOL_0(n, Dstream) \
+ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
+
+#define HUF_DECODE_SYMBOL_1(n, Dstream) \
+ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+ if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
+
+#define HUF_DECODE_SYMBOL_2(n, Dstream) \
+ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+ if (FSE_32bits()) FSE_reloadDStream(&Dstream)
+
+ HUF_DECODE_SYMBOL_1( 0, bitD1);
+ HUF_DECODE_SYMBOL_1( 1, bitD2);
+ HUF_DECODE_SYMBOL_1( 2, bitD3);
+ HUF_DECODE_SYMBOL_1( 3, bitD4);
+ HUF_DECODE_SYMBOL_2( 4, bitD1);
+ HUF_DECODE_SYMBOL_2( 5, bitD2);
+ HUF_DECODE_SYMBOL_2( 6, bitD3);
+ HUF_DECODE_SYMBOL_2( 7, bitD4);
+ HUF_DECODE_SYMBOL_1( 8, bitD1);
+ HUF_DECODE_SYMBOL_1( 9, bitD2);
+ HUF_DECODE_SYMBOL_1(10, bitD3);
+ HUF_DECODE_SYMBOL_1(11, bitD4);
+ HUF_DECODE_SYMBOL_0(12, bitD1);
+ HUF_DECODE_SYMBOL_0(13, bitD2);
+ HUF_DECODE_SYMBOL_0(14, bitD3);
+ HUF_DECODE_SYMBOL_0(15, bitD4);
+ }
+
+ if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */
+ return (size_t)-FSE_ERROR_corruptionDetected;
+
+ /* tail */
+ {
+        // bitTail = bitD1;   // note : a direct struct copy is much slower (-20%)
+ FSE_DStream_t bitTail;
+ bitTail.ptr = bitD1.ptr;
+ bitTail.bitsConsumed = bitD1.bitsConsumed;
+ bitTail.bitContainer = bitD1.bitContainer; // required in case of FSE_DStream_endOfBuffer
+ bitTail.start = start1;
+ for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
+ {
+ HUF_DECODE_SYMBOL_0(0, bitTail);
+ }
+
+ if (FSE_endOfDStream(&bitTail))
+ return op-ostart;
+ }
+
+ if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */
+
+ return (size_t)-FSE_ERROR_corruptionDetected;
+}
+
+
+static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUF_readDTable (DTable, cSrc, cSrcSize);
+ if (FSE_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable);
+}
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
+
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+   - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/****************************************************************
+* Tuning parameters
+*****************************************************************/
+/* MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio.
+*  Reducing memory usage can improve speed, due to cache effects. */
+#define ZSTD_MEMORY_USAGE 17
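+/* informative example : with ZSTD_MEMORY_USAGE == 17, HASH_LOG below becomes 15,
+   i.e. a hash table of 2^15 U32 entries == 2^17 bytes == 128 KB */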
+
+
+/**************************************
+ CPU Feature Detection
+**************************************/
+/*
+ * Automated efficient unaligned memory access detection
+ * Based on known hardware architectures
+ * This list will be updated based on feedback
+ */
+#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
+ || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(__i386__) || defined(__x86_64__) \
+ || defined(_M_IX86) || defined(_M_X64) \
+ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
+ || (defined(_M_ARM) && (_M_ARM >= 7))
+# define ZSTD_UNALIGNED_ACCESS 1
+#else
+# define ZSTD_UNALIGNED_ACCESS 0
+#endif
+
+
+/********************************************************
+* Includes
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug : printf */
+
+
+/********************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+# include <immintrin.h> /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+
+#ifndef MEM_ACCESS_MODULE
+#define MEM_ACCESS_MODULE
+/********************************************************
+* Basic Types
+*********************************************************/
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# include <stdint.h>
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+#else
+typedef unsigned char BYTE;
+typedef unsigned short U16;
+typedef signed short S16;
+typedef unsigned int U32;
+typedef signed int S32;
+typedef unsigned long long U64;
+#endif
+
+#endif /* MEM_ACCESS_MODULE */
+
+
+/********************************************************
+* Constants
+*********************************************************/
+static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */
+
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define MLbits 7
+#define LLbits 6
+#define Offbits 5
+#define MaxML ((1<<MLbits )-1)
+#define MaxLL ((1<<LLbits )-1)
+#define MaxOff ((1<<Offbits)-1)
+#define LitFSELog 11
+#define MLFSELog 10
+#define LLFSELog 10
+#define OffFSELog 9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7 /* to remove */
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/********************************************************
+* Memory operations
+*********************************************************/
+static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }
+
+static unsigned ZSTD_isLittleEndian(void)
+{
+    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
+ return one.c[0];
+}
+
+static U16 ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }
+
+static U32 ZSTD_read32(const void* p) { U32 r; memcpy(&r, p, sizeof(r)); return r; }
+
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ while (op < oend) COPY8(op, ip);
+}
+
+static U16 ZSTD_readLE16(const void* memPtr)
+{
+ if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + ((U16)p[1]<<8));
+ }
+}
+
+
+static U32 ZSTD_readLE32(const void* memPtr)
+{
+ if (ZSTD_isLittleEndian())
+ return ZSTD_read32(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+static U32 ZSTD_readBE32(const void* memPtr)
+{
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0));
+}
+
+
+/**************************************
+* Local structures
+***************************************/
+typedef struct ZSTD_Cctx_s ZSTD_Cctx;
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* offCode;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* litLengthStart;
+ BYTE* litLength;
+ BYTE* matchLengthStart;
+ BYTE* matchLength;
+ BYTE* dumpsStart;
+ BYTE* dumps;
+} seqStore_t;
+
+
+typedef struct ZSTD_Cctx_s
+{
+ const BYTE* base;
+ U32 current;
+ U32 nextUpdate;
+ seqStore_t seqStore;
+#ifdef __AVX2__
+ __m256i hashTable[HASH_TABLESIZE>>3];
+#else
+ U32 hashTable[HASH_TABLESIZE];
+#endif
+ BYTE buffer[WORKPLACESIZE];
+} cctxi_t;
+
+
+
+
+/**************************************
+* Error Management
+**************************************/
+/* published entry point */
+unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }
+
+
+/**************************************
+* Tool functions
+**************************************/
+#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
+#define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/**************************************************************
+* Decompression code
+**************************************************************/
+
+static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3) return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+static size_t ZSTD_decompressLiterals(void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + maxDstSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t errorCode;
+ size_t litSize;
+
+ /* check : minimum 2, for litSize, +1, for content */
+ if (srcSize <= 3) return ERROR(corruption_detected);
+
+ litSize = ip[1] + (ip[0]<<8);
+    litSize += ((ip[-3] >> 3) & 7) << 16;   /* note : reaches back into the block header byte for the upper litSize bits */
+ op = oend - litSize;
+
+ (void)ctx;
+ if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);
+ if (FSE_isError(errorCode)) return ERROR(GENERIC);
+ return litSize;
+}
+
+
+static size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
+ void* dst, size_t maxDstSize,
+ const BYTE** litStart, size_t* litSize,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ blockProperties_t litbp;
+
+ size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);
+ if (ZSTDv01_isError(litcSize)) return litcSize;
+ if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ ip += ZSTD_blockHeaderSize;
+
+ switch(litbp.blockType)
+ {
+ case bt_raw:
+ *litStart = ip;
+ ip += litcSize;
+ *litSize = litcSize;
+ break;
+ case bt_rle:
+ {
+ size_t rleSize = litbp.origSize;
+ if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
+ if (!srcSize) return ERROR(srcSize_wrong);
+ memset(oend - rleSize, *ip, rleSize);
+ *litStart = oend - rleSize;
+ *litSize = rleSize;
+ ip++;
+ break;
+ }
+ case bt_compressed:
+ {
+ size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);
+ if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;
+ *litStart = oend - decodedLitSize;
+ *litSize = decodedLitSize;
+ ip += litcSize;
+ break;
+ }
+ case bt_end:
+ default:
+ return ERROR(GENERIC);
+ }
+
+ return ip-istart;
+}
+
+
+static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ U32 LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < 5) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = ZSTD_readLE16(ip); ip+=2;
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2)
+ {
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ }
+ else
+ {
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ case bt_rle :
+ LLlog = 0;
+ FSE_buildDTable_rle(DTableLL, *ip++); break;
+ case bt_raw :
+ LLlog = LLbits;
+ FSE_buildDTable_raw(DTableLL, LLbits); break;
+ default :
+ { U32 max = MaxLL;
+ headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableLL, norm, max, LLlog);
+ } }
+
+ switch(Offtype)
+ {
+ case bt_rle :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableOffb, *ip++); break;
+ case bt_raw :
+ Offlog = Offbits;
+ FSE_buildDTable_raw(DTableOffb, Offbits); break;
+ default :
+ { U32 max = MaxOff;
+ headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableOffb, norm, max, Offlog);
+ } }
+
+ switch(MLtype)
+ {
+ case bt_rle :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableML, *ip++); break;
+ case bt_raw :
+ MLlog = MLbits;
+ FSE_buildDTable_raw(DTableML, MLbits); break;
+ default :
+ { U32 max = MaxML;
+ headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableML, norm, max, MLlog);
+ } } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t offset;
+ size_t matchLength;
+} seq_t;
+
+typedef struct {
+ FSE_DStream_t DStream;
+ FSE_DState_t stateLL;
+ FSE_DState_t stateOffb;
+ FSE_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ seqState->prevOffset = seq->offset;
+ if (litLength == MaxLL)
+ {
+ U32 add = dumps<de ? *dumps++ : 0;
+ if (add < 255) litLength += add;
+ else
+ {
+ if (dumps<=(de-3))
+ {
+                litLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no problem : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ }
+ }
+
+ /* Offset */
+ {
+ U32 offsetCode, nbBits;
+ offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));
+ if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
+ nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);
+ if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset;
+ }
+
+ /* MatchLength */
+ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML)
+ {
+ U32 add = dumps<de ? *dumps++ : 0;
+ if (add < 255) matchLength += add;
+ else
+ {
+ if (dumps<=(de-3))
+ {
+                matchLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no problem : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ }
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+ seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ BYTE* const base, BYTE* const oend)
+{
+ static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
+ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
+ const BYTE* const ostart = op;
+ const size_t litLength = sequence.litLength;
+ BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
+ const BYTE* const litEnd = *litPtr + litLength;
+
+ /* check */
+ if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit) return ERROR(corruption_detected);
+ if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */
+
+ /* copy Literals */
+ if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
+ memmove(op, *litPtr, litLength); /* overwrite risk */
+ else
+ ZSTD_wildcopy(op, *litPtr, litLength);
+ op += litLength;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* check : last match must be at a minimum distance of 8 from end of dest buffer */
+ if (oend-op < 8) return ERROR(dstSize_tooSmall);
+
+ /* copy Match */
+ {
+ const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);
+ const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? */
+ size_t qutt = 12;
+ U64 saved[2];
+
+ /* check */
+ if (match < base) return ERROR(corruption_detected);
+ if (sequence.offset > (size_t)base) return ERROR(corruption_detected);
+
+ /* save beginning of literal sequence, in case of write overlap */
+ if (overlapRisk)
+ {
+ if ((endMatch + qutt) > oend) qutt = oend-endMatch;
+ memcpy(saved, endMatch, qutt);
+ }
+
+ if (sequence.offset < 8)
+ {
+ const int dec64 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= dec64;
+ } else { ZSTD_copy8(op, match); }
+ op += 8; match += 8;
+
+ if (endMatch > oend-(16-MINMATCH))
+ {
+ if (op < oend-8)
+ {
+ ZSTD_wildcopy(op, match, (oend-8) - op);
+ match += (oend-8) - op;
+ op = oend-8;
+ }
+ while (op<endMatch) *op++ = *match++;
+ }
+ else
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+
+ /* restore, in case of overlap */
+ if (overlapRisk) memcpy(endMatch, saved, qutt);
+ }
+
+ return endMatch-ostart;
+}
+
+typedef struct ZSTDv01_Dctx_s
+{
+ U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+ U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+ U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+ void* previousDstEnd;
+ void* base;
+ size_t expected;
+ blockType_t bType;
+ U32 phase;
+} dctx_t;
+
+
+static size_t ZSTD_decompressSequences(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize,
+ const BYTE* litStart, size_t litSize)
+{
+ dctx_t* dctx = (dctx_t*)ctx;
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength;
+ const BYTE* litPtr = litStart;
+ const BYTE* const litEnd = litStart + litSize;
+ int nbSeq;
+ const BYTE* dumps;
+ U32* DTableLL = dctx->LLTable;
+ U32* DTableML = dctx->MLTable;
+ U32* DTableOffb = dctx->OffTable;
+ BYTE* const base = (BYTE*) (dctx->base);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, iend-ip);
+ if (ZSTDv01_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = 1;
+ errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (FSE_isError(errorCode)) return ERROR(corruption_detected);
+ FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )
+ {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTD_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+ if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
+ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ /* blockType == blockCompressed, srcSize is trusted */
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* litPtr = NULL;
+ size_t litSize = 0;
+ size_t errorCode;
+
+ /* Decode literals sub-block */
+ errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);
+ if (ZSTDv01_isError(errorCode)) return errorCode;
+ ip += errorCode;
+ srcSize -= errorCode;
+
+ return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize);
+}
+
+
+size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ U32 magicNumber;
+ size_t errorCode=0;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ magicNumber = ZSTD_readBE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTDv01_isError(blockSize)) return blockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (blockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);
+ break;
+ case bt_raw :
+ errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ if (blockSize == 0) break; /* bt_end */
+
+ if (ZSTDv01_isError(errorCode)) return errorCode;
+ op += errorCode;
+ ip += blockSize;
+ remainingSize -= blockSize;
+ }
+
+ return op-ostart;
+}
+
+size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ dctx_t ctx;
+ ctx.base = dst;
+ return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ magicNumber = ZSTD_readBE32(src);
+ if (magicNumber != ZSTD_magicNumber) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTDv01_isError(blockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);
+ return;
+ }
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (blockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (blockSize == 0) break; /* bt_end */
+
+ ip += blockSize;
+ remainingSize -= blockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * BLOCKSIZE;
+}
+
+/*******************************
+* Streaming Decompression API
+*******************************/
+
+size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)
+{
+ dctx->expected = ZSTD_frameHeaderSize;
+ dctx->phase = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ return 0;
+}
+
+ZSTDv01_Dctx* ZSTDv01_createDCtx(void)
+{
+ ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));
+ if (dctx==NULL) return NULL;
+ ZSTDv01_resetDCtx(dctx);
+ return dctx;
+}
+
+size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)
+{
+ free(dctx);
+ return 0;
+}
+
+size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)
+{
+ return ((dctx_t*)dctx)->expected;
+}
+
+size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ dctx_t* ctx = (dctx_t*)dctx;
+
+ /* Sanity check */
+ if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+ if (dst != ctx->previousDstEnd) /* not contiguous */
+ ctx->base = dst;
+
+ /* Decompress : frame header */
+ if (ctx->phase == 0)
+ {
+ /* Check frame magic header */
+ U32 magicNumber = ZSTD_readBE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ return 0;
+ }
+
+ /* Decompress : block header */
+ if (ctx->phase == 1)
+ {
+ blockProperties_t bp;
+ size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTDv01_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end)
+ {
+ ctx->expected = 0;
+ ctx->phase = 0;
+ }
+ else
+ {
+ ctx->expected = blockSize;
+ ctx->bType = bp.blockType;
+ ctx->phase = 2;
+ }
+
+ return 0;
+ }
+
+ /* Decompress : block content */
+ {
+ size_t rSize;
+ switch(ctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+ return rSize;
+ }
+
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.h b/vendor/github.com/DataDog/zstd/zstd_v01.h
new file mode 100644
index 000000000..245f9dd31
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V01_H_28739879432
+#define ZSTD_V01_H_28739879432
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Simple one-step function
+***************************************/
+/**
+ZSTDv01_decompress() : decompress ZSTD frames compliant with the v0.1.x format
+    compressedSize : the exact size of the compressed source
+    maxOriginalSize : the size of the 'dst' buffer, which must already be allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into the destination buffer (originalSize),
+             or an error code if it fails (which can be tested using ZSTDv01_isError())
+*/
+size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
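+/* Illustrative example (buffer names are placeholders) :
+ *     size_t const r = ZSTDv01_decompress(dstBuf, dstCapacity, srcBuf, srcSize);
+ *     if (ZSTDv01_isError(r)) { ... handle error ... }   // r is an error code
+ *     else { ... r is the number of decompressed bytes written into dstBuf ... }
+ */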
+
+ /**
+ ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format
+ srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+ cSize (output parameter) : the number of bytes that would be read to decompress this frame
+ or an error code if it fails (which can be tested using ZSTDv01_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
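+/* Illustrative example (buffer names are placeholders) :
+ *     size_t cSize;
+ *     unsigned long long dBound;
+ *     ZSTDv01_findFrameSizeInfoLegacy(srcBuf, srcSize, &cSize, &dBound);
+ *     if (ZSTDv01_isError(cSize)) { ... the frame header could not be parsed ... }
+ *     else { ... cSize bytes would be consumed; the decompressed size is at most dBound ... }
+ */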
+
+/**
+ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
+*/
+unsigned ZSTDv01_isError(size_t code);
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
+ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
+size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);
+
+size_t ZSTDv01_decompressDCtx(void* ctx,
+ void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
+
+/* *************************************
+* Streaming functions
+***************************************/
+size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);
+
+size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
+size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTDv01_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv01_decompressContinue().
+  ZSTDv01_decompressContinue() will use previously decompressed data as history, provided it is located contiguously prior to the current block.
+  The result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTDv01_decompressContinue() has only decoded a header.
+*/
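+/* Illustrative streaming loop (a sketch only; I/O and output-buffer management are application-specific) :
+ *
+ *     ZSTDv01_Dctx* const dctx = ZSTDv01_createDCtx();
+ *     while (1)
+ *     {
+ *         size_t const toRead = ZSTDv01_nextSrcSizeToDecompress(dctx);
+ *         if (toRead == 0) break;                        // frame fully decoded
+ *         // read exactly `toRead` bytes into inBuf, then :
+ *         size_t const produced = ZSTDv01_decompressContinue(dctx, outPtr, outCapacity, inBuf, toRead);
+ *         if (ZSTDv01_isError(produced)) break;          // error
+ *         outPtr += produced; outCapacity -= produced;   // keep output contiguous for back-references
+ *     }
+ *     ZSTDv01_freeDCtx(dctx);
+ */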
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */
+#define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V01_H_28739879432 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c
new file mode 100644
index 000000000..561bc412e
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.c
@@ -0,0 +1,3508 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include "zstd_v02.h"
+#include "error_private.h"
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+
+
+/* ******************************************************************
+ mem.h
+ low-level memory access routines
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+* Includes
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+* Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/****************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets whose generated assembly depends on alignment.
+ *            In some circumstances, however, it is the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
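+
+/* Example (illustrative) : the override mentioned above can also be supplied
+   from the build command line, e.g. :
+
+     cc -DMEM_FORCE_MEMORY_ACCESS=1 -c zstd_v02.c
+
+   which forces the packed-union method regardless of the detection above. */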
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard on structure alignment.
+Use only if there is no other way to achieve the best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __packed unions are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian())
+ {
+ MEM_write16(memPtr, val);
+ }
+ else
+ {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+ }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+
+/* ******************************************************************
+ bitstream
+ Part of NewGen Entropy library
+ header file (to include)
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which benefit greatly from being inlined.
+* Since link-time optimization is not available for all compilers,
+* these functions are defined in a .h file, to be included where needed.
+*/
+
+
+/**********************************************
+* bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
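+
+/* Usage sketch (illustrative only) : typical read-backward decoding loop,
+   assuming `src`/`srcSize` delimit one complete bitstream, `nbBits` is the
+   field width the caller wants to extract, and `consume()` is caller-provided.
+
+     BIT_DStream_t bitD;
+     size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
+     if (ERR_isError(initResult)) return initResult;
+     do {
+         consume(BIT_readBits(&bitD, nbBits));
+     } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
+     if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+*/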
+
+
+/******************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/****************************************************************
+* Helper functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
+
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+* Initialize a BIT_DStream_t.
+* @bitD : a pointer to an already allocated BIT_DStream_t structure
+* @srcBuffer must point at the beginning of a bitStream
+* @srcSize must be the exact size of the bitStream
+* @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(size_t)) /* normal case */
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ }
+ else
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+ /* fallthrough */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+ /* fallthrough */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+ /* fallthrough */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+ /* fallthrough */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+ /* fallthrough */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
+ /* fallthrough */
+ default:;
+ }
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*!BIT_readBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBitsFast(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+ {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start)
+ {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ {
+ U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start)
+ {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream
+* @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+ Error codes and messages
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+# define ERR_STATIC static __inline
+#elif defined(__GNUC__)
+# define ERR_STATIC static __attribute__((unused))
+#else
+# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/******************************************
+* Error Management
+******************************************/
+#define PREFIX(name) ZSTD_error_##name
+
+#define ERROR(name) (size_t)-PREFIX(name)
+
+#define ERROR_LIST(ITEM) \
+ ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
+ ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
+ ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
+ ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
+ ITEM(PREFIX(maxCode))
+
+#define ERROR_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+#define ERROR_CONVERTTOSTRING(STRING) #STRING,
+#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
+static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ static const char* codeError = "Unspecified error code";
+ if (ERR_isError(code)) return ERR_strings[-(int)(code)];
+ return codeError;
+}
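+
+/* Worked example (illustrative) : ERROR(srcSize_wrong) expands to
+   (size_t)-ZSTD_error_srcSize_wrong, i.e. a value near SIZE_MAX.
+   ERR_isError() recognises it because it compares above ERROR(maxCode),
+   and ERR_getErrorName() maps it back to its string :
+
+     size_t const code = ERROR(srcSize_wrong);
+     ERR_isError(code);         returns 1
+     ERR_getErrorName(code);    returns "ZSTD_error_srcSize_wrong"
+*/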
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
+/*
+Constructor and Destructor of types FSE_CTable and FSE_DTable
+    Note that their size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable;   /* don't allocate this directly; it's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable;   /* don't allocate this directly; it's just a way to be more restrictive than void* */
+
+
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+* Static allocation
+******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* You can statically allocate an FSE CTable/DTable as a table of unsigned using the macros below */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
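+
+/* Example (illustrative) : static allocation of a DTable able to hold any
+   table of up to 10 bits, using the macro above :
+
+     FSE_DTable dTable[FSE_DTABLE_SIZE_U32(10)];    1 + 1024 unsigned values
+*/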
+
+
+/******************************************
+* FSE advanced API
+******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+/******************************************
+* FSE symbol decompression API
+******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
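+
+/* Usage sketch (illustrative; the full loop used by this file is
+   FSE_decompress_usingDTable_generic, further below) : symbols are regenerated
+   from an initialized BIT_DStream_t `bitD`, a built table `dt`, and an assumed
+   output write pointer `op` :
+
+     FSE_DState_t state;
+     FSE_initDState(&state, &bitD, dt);
+     while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished)
+         *op++ = FSE_decodeSymbol(&state, &bitD);
+     (a real decoder also drains the tail until FSE_endOfDState() reports 1)
+*/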
+
+
+/******************************************
+* FSE unsafe API
+******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/******************************************
+* Implementation of inline functions
+******************************************/
+
+/* decompression */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH));
+ DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+* Static allocation macros
+******************************************/
+/* Huff0 buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
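+
+/* Example (illustrative) : static declaration of a single-symbol DTable sized
+   for a 12-bit table log, using the macros above :
+
+     HUF_CREATE_STATIC_DTABLEX2(hufTable, 12);
+     expands to : unsigned short hufTable[1 + (1<<12)] = { 12 };
+*/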
+
+
+/******************************************
+* Advanced functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
+static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* quad-symbols decoder */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+/*
+ zstd - standard compression library
+ Header File
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+   - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Version
+***************************************/
+#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
+#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
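+
+/* Worked example : for this header (0.2.2), ZSTD_VERSION_NUMBER = 0*100*100 + 2*100 + 2 = 202 */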
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */
+
+#if defined (__cplusplus)
+}
+#endif
+/*
+ zstd - standard compression library
+ Header File for static linking only
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+   - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests, to provide feedback, or if you can tolerate the risk of future changes.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Streaming functions
+***************************************/
+
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+
+/*
+  Use the streaming functions alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() will use previously decoded data blocks to improve decompression when they are located prior to the current block.
+  The result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+*/
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/****************************************************************
+* Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+* Byte symbol type
+****************************************************************/
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/****************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+/****************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/****************************************************************
+* Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/****************************************************************
+* Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/****************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* ptr = dt+1;
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
+ FSE_DTableHeader DTableH;
+ const U32 tableSize = 1 << tableLog;
+ const U32 tableMask = tableSize-1;
+ const U32 step = FSE_tableStep(tableSize);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+ U32 position = 0;
+ U32 highThreshold = tableSize-1;
+ const S16 largeLimit= (S16)(1 << (tableLog-1));
+ U32 noLarge = 1;
+ U32 s;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ DTableH.tableLog = (U16)tableLog;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ if (normalizedCounter[s]==-1)
+ {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ }
+ else
+ {
+ if (normalizedCounter[s] >= largeLimit) noLarge=0;
+ symbolNext[s] = normalizedCounter[s];
+ }
+ }
+
+ /* Spread symbols */
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++)
+ {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ }
+ }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+ /* Build Decoding table */
+ {
+ U32 i;
+ for (i=0; i<tableSize; i++)
+ {
+ FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+ tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+ }
+ }
+
+ DTableH.fastMode = (U16)noLarge;
+ memcpy(dt, &DTableH, sizeof(DTableH)); /* memcpy(), to avoid strict aliasing warnings */
+ return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+* FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+ return (short)(a<0 ? -a : a);
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr))
+ {
+ if (previous0)
+ {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF)
+ {
+ n0+=24;
+ if (ip < iend-5)
+ {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ {
+ bitStream >>= 16;
+ bitCount+=16;
+ }
+ }
+ while ((bitStream & 3) == 3)
+ {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ {
+ const short max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max)
+ {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ }
+ else
+ {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSE_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold)
+ {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ {
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ }
+ else
+ {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ }
+ }
+ }
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+
+
+/*********************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+ size_t errorCode;
+
+ /* Init */
+ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */
+ if (FSE_isError(errorCode)) return errorCode;
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+ {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer ; ends at exactly BIT_DStream_completed */
+ while (1)
+ {
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state1);
+
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state2);
+ }
+
+ /* end ? */
+ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+ return op-ostart;
+
+ if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
+
+ return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH));
+
+ /* select fast mode (static) */
+ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ size_t errorCode;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSE decoding mode */
+ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSE_isError(errorCode)) return errorCode;
+
+ /* always return, even if it is an error code */
+ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/****************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/****************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+/****************************************************************
+* Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/******************************************
+* Helper functions
+******************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+# error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+
+
+/*********************************************************
+* Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+ Read compact Huffman tree, saved by HUF_writeCTable
+ @huffWeight : destination buffer
+ @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ U32 tableLog;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+ U32 n;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) /* special header */
+ {
+ if (iSize >= (242)) /* RLE */
+ {
+ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else /* Incompressible */
+ {
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ for (n=0; n<oSize; n+=2)
+ {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ }
+ }
+ }
+ else /* header compressed with FSE (normal case) */
+ {
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+ weightTotal = 0;
+ for (n=0; n<oSize; n++)
+ {
+ if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ {
+ U32 total = 1 << tableLog;
+ U32 rest = total - weightTotal;
+ U32 verif = 1 << BIT_highbit32(rest);
+ U32 lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ *tableLogPtr = tableLog;
+ return iSize+1;
+}
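+
+/* Illustrative sketch of the header format consumed by HUF_readStats() above
+   (the hand-built input and variable names below are hypothetical, for illustration only) :
+   a first byte < 128 announces an FSE-compressed weight stream of that many bytes,
+   128..241 announces (byte - 127) direct 4-bit weights, and >= 242 is an RLE shortcut.
+
+     BYTE hw[HUF_MAX_SYMBOL_VALUE + 1];
+     U32  rank[HUF_ABSOLUTEMAX_TABLELOG + 1];
+     U32  nbSymbols, tableLog;
+     const BYTE hdr[4] = { 130, 0x11, 0x10, 0 };   // 130-127 = 3 direct weights {1,1,1}, packed as nibbles
+     size_t hSize = HUF_readStats(hw, sizeof(hw), rank, &nbSymbols, &tableLog, hdr, sizeof(hdr));
+     // expected : hSize == 3 (header bytes consumed), nbSymbols == 4 (the last weight is implied),
+     //            weights {1,1,1,1}, tableLog == 2
+*/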
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize = ip[0];
+ U32 nbSymbols = 0;
+ U32 n;
+ U32 nextRankStart;
+ void* ptr = DTable+1;
+ HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
+ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<=tableLog; n++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<nbSymbols; n++)
+ {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUF_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+ {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const void* ptr = DTable;
+ const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
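+
+/* Illustrative layout of the input consumed by HUF_decompress4X2() above
+   (sizes chosen for illustration only) :
+
+     | tree description (read by HUF_readDTableX2) | len1 LE16 | len2 LE16 | len3 LE16 | stream1 | stream2 | stream3 | stream4 |
+
+   len4 is implied : remaining size - 6 - len1 - len2 - len3. Each stream regenerates
+   segmentSize = (dstSize+3)/4 bytes ; e.g. with dstSize = 1000, the four streams decode into
+   dst[0..249], dst[250..499], dst[500..749] and dst[750..999] respectively.
+*/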
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUF_DEltX4 DElt;
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ U32 s;
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1)
+ {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
+ {
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++)
+ {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
+ {
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ }
+ else
+ {
+ U32 i;
+ const U32 end = start + length;
+ HUF_DEltX4 DElt;
+
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ for (i = start; i < end; i++)
+ DTable[i] = DElt;
+ }
+ rankVal[weight] += length;
+ }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ const U32 memLog = DTable[0];
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize = ip[0];
+ void* ptr = DTable;
+ HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
+ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+ {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
+
+ /* Get start index of each weight */
+ {
+ U32 w, nextRankStart = 0;
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ {
+ U32 s;
+ for (s=0; s<nbSymbols; s++)
+ {
+ U32 w = weightList[s];
+ U32 r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ {
+ const U32 minBits = tableLog+1 - maxW;
+ U32 nextRankVal = 0;
+ U32 w, consumed;
+ const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32* rankVal0 = rankVal[0];
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ }
+ for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+ {
+ U32* rankValPtr = rankVal[consumed];
+ for (w = 1; w <= maxW; w++)
+ {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ }
+ }
+ }
+
+ HUF_fillDTableX4(dt, memLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else
+ {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+ {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ }
+ }
+ return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+ {
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const void* ptr = DTable;
+ const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* quad-symbol decoding */
+/**********************************/
+typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
+typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
+
+/* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */
+static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,
+ const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,
+ const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)
+{
+ const int scaleLog = nbBitsBaseline - sizeLog; /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */
+ const int minBits = nbBitsBaseline - maxWeight;
+ const U32 level = DDesc.nbBytes;
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ U32 symbolStartPos, s;
+
+ /* local rankVal, will be modified */
+ memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1)
+ {
+ U32 i;
+ const U32 skipSize = rankVal[minWeight];
+ for (i = 0; i < skipSize; i++)
+ {
+ DSequence[i] = baseSeq;
+ DDescription[i] = DDesc;
+ }
+ }
+
+ /* fill DTable */
+ DDesc.nbBytes++;
+ symbolStartPos = rankStart[minWeight];
+ for (s=symbolStartPos; s<sortedListSize; s++)
+ {
+ const BYTE symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight; /* >= 1 (sorted) */
+ const int nbBits = nbBitsBaseline - weight; /* >= 1 (by construction) */
+ const int totalBits = consumed+nbBits;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (sizeLog-nbBits);
+ baseSeq.byte[level] = symbol;
+ DDesc.nbBits = (BYTE)totalBits;
+
+ if ((level<3) && (sizeLog-totalBits >= minBits)) /* enough room for another symbol */
+ {
+ int nextMinWeight = totalBits + scaleLog;
+ if (nextMinWeight < 1) nextMinWeight = 1;
+ HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,
+ rankValOrigin, totalBits, nextMinWeight, maxWeight,
+ sortedSymbols, sortedListSize, rankStart,
+ nbBitsBaseline, baseSeq, DDesc); /* recursive (max : level 3) */
+ }
+ else
+ {
+ U32 i;
+ const U32 end = start + length;
+ for (i = start; i < end; i++)
+ {
+ DDescription[i] = DDesc;
+ DSequence[i] = baseSeq;
+ }
+ }
+ rankVal[weight] += length;
+ }
+}
+
+
+/* note : same preparation as X4 */
+static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ rankVal_t rankVal;
+ const U32 memLog = DTable[0];
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize = ip[0];
+
+ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable is too small */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+ { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
+
+
+ /* Get start index of each weight */
+ {
+ U32 w, nextRankStart = 0;
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ {
+ U32 s;
+ for (s=0; s<nbSymbols; s++)
+ {
+ U32 w = weightList[s];
+ U32 r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ {
+ const U32 minBits = tableLog+1 - maxW;
+ U32 nextRankVal = 0;
+ U32 w, consumed;
+ const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32* rankVal0 = rankVal[0];
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ }
+ for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+ {
+ U32* rankValPtr = rankVal[consumed];
+ for (w = 1; w <= maxW; w++)
+ {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ }
+ }
+ }
+
+
+ /* fill tables */
+ {
+ void* ptr = DTable+1;
+ HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);
+ void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));
+ HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);
+ HUF_DSeqX6 DSeq;
+ HUF_DDescX6 DDesc;
+ DSeq.sequence = 0;
+ DDesc.nbBits = 0;
+ DDesc.nbBytes = 0;
+ HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,
+ (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,
+ sortedSymbol, sizeOfSort, rankStart0,
+ tableLog+1, DSeq, DDesc);
+ }
+
+ return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, ds+val, sizeof(HUF_DSeqX6));
+ BIT_skipBits(DStream, dd[val].nbBits);
+ return dd[val].nbBytes;
+}
+
+static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,
+ const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ U32 length = dd[val].nbBytes;
+ if (length <= maxL)
+ {
+ memcpy(op, ds+val, length);
+ BIT_skipBits(DStream, dd[val].nbBits);
+ return length;
+ }
+ memcpy(op, ds+val, maxL);
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+ {
+ BIT_skipBits(DStream, dd[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ }
+ return maxL;
+}
+
+
+#define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)
+
+#define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)
+{
+ const void* ddPtr = DTable+1;
+ const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
+ const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
+ const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
+ BYTE* const pStart = p;
+
+ /* up to 16 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))
+ {
+ HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX6_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
+ }
+
+ /* closer to the end, up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+ HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
+
+ while (p <= pEnd-4)
+ HUF_DECODE_SYMBOLX6_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ while (p < pEnd)
+ p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);
+
+ return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X6_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const U32 dtLog = DTable[0];
+ const void* ddPtr = DTable+1;
+ const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
+ const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
+ const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-64 symbols per loop (4-16 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )
+ {
+ HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX6_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX6_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX6_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX6_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX6_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX6_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX6_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX6_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);
+ HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);
+ HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);
+ HUF_decodeStreamX6(op4, &bitD4, oend, DTable, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };
+ /* estimate decompression time */
+ U32 Q;
+ const U32 D256 = (U32)(dstSize >> 8);
+ U32 Dtime[3];
+ U32 algoNb = 0;
+ int n;
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ /* decoder timing evaluation */
+ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ for (n=0; n<3; n++)
+ Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+ Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+ if (Dtime[1] < Dtime[0]) algoNb = 1;
+ if (Dtime[2] < Dtime[algoNb]) algoNb = 2;
+
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+ //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+ //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
+}
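+
+/* Worked example of the selection heuristic above (numbers chosen for illustration only) :
+   with dstSize = 65536 and cSrcSize = 40000, Q = 40000*16/65536 = 9 and D256 = 256, so
+     Dtime[0] =  947 + 128*256 = 33715                     (single-symbol)
+     Dtime[1] = 1729 +  77*256 = 21441 ; + (>>4) -> 22781  (double-symbols)
+     Dtime[2] = 3359 +  77*256 = 23071 ; + (>>3) -> 25954  (quad-symbols)
+   hence algoNb == 1 and HUF_decompress4X4() is selected.
+*/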
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+  - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+*/
+#define ZSTD_MEMORY_USAGE 17
+
+/*!
+ * HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0, fastest), or in memory heap (1, requires malloc())
+ * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif /* ZSTD_HEAPMODE */
+
+/*!
+* LEGACY_SUPPORT :
+* decompressor can decode older formats (starting from Zstd 0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 1
+#endif
+
+
+/* *******************************************************
+* Includes
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug : printf */
+
+
+/* *******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+# include <immintrin.h> /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+
+/* *******************************************************
+* Constants
+*********************************************************/
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define MLbits 7
+#define LLbits 6
+#define Offbits 5
+#define MaxML ((1<<MLbits )-1)
+#define MaxLL ((1<<LLbits )-1)
+#define MaxOff 31
+#define LitFSELog 11
+#define MLFSELog 10
+#define LLFSELog 10
+#define OffFSELog 9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7 /* to remove */
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/* *******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do COPY8(op, ip) while (op < oend);
+}
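+
+/* note : since the do/while above always copies whole 8-byte words, ZSTD_wildcopy() may write
+   up to 7-8 bytes past the requested length ; destinations therefore keep an 8-byte margin
+   (see litBuffer[BLOCKSIZE + 8] below and the oend-8 checks in ZSTD_execSequence()). */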
+
+
+/* **************************************
+* Local structures
+****************************************/
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* offCode;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* litLengthStart;
+ BYTE* litLength;
+ BYTE* matchLengthStart;
+ BYTE* matchLength;
+ BYTE* dumpsStart;
+ BYTE* dumps;
+} seqStore_t;
+
+
+/* *************************************
+* Error Management
+***************************************/
+/*! ZSTD_isError
+* tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+
+/* *************************************************************
+* Decompression section
+***************************************************************/
+struct ZSTD_DCtx_s
+{
+ U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+ U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+ U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+ void* previousDstEnd;
+ void* base;
+ size_t expected;
+ blockType_t bType;
+ U32 phase;
+ const BYTE* litPtr;
+ size_t litSize;
+ BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
+
+
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3) return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
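+
+/* Illustrative decoding of the 3-byte block header parsed above (header bytes are hypothetical) :
+   the two top bits of in[0] give the block type, and ((in[0]&7)<<16) + (in[1]<<8) + in[2] gives the size.
+   For instance { 0x40, 0x01, 0x00 } yields blockType == bt_raw (0x40>>6 == 1) and a return value of 256,
+   i.e. 256 raw bytes follow, while { 0x00, 0x01, 0x00 } announces a 256-byte compressed block.
+*/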
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+ @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+ if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+ if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+ *maxDstSizePtr = litSize;
+ return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+ @return : nb of bytes read from src (< srcSize )*/
+static size_t ZSTD_decodeLiteralsBlock(void* ctx,
+ const void* src, size_t srcSize)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* const istart = (const BYTE* const)src;
+
+ /* any compressed block with literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(*istart & 3)
+ {
+ default:
+ case 0:
+ {
+ size_t litSize = BLOCKSIZE;
+ const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return readSize; /* works if it's an error too */
+ }
+ case IS_RAW:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
+ {
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+3;
+ dctx->litSize = litSize;
+ return litSize+3;
+ }
+ case IS_RLE:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[3], litSize + 8);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return 4;
+ }
+ }
+}
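+
+/* Illustrative decoding of a raw literals header handled above (header bytes are hypothetical) :
+   the two low bits of the first byte select the mode (0 = Huffman-compressed, IS_RAW, IS_RLE) ;
+   for IS_RAW the literal count is (MEM_readLE32(istart) & 0xFFFFFF) >> 2. For instance a header
+   starting with { 0x0D, 0x00, 0x00 } selects IS_RAW with litSize == (0x0D >> 2) == 3,
+   so 3 literal bytes follow and the call returns 3+3 == 6.
+*/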
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ U32 LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < 5) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = MEM_readLE16(ip); ip+=2;
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2)
+ {
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ }
+ else
+ {
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ case bt_rle :
+ LLlog = 0;
+ FSE_buildDTable_rle(DTableLL, *ip++); break;
+ case bt_raw :
+ LLlog = LLbits;
+ FSE_buildDTable_raw(DTableLL, LLbits); break;
+ default :
+ { U32 max = MaxLL;
+ headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableLL, norm, max, LLlog);
+ } }
+
+ switch(Offtype)
+ {
+ case bt_rle :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+ break;
+ case bt_raw :
+ Offlog = Offbits;
+ FSE_buildDTable_raw(DTableOffb, Offbits); break;
+ default :
+ { U32 max = MaxOff;
+ headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableOffb, norm, max, Offlog);
+ } }
+
+ switch(MLtype)
+ {
+ case bt_rle :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableML, *ip++); break;
+ case bt_raw :
+ MLlog = MLbits;
+ FSE_buildDTable_raw(DTableML, MLbits); break;
+ default :
+ { U32 max = MaxML;
+ headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableML, norm, max, MLlog);
+ } } }
+
+ return ip-istart;
+}
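+
+/* Illustrative layout of the sequences section parsed above :
+     - nbSeq              : 2 bytes, little-endian
+     - flags              : 1 byte, LLtype<<6 | Offtype<<4 | MLtype<<2 | dumps-size mode
+     - dumps length       : 1 or 2 extra bytes, depending on bit 1 of the flags byte
+     - dumps              : dumpsLength bytes holding the extended literal/match lengths
+     - FSE table headers  : one per field, unless that field is raw or rle
+     - bitstream          : the entropy-coded sequences, consumed via BIT_initDStream()/FSE_decodeSymbol()
+*/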
+
+
+typedef struct {
+ size_t litLength;
+ size_t offset;
+ size_t matchLength;
+} seq_t;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ FSE_DState_t stateLL;
+ FSE_DState_t stateOffb;
+ FSE_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ seqState->prevOffset = seq->offset;
+ if (litLength == MaxLL)
+ {
+ U32 add = *dumps++;
+ if (add < 255) litLength += add;
+ else
+ {
+ litLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+
+ /* Offset */
+ {
+ static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */
+ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+ 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+ 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+ U32 offsetCode, nbBits;
+ offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset; /* cmove */
+ }
+
+ /* MatchLength */
+ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML)
+ {
+ U32 add = *dumps++;
+ if (add < 255) matchLength += add;
+ else
+ {
+ matchLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+ seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ BYTE* const base, BYTE* const oend)
+{
+ static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
+ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
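+    /* note : when sequence.offset < 8 the match overlaps the output being written, so the first
+       8 bytes are produced with small fixed-up copies (dec32table advances the read position,
+       dec64table rewinds it) ; afterwards op-match is at least 8, so the 8-byte wildcopy below
+       never reads bytes it has not yet written. */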
+ const BYTE* const ostart = op;
+ BYTE* const oLitEnd = op + sequence.litLength;
+ BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const litEnd = *litPtr + sequence.litLength;
+
+ /* checks */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
+
+ /* copy Literals */
+ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* copy Match */
+ {
+ const BYTE* match = op - sequence.offset;
+
+ /* check */
+ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */
+ //if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) */
+ if (match < base) return ERROR(corruption_detected);
+
+ /* close range match, overlap */
+ if (sequence.offset < 8)
+ {
+ const int dec64 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= dec64;
+ }
+ else
+ {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH))
+ {
+ if (op < oend_8)
+ {
+ ZSTD_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ }
+ else
+ {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ }
+
+ return oMatchEnd - ostart;
+}
+
+static size_t ZSTD_decompressSequences(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ int nbSeq;
+ const BYTE* dumps;
+ U32* DTableLL = dctx->LLTable;
+ U32* DTableML = dctx->MLTable;
+ U32* DTableOffb = dctx->OffTable;
+ BYTE* const base = (BYTE*) (dctx->base);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, iend-ip);
+ if (ZSTD_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = 1;
+ errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+ FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
+ {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTD_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
+ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected);
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ /* Decode literals sub-block */
+ size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+
+ return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t decodedSize=0;
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ ZSTD_DCtx ctx;
+ ctx.base = dst;
+ return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * BLOCKSIZE;
+}
+
+/*******************************
+* Streaming Decompression API
+*******************************/
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+ dctx->expected = ZSTD_frameHeaderSize;
+ dctx->phase = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTD_resetDCtx(dctx);
+ return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ free(dctx);
+ return 0;
+}
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+ if (dst != ctx->previousDstEnd) /* not contiguous */
+ ctx->base = dst;
+
+ /* Decompress : frame header */
+ if (ctx->phase == 0)
+ {
+ /* Check frame magic header */
+ U32 magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ return 0;
+ }
+
+ /* Decompress : block header */
+ if (ctx->phase == 1)
+ {
+ blockProperties_t bp;
+ size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end)
+ {
+ ctx->expected = 0;
+ ctx->phase = 0;
+ }
+ else
+ {
+ ctx->expected = blockSize;
+ ctx->bType = bp.blockType;
+ ctx->phase = 2;
+ }
+
+ return 0;
+ }
+
+ /* Decompress : block content */
+ {
+ size_t rSize;
+ switch(ctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+ return rSize;
+ }
+
+}
+
+
+/* wrapper layer */
+
+unsigned ZSTDv02_isError(size_t code)
+{
+ return ZSTD_isError(code);
+}
+
+size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize)
+{
+ return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+}
+
+ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
+{
+ return (ZSTDv02_Dctx*)ZSTD_createDCtx();
+}
+
+size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
+{
+ return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
+{
+ return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
+{
+ return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.h b/vendor/github.com/DataDog/zstd/zstd_v02.h
new file mode 100644
index 000000000..9d7d8d9b5
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V02_H_4174539423
+#define ZSTD_V02_H_4174539423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Simple one-step function
+***************************************/
+/**
+ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
+ compressedSize : is the exact source size
+ maxOriginalSize : is the size of the 'dst' buffer, which must already be allocated.
+ It must be equal to or larger than originalSize, otherwise decompression will fail.
+ return : the number of bytes decompressed into the destination buffer (originalSize)
+ or an errorCode if it fails (which can be tested using ZSTDv02_isError())
+*/
+size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
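+
+/* Editorial illustration (not part of the upstream header) : a minimal one-step
+   decompression sketch. `compressed`, `compressedSize` and `dstCapacity` are
+   hypothetical caller-supplied values; dstCapacity must be at least the original
+   (decompressed) size.
+
+     void* const dst = malloc(dstCapacity);
+     size_t const dSize = ZSTDv02_decompress(dst, dstCapacity, compressed, compressedSize);
+     if (ZSTDv02_isError(dSize)) { free(dst); return; }   // decoding failed
+     // on success, dSize bytes of regenerated data are available in dst
+*/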
+
+ /**
+ ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format
+ srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+ cSize (output parameter) : the number of bytes that would be read to decompress this frame
+ or an error code if it fails (which can be tested using ZSTDv02_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
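+
+/* Editorial illustration (not part of the upstream header) : querying frame size
+   information before allocating an output buffer. `src` and `srcSize` are assumed
+   to describe a buffer starting with a complete v0.2 frame.
+
+     size_t cSize;
+     unsigned long long dBound;
+     ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
+     if (ZSTDv02_isError(cSize)) return;   // invalid or truncated frame
+     // cSize bytes of src form the frame; dBound is an upper bound of the decompressed size
+*/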
+
+/**
+ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
+*/
+unsigned ZSTDv02_isError(size_t code);
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
+ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
+size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);
+
+size_t ZSTDv02_decompressDCtx(void* ctx,
+ void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
+
+/* *************************************
+* Streaming functions
+***************************************/
+size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);
+
+size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
+size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+ Use the above functions alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() will use previously decoded data blocks as reference if they are located prior to the current block.
+ Result is the number of bytes regenerated within 'dst'.
+ It can be zero, which is not an error; it just means ZSTD_decompressContinue() has only decoded a header.
+*/
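+
+/* Editorial illustration (not part of the upstream header) : the alternating
+   nextSrcSizeToDecompress / decompressContinue pattern described above.
+   `readInput()` is a hypothetical helper filling `inBuff` with exactly `toRead`
+   bytes of compressed input; `outBuff`/`outEnd` delimit a hypothetical output
+   buffer large enough for the whole frame.
+
+     ZSTDv02_Dctx* const dctx = ZSTDv02_createDCtx();
+     char* op = outBuff;
+     size_t toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
+     while (toRead != 0) {
+         readInput(inBuff, toRead);
+         size_t const r = ZSTDv02_decompressContinue(dctx, op, (size_t)(outEnd - op), inBuff, toRead);
+         if (ZSTDv02_isError(r)) break;    // corrupted or truncated input
+         op += r;                          // r can be 0 when only a header was decoded
+         toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
+     }
+     ZSTDv02_freeDCtx(dctx);
+*/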
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTDv02_magicNumber 0xFD2FB522 /* v0.2 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V02_H_4174539423 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.c b/vendor/github.com/DataDog/zstd/zstd_v03.c
new file mode 100644
index 000000000..a1bf0fa9b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.c
@@ -0,0 +1,3150 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include "zstd_v03.h"
+#include "error_private.h"
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+
+
+
+/* ******************************************************************
+ mem.h
+ low-level memory access routines
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+* Includes
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+* Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/****************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ * It can generate buggy code on targets whose generated assembly depends on alignment.
+ * But in some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+
+#endif // MEM_FORCE_MEMORY_ACCESS
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian())
+ {
+ MEM_write16(memPtr, val);
+ }
+ else
+ {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+ }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+
+/* ******************************************************************
+ bitstream
+ Part of NewGen Entropy library
+ header file (to include)
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which benefit greatly from being inlined.
+* Since link-time optimization is not available for all compilers,
+* these functions are defined in a .h file, to be included directly.
+*/
+
+
+/**********************************************
+* bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+
+/******************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/****************************************************************
+* Helper functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
+
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+* Initialize a BIT_DStream_t.
+* @bitD : a pointer to an already allocated BIT_DStream_t structure
+* @srcBuffer must point at the beginning of a bitStream
+* @srcSize must be the exact size of the bitStream
+* @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(size_t)) /* normal case */
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ }
+ else
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+ /* fallthrough */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+ /* fallthrough */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+ /* fallthrough */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+ /* fallthrough */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+ /* fallthrough */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
+ /* fallthrough */
+ default:;
+ }
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*!BIT_readBitsFast :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBitsFast(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+ {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start)
+ {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ {
+ U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start)
+ {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream
+* @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
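+
+/* Editorial illustration (not part of the upstream source) : typical use of the
+   backward-reading bitStream API above. `srcBuffer`/`srcSize` are assumed to hold a
+   complete bitstream ending with the mandatory end-mark bit; `nbValues`,
+   `nbBitsPerValue` and `values[]` are hypothetical.
+
+     BIT_DStream_t bitD;
+     unsigned n;
+     if (BIT_initDStream(&bitD, srcBuffer, srcSize) != srcSize) return 0;   // an error code was returned
+     for (n = 0; n < nbValues; n++) {
+         values[n] = BIT_readBits(&bitD, nbBitsPerValue);   // reads backward from the end of the stream
+         BIT_reloadDStream(&bitD);                          // refill the bit container
+     }
+     // BIT_endOfDStream(&bitD) returns 1 once the stream has been consumed exactly
+*/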
+/* ******************************************************************
+ Error codes and messages
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+# define ERR_STATIC static __inline
+#elif defined(__GNUC__)
+# define ERR_STATIC static __attribute__((unused))
+#else
+# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/******************************************
+* Error Management
+******************************************/
+#define PREFIX(name) ZSTD_error_##name
+
+#define ERROR(name) (size_t)-PREFIX(name)
+
+#define ERROR_LIST(ITEM) \
+ ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
+ ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
+ ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
+ ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
+ ITEM(PREFIX(maxCode))
+
+#define ERROR_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+#define ERROR_CONVERTTOSTRING(STRING) #STRING,
+#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
+static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ static const char* codeError = "Unspecified error code";
+ if (ERR_isError(code)) return ERR_strings[-(int)(code)];
+ return codeError;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
+/*
+Constructor and Destructor of type FSE_CTable
+ Note that its size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+
+
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+* Static allocation
+******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
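+
+/* Editorial illustration (not part of the upstream source) : static allocation of a
+   decoding table using the macro above; 12 is an example tableLog and must not
+   exceed FSE_MAX_TABLELOG.
+
+     FSE_DTable dTable[FSE_DTABLE_SIZE_U32(12)];   // 1 + 2^12 unsigned cells
+     // dTable can then be filled with FSE_buildDTable() and used for decoding
+*/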
+
+
+/******************************************
+* FSE advanced API
+******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+/******************************************
+* FSE symbol decompression API
+******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+
+/******************************************
+* FSE unsafe API
+******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/******************************************
+* Implementation of inline functions
+******************************************/
+
+/* decompression */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH));
+ DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+* Static allocation macros
+******************************************/
+/* Huff0 buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
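+
+/* Editorial illustration (not part of the upstream source) : static allocation of a
+   Huff0 single-symbol decoding table with the macro above; 12 is an example maxTableLog.
+
+     HUF_CREATE_STATIC_DTABLEX2(hufTable, 12);
+     // expands to : unsigned short hufTable[HUF_DTABLE_SIZE(12)] = { 12 };
+*/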
+
+
+/******************************************
+* Advanced functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+/*
+ zstd - standard compression library
+ Header File
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+ - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Version
+***************************************/
+#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
+#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */
+
+#if defined (__cplusplus)
+}
+#endif
+/*
+ zstd - standard compression library
+ Header File for static linking only
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+ - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests and to provide feedback, provided you can tolerate the risk of future changes.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Streaming functions
+***************************************/
+
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+
+/*
+ Use the above functions alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() will use previously decoded data blocks as reference if they are located prior to the current block.
+ Result is the number of bytes regenerated within 'dst'.
+ It can be zero, which is not an error; it just means ZSTD_decompressContinue() has only decoded a header.
+*/
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTD_magicNumber 0xFD2FB523 /* v0.3 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/****************************************************************
+* Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+* Byte symbol type
+****************************************************************/
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/****************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+/****************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/****************************************************************
+* Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/****************************************************************
+* Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/****************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included,
+ for type-specific functions (template emulation in C).
+ The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* ptr = dt+1;
+ FSE_DTableHeader DTableH;
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
+ const U32 tableSize = 1 << tableLog;
+ const U32 tableMask = tableSize-1;
+ const U32 step = FSE_tableStep(tableSize);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+ U32 position = 0;
+ U32 highThreshold = tableSize-1;
+ const S16 largeLimit= (S16)(1 << (tableLog-1));
+ U32 noLarge = 1;
+ U32 s;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ DTableH.tableLog = (U16)tableLog;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ if (normalizedCounter[s]==-1)
+ {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ }
+ else
+ {
+ if (normalizedCounter[s] >= largeLimit) noLarge=0;
+ symbolNext[s] = normalizedCounter[s];
+ }
+ }
+
+ /* Spread symbols */
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++)
+ {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ }
+ }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+ /* Build Decoding table */
+ {
+ U32 i;
+ for (i=0; i<tableSize; i++)
+ {
+ FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+ tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+ }
+ }
+
+ DTableH.fastMode = (U16)noLarge;
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+* FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+ return a<0 ? -a : a;
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr))
+ {
+ if (previous0)
+ {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF)
+ {
+ n0+=24;
+ if (ip < iend-5)
+ {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ {
+ bitStream >>= 16;
+ bitCount+=16;
+ }
+ }
+ while ((bitStream & 3) == 3)
+ {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ {
+ const short max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max)
+ {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ }
+ else
+ {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSE_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold)
+ {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ {
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ }
+ else
+ {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ }
+ }
+ }
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+
+
+/*********************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+ size_t errorCode;
+
+ /* Init */
+ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */
+ if (FSE_isError(errorCode)) return errorCode;
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+ {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1)
+ {
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state1);
+
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state2);
+ }
+
+ /* end ? */
+ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+ return op-ostart;
+
+ if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
+
+ return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH));
+
+ /* select fast mode (static) */
+ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ size_t errorCode;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSE decoding mode */
+ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSE_isError(errorCode)) return errorCode;
+
+ /* always return, even if it is an error code */
+ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/****************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+/****************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+/****************************************************************
+* Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/******************************************
+* Helper functions
+******************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+# error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+
+
+/*********************************************************
+* Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+ Read compact Huffman tree, saved by HUF_writeCTable
+ @huffWeight : destination buffer
+ @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ U32 tableLog;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+ U32 n;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) /* special header */
+ {
+ if (iSize >= (242)) /* RLE */
+ {
+ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else /* Incompressible */
+ {
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ for (n=0; n<oSize; n+=2)
+ {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ }
+ }
+ }
+ else /* header compressed with FSE (normal case) */
+ {
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+ weightTotal = 0;
+ for (n=0; n<oSize; n++)
+ {
+ if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ {
+ U32 total = 1 << tableLog;
+ U32 rest = total - weightTotal;
+ U32 verif = 1 << BIT_highbit32(rest);
+ U32 lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ *tableLogPtr = tableLog;
+ return iSize+1;
+}
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize = ip[0];
+ U32 nbSymbols = 0;
+ U32 n;
+ U32 nextRankStart;
+ void* ptr = DTable+1;
+ HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr);
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
+ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<=tableLog; n++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<nbSymbols; n++)
+ {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUF_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+ {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const void* ptr = DTable;
+ const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 supposed already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUF_DEltX4 DElt;
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ U32 s;
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1)
+ {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
+ {
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++)
+ {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
+ {
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ }
+ else
+ {
+ U32 i;
+ const U32 end = start + length;
+ HUF_DEltX4 DElt;
+
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ for (i = start; i < end; i++)
+ DTable[i] = DElt;
+ }
+ rankVal[weight] += length;
+ }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ const U32 memLog = DTable[0];
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize = ip[0];
+ void* ptr = DTable;
+ HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
+ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+ { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
+
+ /* Get start index of each weight */
+ {
+ U32 w, nextRankStart = 0;
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ {
+ U32 s;
+ for (s=0; s<nbSymbols; s++)
+ {
+ U32 w = weightList[s];
+ U32 r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ {
+ const U32 minBits = tableLog+1 - maxW;
+ U32 nextRankVal = 0;
+ U32 w, consumed;
+ const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32* rankVal0 = rankVal[0];
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ }
+ for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+ {
+ U32* rankValPtr = rankVal[consumed];
+ for (w = 1; w <= maxW; w++)
+ {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ }
+ }
+ }
+
+ HUF_fillDTableX4(dt, memLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else
+ {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+ {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ }
+ }
+ return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+ {
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const void* ptr = DTable;
+ const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 supposed already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
+ /* estimate decompression time */
+ U32 Q;
+ const U32 D256 = (U32)(dstSize >> 8);
+ U32 Dtime[3];
+ U32 algoNb = 0;
+ int n;
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ /* decoder timing evaluation */
+ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ for (n=0; n<3; n++)
+ Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+ Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+ if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+ //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+ //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
+}
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+*/
+#define ZSTD_MEMORY_USAGE 17
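+/* Worked example (assuming 4-byte U32 hash entries, as in the zstd 0.3 compressor) :
+   with ZSTD_MEMORY_USAGE at 17, the formula above gives 2^17 bytes = 128 KB;
+   HASH_LOG further down becomes 17-2 = 15, i.e. 32768 entries of 4 bytes each = 128 KB. */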
+
+/*!
+ * HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * on the stack (0, fastest), or on the heap (1, requires malloc()).
+ * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif /* ZSTD_HEAPMODE */
+
+/*!
+* LEGACY_SUPPORT :
+* decompressor can decode older formats (starting from Zstd 0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 1
+#endif
+
+
+/* *******************************************************
+* Includes
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug : printf */
+
+
+/* *******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+# include <immintrin.h> /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#else
+# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#endif
+
+
+/* *******************************************************
+* Constants
+*********************************************************/
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define MLbits 7
+#define LLbits 6
+#define Offbits 5
+#define MaxML ((1<<MLbits )-1)
+#define MaxLL ((1<<LLbits )-1)
+#define MaxOff 31
+#define LitFSELog 11
+#define MLFSELog 10
+#define LLFSELog 10
+#define OffFSELog 9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7 /* to remove */
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/* *******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do COPY8(op, ip) while (op < oend);
+}
+
+
+/* **************************************
+* Local structures
+****************************************/
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* offCode;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* litLengthStart;
+ BYTE* litLength;
+ BYTE* matchLengthStart;
+ BYTE* matchLength;
+ BYTE* dumpsStart;
+ BYTE* dumps;
+} seqStore_t;
+
+
+/* *************************************
+* Error Management
+***************************************/
+/*! ZSTD_isError
+* tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+
+/* *************************************************************
+* Decompression section
+***************************************************************/
+struct ZSTD_DCtx_s
+{
+ U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+ U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+ U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+ void* previousDstEnd;
+ void* base;
+ size_t expected;
+ blockType_t bType;
+ U32 phase;
+ const BYTE* litPtr;
+ size_t litSize;
+ BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
+
+
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3) return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+ @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+ if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+ if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+ *maxDstSizePtr = litSize;
+ return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+ @return : nb of bytes read from src (< srcSize )*/
+static size_t ZSTD_decodeLiteralsBlock(void* ctx,
+ const void* src, size_t srcSize)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* const istart = (const BYTE* const)src;
+
+ /* any compressed block with literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(*istart & 3)
+ {
+ default:
+ case 0:
+ {
+ size_t litSize = BLOCKSIZE;
+ const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return readSize; /* works if it's an error too */
+ }
+ case IS_RAW:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
+ {
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+3;
+ dctx->litSize = litSize;
+ return litSize+3;
+ }
+ case IS_RLE:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[3], litSize + 8);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return 4;
+ }
+ }
+}
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ U32 LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < 5) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = MEM_readLE16(ip); ip+=2;
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2)
+ {
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ }
+ else
+ {
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ case bt_rle :
+ LLlog = 0;
+ FSE_buildDTable_rle(DTableLL, *ip++); break;
+ case bt_raw :
+ LLlog = LLbits;
+ FSE_buildDTable_raw(DTableLL, LLbits); break;
+ default :
+ { U32 max = MaxLL;
+ headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableLL, norm, max, LLlog);
+ } }
+
+ switch(Offtype)
+ {
+ case bt_rle :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+ break;
+ case bt_raw :
+ Offlog = Offbits;
+ FSE_buildDTable_raw(DTableOffb, Offbits); break;
+ default :
+ { U32 max = MaxOff;
+ headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableOffb, norm, max, Offlog);
+ } }
+
+ switch(MLtype)
+ {
+ case bt_rle :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableML, *ip++); break;
+ case bt_raw :
+ MLlog = MLbits;
+ FSE_buildDTable_raw(DTableML, MLbits); break;
+ default :
+ { U32 max = MaxML;
+ headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableML, norm, max, MLlog);
+ } } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t offset;
+ size_t matchLength;
+} seq_t;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ FSE_DState_t stateLL;
+ FSE_DState_t stateOffb;
+ FSE_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ seqState->prevOffset = seq->offset;
+ if (litLength == MaxLL)
+ {
+ U32 add = *dumps++;
+ if (add < 255) litLength += add;
+ else
+ {
+ litLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+
+ /* Offset */
+ {
+ static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */
+ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+ 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+ 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+ U32 offsetCode, nbBits;
+ offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset; /* cmove */
+ }
+
+ /* MatchLength */
+ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML)
+ {
+ U32 add = *dumps++;
+ if (add < 255) matchLength += add;
+ else
+ {
+ matchLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+ seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ BYTE* const base, BYTE* const oend)
+{
+ static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
+ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
+ const BYTE* const ostart = op;
+ BYTE* const oLitEnd = op + sequence.litLength;
+ BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const litEnd = *litPtr + sequence.litLength;
+
+ /* checks */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
+
+ /* copy Literals */
+ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* copy Match */
+ {
+ const BYTE* match = op - sequence.offset;
+
+ /* check */
+ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */
+ //if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) */
+ if (match < base) return ERROR(corruption_detected);
+
+ /* close range match, overlap */
+ if (sequence.offset < 8)
+ {
+ const int dec64 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= dec64;
+ }
+ else
+ {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH))
+ {
+ if (op < oend_8)
+ {
+ ZSTD_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ }
+ else
+ {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ }
+
+ return oMatchEnd - ostart;
+}
+
+static size_t ZSTD_decompressSequences(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ int nbSeq;
+ const BYTE* dumps;
+ U32* DTableLL = dctx->LLTable;
+ U32* DTableML = dctx->MLTable;
+ U32* DTableOffb = dctx->OffTable;
+ BYTE* const base = (BYTE*) (dctx->base);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, iend-ip);
+ if (ZSTD_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = sequence.offset = 4;
+ errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+ FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
+ {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTD_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
+ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected);
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ if (op != litPtr) memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ /* Decode literals sub-block */
+ size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+
+ return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t decodedSize=0;
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ ZSTD_DCtx ctx;
+ ctx.base = dst;
+ return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * BLOCKSIZE;
+}
+
+
+/*******************************
+* Streaming Decompression API
+*******************************/
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+ dctx->expected = ZSTD_frameHeaderSize;
+ dctx->phase = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTD_resetDCtx(dctx);
+ return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ free(dctx);
+ return 0;
+}
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+ if (dst != ctx->previousDstEnd) /* not contiguous */
+ ctx->base = dst;
+
+ /* Decompress : frame header */
+ if (ctx->phase == 0)
+ {
+ /* Check frame magic header */
+ U32 magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ return 0;
+ }
+
+ /* Decompress : block header */
+ if (ctx->phase == 1)
+ {
+ blockProperties_t bp;
+ size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end)
+ {
+ ctx->expected = 0;
+ ctx->phase = 0;
+ }
+ else
+ {
+ ctx->expected = blockSize;
+ ctx->bType = bp.blockType;
+ ctx->phase = 2;
+ }
+
+ return 0;
+ }
+
+ /* Decompress : block content */
+ {
+ size_t rSize;
+ switch(ctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+ return rSize;
+ }
+
+}
+
+
+/* wrapper layer */
+
+unsigned ZSTDv03_isError(size_t code)
+{
+ return ZSTD_isError(code);
+}
+
+size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize)
+{
+ return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+}
+
+ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
+{
+ return (ZSTDv03_Dctx*)ZSTD_createDCtx();
+}
+
+size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
+{
+ return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
+{
+ return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
+{
+ return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.h b/vendor/github.com/DataDog/zstd/zstd_v03.h
new file mode 100644
index 000000000..efd8c2b92
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V03_H_298734209782
+#define ZSTD_V03_H_298734209782
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Simple one-step function
+***************************************/
+/**
+ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
+ compressedSize : is the exact source size
+ maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                     It must be equal to or larger than originalSize, otherwise decompression will fail.
+ return : the number of bytes decompressed into destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
+*/
+size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
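+
+/*
+   A minimal usage sketch of the one-step call above (the buffer names are illustrative placeholders) :
+
+     size_t const regenerated = ZSTDv03_decompress(dstBuffer, dstBufferCapacity,
+                                                   srcBuffer, compressedSize);
+     if (ZSTDv03_isError(regenerated)) { }   // handle error
+     // on success, `regenerated` is the number of bytes written into dstBuffer
+*/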
+
+/**
+ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format
+    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                or an error code if it fails (which can be tested using ZSTDv03_isError())
+    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
+*/
+void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
+
+/**
+ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
+*/
+unsigned ZSTDv03_isError(size_t code);
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
+ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
+size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);
+
+size_t ZSTDv03_decompressDCtx(void* ctx,
+ void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
+
+/* *************************************
+* Streaming functions
+***************************************/
+size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);
+
+size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
+size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() will use previously decoded data blocks as history if they are located prior to the current block.
+  Result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+*/
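+
+/*
+   A minimal sketch of the alternating calls described above, assuming a hypothetical
+   reader readBytes(buf, n) that delivers exactly n bytes, and a contiguous output
+   buffer dst of capacity dstCapacity :
+
+     ZSTDv03_Dctx* const dctx = ZSTDv03_createDCtx();
+     char* op = (char*)dst;                        // keep decoded blocks contiguous
+     size_t toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);
+     while (toRead != 0) {
+         readBytes(inBuff, toRead);                // provide exactly toRead bytes
+         {   size_t const decoded = ZSTDv03_decompressContinue(dctx, op,
+                             dstCapacity - (size_t)(op - (char*)dst), inBuff, toRead);
+             if (ZSTDv03_isError(decoded)) break;  // handle error
+             op += decoded;                        // decoded may be 0 (a header was parsed)
+         }
+         toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);
+     }
+     ZSTDv03_freeDCtx(dctx);
+*/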
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTDv03_magicNumber 0xFD2FB523 /* v0.3 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V03_H_298734209782 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.c b/vendor/github.com/DataDog/zstd/zstd_v04.c
new file mode 100644
index 000000000..4342330e2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.c
@@ -0,0 +1,3634 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+ /******************************************
+ * Includes
+ ******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+#include "zstd_v04.h"
+#include "error_private.h"
+
+
+/* ******************************************************************
+ * mem.h
+ *******************************************************************/
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+* Compiler-specific
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+* Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-*************************************
+* Debug
+***************************************/
+#include "debug.h"
+#ifndef assert
+# define assert(condition) ((void)0)
+#endif
+
+
+/****************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets generating assembly depending on alignment.
+ *            But in some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif // MEM_FORCE_MEMORY_ACCESS
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian())
+ {
+ MEM_write16(memPtr, val);
+ }
+ else
+ {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+ }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+ zstd - standard compression library
+ Header File for static linking only
+*/
+#ifndef ZSTD_STATIC_H
+#define ZSTD_STATIC_H
+
+
+/* *************************************
+* Types
+***************************************/
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11
+
+/** from faster to stronger */
+typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
+
+typedef struct
+{
+    U64 srcSize;       /* optional : tells how many bytes are present in the frame. Use 0 if not known. */
+ U32 windowLog; /* largest match distance : larger == more compression, more memory needed during decompression */
+ U32 contentLog; /* full search segment : larger == more compression, slower, more memory (useless for fast) */
+ U32 hashLog; /* dispatch table : larger == more memory, faster */
+ U32 searchLog; /* nb of searches : larger == more compression, slower */
+ U32 searchLength; /* size of matches : larger == faster decompression, sometimes less compression */
+ ZSTD_strategy strategy;
+} ZSTD_parameters;
+
+typedef ZSTDv04_Dctx ZSTD_DCtx;
+
+/* *************************************
+* Advanced functions
+***************************************/
+/** ZSTD_decompress_usingDict
+* Same as ZSTD_decompressDCtx, using a Dictionary content as prefix
+* Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */
+static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/* **************************************
+* Streaming functions (direct mode)
+****************************************/
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);
+static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);
+static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+
+/**
+ Streaming decompression, bufferless mode
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to fresh status.
+
+ First operation is to retrieve frame parameters, using ZSTD_getFrameParams().
+ This function doesn't consume its input. It needs enough input data to properly decode the frame header.
+  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
+ Result : 0 when successful, it means the ZSTD_parameters structure has been filled.
+           >0 : means there is not enough data in src. Provides the expected size needed to successfully decode the header.
+           an errorCode, which can be tested using ZSTD_isError() (for example, if it's not a ZSTD header)
+
+ Then, you can optionally insert a dictionary.
+ This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted.
+
+ Then it's possible to start decompression.
+ Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
+ They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.
+ It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+*/
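+
+/*
+   A minimal sketch of the sequence described above (helper names such as readBytes()
+   and the buffers are illustrative placeholders) :
+
+     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+     ZSTD_parameters params;
+     size_t const hResult = ZSTD_getFrameParams(&params, header, headerSize);
+     // hResult == 0 : params filled ; hResult > 0 : expected header size ; otherwise an error code (ZSTD_isError())
+
+     // optionally : ZSTD_decompress_insertDictionary(dctx, dict, dictSize);
+
+     char* op = (char*)dst;                        // keep previous blocks contiguous
+     size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+     while (toRead != 0) {
+         readBytes(inBuff, toRead);                // provide exactly toRead bytes
+         {   size_t const decoded = ZSTD_decompressContinue(dctx, op,
+                             (size_t)(dstEnd - op), inBuff, toRead);
+             if (ZSTD_isError(decoded)) break;     // handle error
+             op += decoded;                        // may be 0 when only a header was decoded
+         }
+         toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+     }
+     ZSTD_freeDCtx(dctx);
+*/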
+
+
+
+
+#endif /* ZSTD_STATIC_H */
+
+
+/*
+ zstd_internal - common functions to include
+ Header File for include
+*/
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* *************************************
+* Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/* *************************************
+* Common constants
+***************************************/
+#define ZSTD_MAGICNUMBER 0xFD2FB524 /* v0.4 */
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize_min = 5;
+#define ZSTD_frameHeaderSize_max 5 /* define, for static allocation */
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define MINMATCH 4
+#define REPCODE_STARTVALUE 4
+
+#define MLbits 7
+#define LLbits 6
+#define Offbits 5
+#define MaxML ((1<<MLbits) - 1)
+#define MaxLL ((1<<LLbits) - 1)
+#define MaxOff ((1<<Offbits)- 1)
+#define MLFSELog 10
+#define LLFSELog 10
+#define OffFSELog 9
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+
+/* ******************************************
+* Shared functions to include for inlining
+********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
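+
+/*
+   Usage note (illustrative) : since ZSTD_wildcopy() may write up to 8 bytes past
+   dst+length, callers must keep that much slack at the end of the output buffer,
+   or fall back to memcpy() near the buffer end, e.g. :
+
+     if ((size_t)(oend - op) >= length + 8) ZSTD_wildcopy(op, ip, length);
+     else memcpy(op, ip, length);
+*/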
+
+
+
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ header file
+****************************************************************** */
+#ifndef FSE_H
+#define FSE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+* Includes
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+
+
+/* *****************************************
+* FSE simple functions
+******************************************/
+static size_t FSE_decompress(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize);
+/*!
+FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'maxDstSize'.
+ return : size of regenerated data (<= maxDstSize)
+ or an error code, which can be tested using FSE_isError()
+
+   ** Important ** : FSE_decompress() doesn't decompress non-compressible or RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+
+
+/* *****************************************
+* Tool functions
+******************************************/
+/* Error Management */
+static unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+
+
+
+/* *****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[]
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*!
+FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ return : size read from 'rBuffer'
+ or an errorCode, which can be tested using FSE_isError()
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*!
+Constructor and Destructor of type FSE_DTable
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+
+/*!
+FSE_buildDTable():
+ Builds 'dt', which must be already allocated, using FSE_createDTable()
+ return : 0,
+ or an errorCode, which can be tested using FSE_isError() */
+static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*!
+FSE_decompress_usingDTable():
+ Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'
+ into 'dst' which must be already allocated.
+ return : size of regenerated data (necessarily <= maxDstSize)
+ or an errorCode, which can be tested using FSE_isError() */
+static size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().
+'cSrcSize' must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
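+
+/*
+   A condensed sketch of the tutorial above (essentially what FSE_decompress() below does),
+   with cSrc/cSrcSize and dst/maxDstSize as caller-provided placeholders, and DTable_max_t
+   (a worst-case sized DTable defined further below in this file) used for static allocation :
+
+     short counts[FSE_MAX_SYMBOL_VALUE+1];
+     unsigned maxSymbol = FSE_MAX_SYMBOL_VALUE, tableLog;
+     DTable_max_t dt;
+
+     size_t const hSize = FSE_readNCount(counts, &maxSymbol, &tableLog, cSrc, cSrcSize);
+     if (FSE_isError(hSize)) { }                              // handle error
+     if (FSE_isError(FSE_buildDTable(dt, counts, maxSymbol, tableLog))) { }
+     size_t const dSize = FSE_decompress_usingDTable(dst, maxDstSize,
+                             (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+*/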
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSE_H */
+
+
+/* ******************************************************************
+ bitstream
+ Part of NewGen Entropy library
+ header file (to include)
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which highly benefit from being inlined.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined into a .h to be included.
+*/
+
+/**********************************************
+* bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+
+
+/******************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
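+
+/*
+   A minimal read-loop sketch for the backward bitStream above, with src/srcSize
+   delimiting a complete bitStream and nbBits chosen by the caller :
+
+     BIT_DStream_t bitD;
+     size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
+     if (ERR_isError(initResult)) { }            // handle error
+     while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) {
+         size_t const field = BIT_readBits(&bitD, nbBits);
+         // ... consume field ...
+     }
+     // the stream is fully consumed when BIT_endOfDStream(&bitD) != 0 ;
+     // a complete consumer also drains the tail after BIT_DStream_completed,
+     // as in FSE_decompress_usingDTable_generic() further below.
+*/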
+
+
+
+/****************************************************************
+* Helper functions
+****************************************************************/
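+/* BIT_highbit32() returns the position of the highest set bit, e.g. BIT_highbit32(1)==0, BIT_highbit32(32)==5, BIT_highbit32(0x80000000)==31 (undefined for val==0) */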
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+* Initialize a BIT_DStream_t.
+* @bitD : a pointer to an already allocated BIT_DStream_t structure
+* @srcBuffer must point at the beginning of a bitStream
+* @srcSize must be the exact size of the bitStream
+* @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(size_t)) /* normal case */
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ }
+ else
+ {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
+ default: break;
+ }
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+ bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+*  unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*!BIT_readBitsFast :
+*  unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t value = BIT_lookBitsFast(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+ {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start)
+ {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ {
+ U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start)
+ {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream
+* @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+
+
+
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSE_STATIC_H
+#define FSE_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
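+/* e.g. a statically allocated DTable for tableLog 10 : FSE_DTable dt[FSE_DTABLE_SIZE_U32(10)]; == 1025 unsigned cells (illustrative) */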
+
+
+/* *****************************************
+* FSE advanced API
+*******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+/* decompression */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ memcpy(&DTableH, dt, sizeof(DTableH));
+ DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSE_STATIC_H */
+
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/*-**************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+
+static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ FSE_DTableHeader DTableH;
+ void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ const U32 tableSize = 1 << tableLog;
+ const U32 tableMask = tableSize-1;
+ const U32 step = FSE_tableStep(tableSize);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+ U32 position = 0;
+ U32 highThreshold = tableSize-1;
+ const S16 largeLimit= (S16)(1 << (tableLog-1));
+ U32 noLarge = 1;
+ U32 s;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) ); /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
+ DTableH.tableLog = (U16)tableLog;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ if (normalizedCounter[s]==-1)
+ {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ }
+ else
+ {
+ if (normalizedCounter[s] >= largeLimit) noLarge=0;
+ symbolNext[s] = normalizedCounter[s];
+ }
+ }
+
+ /* Spread symbols */
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++)
+ {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ }
+ }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+ /* Build Decoding table */
+ {
+ U32 i;
+ for (i=0; i<tableSize; i++)
+ {
+ FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+ tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+ }
+ }
+
+ DTableH.fastMode = (U16)noLarge;
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+* FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+ return a<0 ? -a : a;
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr))
+ {
+ if (previous0)
+ {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF)
+ {
+ n0+=24;
+ if (ip < iend-5)
+ {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ {
+ bitStream >>= 16;
+ bitCount+=16;
+ }
+ }
+ while ((bitStream & 3) == 3)
+ {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ {
+ const short max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max)
+ {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ }
+ else
+ {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSE_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold)
+ {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ {
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+ {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ }
+ else
+ {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ }
+ }
+ }
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+
+
+/*********************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<=maxSymbolValue; s++)
+ {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+ size_t errorCode;
+
+ /* Init */
+    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+ if (FSE_isError(errorCode)) return errorCode;
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+ {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1)
+ {
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state1);
+
+ if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+ break;
+
+ *op++ = FSE_GETSYMBOL(&state2);
+ }
+
+ /* end ? */
+ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+ return op-ostart;
+
+ if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
+
+ return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ FSE_DTableHeader DTableH;
+ U32 fastMode;
+
+ memcpy(&DTableH, dt, sizeof(DTableH));
+ fastMode = DTableH.fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ size_t errorCode;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSE decoding mode */
+ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSE_isError(errorCode)) return errorCode;
+
+ /* always return, even if it is an error code */
+ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
+
+
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ header file
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef HUFF0_H
+#define HUFF0_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+* Dependency
+******************************************/
+#include <stddef.h> /* size_t */
+
+
+/* ****************************************
+* Huff0 simple functions
+******************************************/
+static size_t HUF_decompress(void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize);
+/*!
+HUF_decompress():
+ Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstSize'.
+    'dstSize' must be the exact size of the original (uncompressed) data.
+    Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows the size to regenerate.
+ @return : size of regenerated data (== dstSize)
+ or an error code, which can be tested using HUF_isError()
+*/
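+
+/*
+   A minimal usage sketch, with dst sized to the exact regenerated size (dstSize) as required above :
+
+     size_t const result = HUF_decompress(dst, dstSize, cSrc, cSrcSize);
+     if (HUF_isError(result)) { }        // handle error
+     // on success, result == dstSize
+*/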
+
+
+/* ****************************************
+* Tool functions
+******************************************/
+/* Error Management */
+static unsigned HUF_isError(size_t code); /* tells if a return value is an error code */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFF0_H */
+
+
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef HUFF0_STATIC_H
+#define HUFF0_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+* Static allocation macros
+******************************************/
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+ unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
+
+
+/* ****************************************
+* Huff0 detailed API
+******************************************/
+/*!
+HUF_decompress() does the following:
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build the Huffman table from the saved description, using HUF_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
+
+*/
+static size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+static size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+static size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
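+
+/* Illustrative sketch of driving the detailed API directly (assuming the
+   single-symbol X2 decoder) : build the DTable once from the table description
+   at the start of `cSrc`, then decode the remaining payload with it.
+
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+    size_t const hSize = HUF_readDTableX2(DTable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    return HUF_decompress4X2_usingDTable(dst, dstSize,
+                                         (const BYTE*)cSrc + hSize, cSrcSize - hSize,
+                                         DTable);
+*/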
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFF0_STATIC_H */
+
+
+
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+
+/* **************************************************************
+* Constants
+****************************************************************/
+#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+# error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+
+/*-*******************************************************
+* Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+ Read compact Huffman tree, saved by HUF_writeCTable
+ @huffWeight : destination buffer
+ @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ U32 tableLog;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+ U32 n;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) /* special header */
+ {
+ if (iSize >= (242)) /* RLE */
+ {
+ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else /* Incompressible */
+ {
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ for (n=0; n<oSize; n+=2)
+ {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ }
+ }
+ }
+ else /* header compressed with FSE (normal case) */
+ {
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+ weightTotal = 0;
+ for (n=0; n<oSize; n++)
+ {
+ if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ {
+ U32 total = 1 << tableLog;
+ U32 rest = total - weightTotal;
+ U32 verif = 1 << BIT_highbit32(rest);
+ U32 lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ *tableLogPtr = tableLog;
+ return iSize+1;
+}
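+
+/* Layout of the compact weight header parsed above (as handled by the code paths) :
+   - first byte iSize < 128   : weights are FSE-compressed; the next iSize bytes
+                                hold the compressed stream (normal case).
+   - 128 <= iSize < 242       : weights are stored raw, 4 bits each;
+                                (iSize-127) weights follow, packed two per byte.
+   - iSize >= 242             : RLE special case; every weight is 1, and the count
+                                is looked up from this single byte (no payload).
+   In all cases the weight of the last symbol is not stored : it is reconstructed
+   so that the weight total reaches a clean power of 2. */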
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ size_t iSize;
+ U32 nbSymbols = 0;
+ U32 n;
+ U32 nextRankStart;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
+ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
+ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<=tableLog; n++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<nbSymbols; n++)
+ {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUF_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+ {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is assumed to have been verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUF_DEltX4 DElt;
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ U32 s;
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1)
+ {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
+ {
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++)
+ {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
+ {
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ }
+ else
+ {
+ U32 i;
+ const U32 end = start + length;
+ HUF_DEltX4 DElt;
+
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ for (i = start; i < end; i++)
+ DTable[i] = DElt;
+ }
+ rankVal[weight] += length;
+ }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ const U32 memLog = DTable[0];
+ size_t iSize;
+ void* dtPtr = DTable;
+ HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;
+
+ HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
+ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+ //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+ { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
+
+ /* Get start index of each weight */
+ {
+ U32 w, nextRankStart = 0;
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ {
+ U32 s;
+ for (s=0; s<nbSymbols; s++)
+ {
+ U32 w = weightList[s];
+ U32 r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ {
+ const U32 minBits = tableLog+1 - maxW;
+ U32 nextRankVal = 0;
+ U32 w, consumed;
+ const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32* rankVal0 = rankVal[0];
+ for (w=1; w<=maxW; w++)
+ {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ }
+ for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+ {
+ U32* rankValPtr = rankVal[consumed];
+ for (w = 1; w <= maxW; w++)
+ {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ }
+ }
+ }
+
+ HUF_fillDTableX4(dt, memLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else
+ {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+ {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ }
+ }
+ return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+ {
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+static size_t HUF_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BIT_initDStream(&bitD1, istart1, length1);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD2, istart2, length2);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD3, istart3, length3);
+ if (HUF_isError(errorCode)) return errorCode;
+ errorCode = BIT_initDStream(&bitD4, istart4, length4);
+ if (HUF_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+ {
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is assumed to have been verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
+ /* estimate decompression time */
+ U32 Q;
+ const U32 D256 = (U32)(dstSize >> 8);
+ U32 Dtime[3];
+ U32 algoNb = 0;
+ int n;
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ /* decoder timing evaluation */
+ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ for (n=0; n<3; n++)
+ Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+ Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+ if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+ //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+ //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
+}
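+
+/* Worked example of the selection above (figures are illustrative) :
+   with dstSize = 64 KB and cSrcSize = 32 KB, Q = 32768*16/65536 = 8 and D256 = 256.
+   Using algoTime[8] : Dtime[0] = 926 + 128*256 = 33694 ;
+   Dtime[1] = 1613 + 75*256 = 20813, then 20813 + (20813>>4) = 22113.
+   Since Dtime[1] < Dtime[0], the double-symbols decoder HUF_decompress4X4 is selected. */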
+
+
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
+
+
+/*
+ zstd - decompression module for the v0.4 legacy format
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+ - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how the default decompression function ZSTD_decompress() allocates memory :
+ * on the stack (0), or on the heap (1, requires malloc())
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
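+
+/* Example (illustrative) : thanks to the #ifndef guard above, the default can be
+   overridden at build time, e.g. `cc -DZSTD_HEAPMODE=0 ...` to force stack allocation. */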
+
+
+/* *******************************************************
+* Includes
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug : printf */
+
+
+/* *******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+
+/* *************************************
+* Local types
+***************************************/
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+
+/* *******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/* *************************************
+* Error Management
+***************************************/
+
+/*! ZSTD_isError
+* tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+/* *************************************************************
+* Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;
+
+struct ZSTDv04_Dctx_s
+{
+ U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+ U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+ U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+ const void* previousDstEnd;
+ const void* base;
+ const void* vBase;
+ const void* dictEnd;
+ size_t expected;
+ size_t headerSize;
+ ZSTD_parameters params;
+ blockType_t bType;
+ ZSTD_dStage stage;
+ const BYTE* litPtr;
+ size_t litSize;
+ BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+ BYTE headerBuffer[ZSTD_frameHeaderSize_max];
+}; /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+ dctx->expected = ZSTD_frameHeaderSize_min;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ dctx->vBase = NULL;
+ dctx->dictEnd = NULL;
+ return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTD_resetDCtx(dctx);
+ return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ free(dctx);
+ return 0;
+}
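+
+/* Illustrative lifecycle of the decompression context above (all within this
+   translation unit, since these helpers are static) :
+
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();   // allocates and resets the context
+    if (dctx != NULL)
+    {
+        // ... e.g. ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
+        ZSTD_freeDCtx(dctx);                     // releases the context
+    }
+*/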
+
+
+/* *************************************************************
+* Decompression section
+***************************************************************/
+/** ZSTD_decodeFrameHeader_Part1
+* decode the 1st part of the Frame Header, which tells Frame Header size.
+* srcSize must be == ZSTD_frameHeaderSize_min
+* @return : the full size of the Frame Header */
+static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)
+{
+ U32 magicNumber;
+ if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+ zc->headerSize = ZSTD_frameHeaderSize_min;
+ return zc->headerSize;
+}
+
+
+static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
+{
+ U32 magicNumber;
+ if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+ memset(params, 0, sizeof(*params));
+ params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */
+ return 0;
+}
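+
+/* Frame header layout handled above (v0.4 legacy) : a 4-byte little-endian magic
+   number, followed by one byte whose low nibble encodes
+   windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN and whose high nibble is reserved (must be 0). */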
+
+/** ZSTD_decodeFrameHeader_Part2
+* decode the full Frame Header
+* srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1
+* @return : 0, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)
+{
+ size_t result;
+ if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
+ result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
+ if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ return result;
+}
+
+
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3) return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
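+
+/* Worked example for the 3-byte block header parsed above : for input bytes
+   { 0x01, 0x23, 0x45 }, headerFlags = 0x01, hence blockType = 0x01 >> 6 = 0 and
+   cSize = 0x45 + (0x23 << 8) + ((0x01 & 7) << 16) = 0x12345 = 74565 bytes.
+   The compressed block size therefore fits in 19 bits (at most 512 KB - 1). */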
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+ @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+ if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+ if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+ *maxDstSizePtr = litSize;
+ return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+ @return : nb of bytes read from src (< srcSize ) */
+static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ const BYTE* const istart = (const BYTE*) src;
+
+ /* any compressed block with a literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(*istart & 3)
+ {
+ /* compressed */
+ case 0:
+ {
+ size_t litSize = BLOCKSIZE;
+ const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return readSize; /* works if it's an error too */
+ }
+ case IS_RAW:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
+ {
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+3;
+ dctx->litSize = litSize;
+ return litSize+3; }
+ case IS_RLE:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[3], litSize + 8);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return 4;
+ }
+ default:
+ return ERROR(corruption_detected); /* forbidden nominal case */
+ }
+}
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ U32 LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < 5) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = MEM_readLE16(ip); ip+=2;
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2)
+ {
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ }
+ else
+ {
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ case bt_rle :
+ LLlog = 0;
+ FSE_buildDTable_rle(DTableLL, *ip++); break;
+ case bt_raw :
+ LLlog = LLbits;
+ FSE_buildDTable_raw(DTableLL, LLbits); break;
+ default :
+ { U32 max = MaxLL;
+ headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableLL, norm, max, LLlog);
+ } }
+
+ switch(Offtype)
+ {
+ case bt_rle :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+ break;
+ case bt_raw :
+ Offlog = Offbits;
+ FSE_buildDTable_raw(DTableOffb, Offbits); break;
+ default :
+ { U32 max = MaxOff;
+ headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableOffb, norm, max, Offlog);
+ } }
+
+ switch(MLtype)
+ {
+ case bt_rle :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableML, *ip++); break;
+ case bt_raw :
+ MLlog = MLbits;
+ FSE_buildDTable_raw(DTableML, MLbits); break;
+ default :
+ { U32 max = MaxML;
+ headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableML, norm, max, MLlog);
+ } } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t offset;
+ size_t matchLength;
+} seq_t;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ FSE_DState_t stateLL;
+ FSE_DState_t stateOffb;
+ FSE_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ if (litLength == MaxLL) {
+ U32 add = *dumps++;
+ if (add < 255) litLength += add;
+ else {
+ litLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+ dumps += 3;
+ }
+ if (dumps > de) { litLength = MaxLL+255; } /* late correction, to avoid using uninitialized memory */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+
+ /* Offset */
+ { static const U32 offsetPrefix[MaxOff+1] = {
+ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+ 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+ 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+ U32 offsetCode, nbBits;
+ offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset; /* cmove */
+ if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */
+ }
+
+ /* MatchLength */
+ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML) {
+ U32 add = *dumps++;
+ if (add < 255) matchLength += add;
+ else {
+ matchLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+ dumps += 3;
+ }
+ if (dumps > de) { matchLength = MaxML+255; } /* late correction, to avoid using uninitialized memory */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ BYTE* const oLitEnd = op + sequence.litLength;
+ const size_t sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const litEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */
+
+ /* copy Literals */
+ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base))
+ {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase))
+ return ERROR(corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd)
+ {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ {
+ size_t length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ if (op > oend_8 || sequence.matchLength < MINMATCH) {
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+ }
+ }
+ }
+ /* Requirement: op <= oend_8 */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ const int sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH))
+ {
+ if (op < oend_8)
+ {
+ ZSTD_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ }
+ else
+ {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8, but must be signed */
+ }
+ return sequenceLength;
+}
+
+
+static size_t ZSTD_decompressSequences(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ int nbSeq;
+ const BYTE* dumps;
+ U32* DTableLL = dctx->LLTable;
+ U32* DTableML = dctx->MLTable;
+ U32* DTableOffb = dctx->OffTable;
+ const BYTE* const base = (const BYTE*) (dctx->base);
+ const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, iend-ip);
+ if (ZSTD_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ sequence.offset = 4;
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = 4;
+ errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+ FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )
+ {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTD_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* DStream should be entirely and exactly consumed; otherwise data is corrupted */
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected);
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ if (op != litPtr) memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+
+static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) /* not contiguous */
+ {
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ /* Decode literals sub-block */
+ size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+
+ return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ blockProperties_t blockProperties;
+
+ /* init */
+ ZSTD_resetDCtx(ctx);
+ if (dict)
+ {
+ ZSTD_decompress_insertDictionary(ctx, dict, dictSize);
+ ctx->dictEnd = ctx->previousDstEnd;
+ ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
+ ctx->base = dst;
+ }
+ else
+ {
+ ctx->vBase = ctx->base = ctx->dictEnd = dst;
+ }
+
+ /* Frame Header */
+ {
+ size_t frameHeaderSize;
+ if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t decodedSize=0;
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize_min) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * BLOCKSIZE;
+}
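+
+/* Illustrative use of the helper above : walk a legacy v0.4 frame to learn how many
+   compressed bytes it spans and an upper bound on its decompressed size.
+
+    size_t cSize;
+    unsigned long long dBound;
+    ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
+    if (ZSTD_isError(cSize))
+    {
+        // cSize holds an error code; dBound is ZSTD_CONTENTSIZE_ERROR
+    }
+    else
+    {
+        // the frame occupies cSize bytes; its decompressed size is at most dBound
+    }
+*/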
+
+/* ******************************
+* Streaming Decompression API
+********************************/
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+ ZSTD_checkContinuity(ctx, dst);
+
+ /* Decompress : frame header; part 1 */
+ switch (ctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ /* get frame header size */
+ if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
+ ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
+ if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;
+ memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);
+ if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC); /* impossible */
+ ctx->expected = 0; /* not necessary to copy more */
+ /* fallthrough */
+ case ZSTDds_decodeFrameHeader:
+ /* get frame header */
+ { size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);
+ if (ZSTD_isError(result)) return result;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+ }
+ case ZSTDds_decodeBlockHeader:
+ /* Decode block header */
+ { blockProperties_t bp;
+ size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end)
+ {
+ ctx->expected = 0;
+ ctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ else
+ {
+ ctx->expected = blockSize;
+ ctx->bType = bp.blockType;
+ ctx->stage = ZSTDds_decompressBlock;
+ }
+ return 0;
+ }
+ case ZSTDds_decompressBlock:
+ {
+ /* Decompress : block content */
+ size_t rSize;
+ switch(ctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ ctx->stage = ZSTDds_decodeBlockHeader;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->previousDstEnd = (char*)dst + rSize;
+ return rSize;
+ }
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+}
+
+
+static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)
+{
+ ctx->dictEnd = ctx->previousDstEnd;
+ ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
+ ctx->base = dict;
+ ctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+
+
+/*
+ Buffered version of Zstd compression library
+ Copyright (C) 2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+  - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests, to provide feedback, or if you can accept the risk of future changes.
+ */
+
+/* *************************************
+* Includes
+***************************************/
+#include <stdlib.h>
+
+
+/** ************************************************
+* Streaming decompression
+*
+* A ZBUFF_DCtx object is required to track streaming operation.
+* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+* Use ZBUFF_decompressInit() to start a new decompression operation.
+* ZBUFF_DCtx objects can be reused multiple times.
+*
+* Use ZBUFF_decompressContinue() repeatedly to consume your input.
+* *srcSizePtr and *maxDstSizePtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
+* return : a hint of the preferred number of bytes to use as input for the next function call (it's only a hint, to improve latency)
+*          or 0 when a frame is completely decoded
+*          or an error code, which can be tested using ZBUFF_isError().
+*
+* Hint : recommended buffer sizes (not compulsory)
+* output : 128 KB block size is the internal unit; it ensures it's always possible to write a full block when it's decoded.
+* input  : just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* **************************************************/
+
+typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,
+ ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;
+
+/* *** Resource management *** */
+
+#define ZSTD_frameHeaderSize_max 5 /* too magical, should come from reference */
+struct ZBUFFv04_DCtx_s {
+ ZSTD_DCtx* zc;
+ ZSTD_parameters params;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t hPos;
+ const char* dict;
+ size_t dictSize;
+ ZBUFF_dStage stage;
+ unsigned char headerBuffer[ZSTD_frameHeaderSize_max];
+}; /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */
+
+typedef ZBUFFv04_DCtx ZBUFF_DCtx;
+
+
+static ZBUFF_DCtx* ZBUFF_createDCtx(void)
+{
+ ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));
+ if (zbc==NULL) return NULL;
+ memset(zbc, 0, sizeof(*zbc));
+ zbc->zc = ZSTD_createDCtx();
+ zbc->stage = ZBUFFds_init;
+ return zbc;
+}
+
+static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)
+{
+ if (zbc==NULL) return 0; /* support free on null */
+ ZSTD_freeDCtx(zbc->zc);
+ free(zbc->inBuff);
+ free(zbc->outBuff);
+ free(zbc);
+ return 0;
+}
+
+
+/* *** Initialization *** */
+
+static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)
+{
+ zbc->stage = ZBUFFds_readHeader;
+ zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;
+ return ZSTD_resetDCtx(zbc->zc);
+}
+
+
+static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)
+{
+ zbc->dict = (const char*)src;
+ zbc->dictSize = srcSize;
+ return 0;
+}
+
+static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ size_t length = MIN(maxDstSize, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+/* *** Decompression *** */
+
+static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+ const char* const istart = (const char*)src;
+ const char* ip = istart;
+ const char* const iend = istart + *srcSizePtr;
+ char* const ostart = (char*)dst;
+ char* op = ostart;
+ char* const oend = ostart + *maxDstSizePtr;
+ U32 notDone = 1;
+
+ DEBUGLOG(5, "ZBUFF_decompressContinue");
+ while (notDone)
+ {
+ switch(zbc->stage)
+ {
+
+ case ZBUFFds_init :
+ DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
+ return ERROR(init_missing);
+
+ case ZBUFFds_readHeader :
+ /* read header from src */
+ { size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);
+ if (ZSTD_isError(headerSize)) return headerSize;
+ if (headerSize) {
+ /* not enough input to decode header : tell how many bytes would be necessary */
+ memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
+ zbc->hPos += *srcSizePtr;
+ *maxDstSizePtr = 0;
+ zbc->stage = ZBUFFds_loadHeader;
+ return headerSize - zbc->hPos;
+ }
+ zbc->stage = ZBUFFds_decodeHeader;
+ break;
+ }
+
+ case ZBUFFds_loadHeader:
+ /* complete header from src */
+ { size_t headerSize = ZBUFF_limitCopy(
+ zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,
+ src, *srcSizePtr);
+ zbc->hPos += headerSize;
+ ip += headerSize;
+ headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
+ if (ZSTD_isError(headerSize)) return headerSize;
+ if (headerSize) {
+ /* not enough input to decode header : tell how many bytes would be necessary */
+ *maxDstSizePtr = 0;
+ return headerSize - zbc->hPos;
+ } }
+ /* intentional fallthrough */
+
+ case ZBUFFds_decodeHeader:
+ /* apply header to create / resize buffers */
+ { size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;
+ size_t const neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */
+ if (zbc->inBuffSize < neededInSize) {
+ free(zbc->inBuff);
+ zbc->inBuffSize = neededInSize;
+ zbc->inBuff = (char*)malloc(neededInSize);
+ if (zbc->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ if (zbc->outBuffSize < neededOutSize) {
+ free(zbc->outBuff);
+ zbc->outBuffSize = neededOutSize;
+ zbc->outBuff = (char*)malloc(neededOutSize);
+ if (zbc->outBuff == NULL) return ERROR(memory_allocation);
+ } }
+ if (zbc->dictSize)
+ ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);
+ if (zbc->hPos) {
+ /* some data already loaded into headerBuffer : transfer into inBuff */
+ memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
+ zbc->inPos = zbc->hPos;
+ zbc->hPos = 0;
+ zbc->stage = ZBUFFds_load;
+ break;
+ }
+ zbc->stage = ZBUFFds_read;
+ /* fall-through */
+ case ZBUFFds_read:
+ {
+ size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+ if (neededInSize==0) /* end of frame */
+ {
+ zbc->stage = ZBUFFds_init;
+ notDone = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize)
+ {
+ /* directly decode from src */
+ size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
+ zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+ ip, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize) break; /* this was just a header */
+ zbc->outEnd = zbc->outStart + decodedSize;
+ zbc->stage = ZBUFFds_flush;
+ break;
+ }
+ if (ip==iend) { notDone = 0; break; } /* no more input */
+ zbc->stage = ZBUFFds_load;
+ }
+ /* fall-through */
+ case ZBUFFds_load:
+ {
+ size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+ size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */
+ size_t loadedSize;
+ if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */
+ loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
+ ip += loadedSize;
+ zbc->inPos += loadedSize;
+ if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
+ {
+ size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
+ zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+ zbc->inBuff, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ zbc->inPos = 0; /* input is consumed */
+ if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */
+ zbc->outEnd = zbc->outStart + decodedSize;
+ zbc->stage = ZBUFFds_flush;
+ /* ZBUFFds_flush follows */
+ }
+ }
+ /* fall-through */
+ case ZBUFFds_flush:
+ {
+ size_t toFlushSize = zbc->outEnd - zbc->outStart;
+ size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
+ op += flushedSize;
+ zbc->outStart += flushedSize;
+ if (flushedSize == toFlushSize)
+ {
+ zbc->stage = ZBUFFds_read;
+ if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
+ zbc->outStart = zbc->outEnd = 0;
+ break;
+ }
+ /* cannot flush everything */
+ notDone = 0;
+ break;
+ }
+ default: return ERROR(GENERIC); /* impossible */
+ }
+ }
+
+ *srcSizePtr = ip-istart;
+ *maxDstSizePtr = op-ostart;
+
+ {
+ size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+ if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3; /* get the next block header while at it */
+ nextSrcSizeHint -= zbc->inPos; /* already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+
+/* *************************************
+* Tool functions
+***************************************/
+unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
+const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+size_t ZBUFFv04_recommendedDInSize(void)  { return BLOCKSIZE + 3; }
+size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; }
+
+
+
+/*- ========================================================================= -*/
+
+/* final wrapping stage */
+
+size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
+}
+
+size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
+ size_t regenSize;
+ ZSTD_DCtx* dctx = ZSTD_createDCtx();
+ if (dctx==NULL) return ERROR(memory_allocation);
+ regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else
+ ZSTD_DCtx dctx;
+ return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
+#endif
+}
+
+size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
+
+size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
+{
+ return ZSTD_nextSrcSizeToDecompress(dctx);
+}
+
+size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+
+ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
+size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
+
+size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
+size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
+{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }
+
+size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+ DEBUGLOG(5, "ZBUFFv04_decompressContinue");
+ return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
+}
+
+ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
+size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.h b/vendor/github.com/DataDog/zstd/zstd_v04.h
new file mode 100644
index 000000000..bb5f3b7d0
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V04_H_91868324769238
+#define ZSTD_V04_H_91868324769238
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+* Includes
+***************************************/
+#include <stddef.h> /* size_t */
+
+
+/* *************************************
+* Simple one-step function
+***************************************/
+/**
+ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
+    compressedSize : is the exact source size
+    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into the destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
+*/
+size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
+
+ /**
+ ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv04_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+ void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
+
+/**
+ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
+*/
+unsigned ZSTDv04_isError(size_t code);
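+
+/* A minimal sketch (illustrative helper, not part of the API) combining the calls documented
+   above: probe the frame for its compressed length and decompressed bound, then decompress in
+   one step. The conservative dBound check and the (size_t)-1 "buffer too small" result are
+   assumptions made for the example only. */
+static size_t ZSTDv04_checkedDecompress_sketch(void* dst, size_t dstCapacity,
+                                               const void* src, size_t srcSize)
+{
+    size_t cSize;
+    unsigned long long dBound;
+    ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
+    if (ZSTDv04_isError(cSize)) return cSize;                          /* not a valid v0.4 frame */
+    if ((unsigned long long)dstCapacity < dBound) return (size_t)-1;   /* dst may be too small */
+    return ZSTDv04_decompress(dst, dstCapacity, src, cSize);
+}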
+
+
+/* *************************************
+* Advanced functions
+***************************************/
+typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
+ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
+size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);
+
+size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
+ void* dst, size_t maxOriginalSize,
+ const void* src, size_t compressedSize);
+
+
+/* *************************************
+* Direct Streaming
+***************************************/
+size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);
+
+size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
+size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTDv04_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv04_decompressContinue().
+  ZSTDv04_decompressContinue() will use previous data blocks during decompression, if they are located prior to the current block.
+  Result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTDv04_decompressContinue() has decoded some header.
+*/
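+
+/* A minimal sketch of the alternating loop described above. The contiguous in-memory input
+   holding one complete frame, the single flat output buffer, and the (size_t)-1 "truncated
+   input" result are illustrative assumptions. */
+static size_t ZSTDv04_streamDecompress_sketch(ZSTDv04_Dctx* dctx,
+                                              void* dst, size_t dstCapacity,
+                                              const void* src, size_t srcSize)
+{
+    const char* ip = (const char*)src;
+    const char* const iend = ip + srcSize;
+    char* op = (char*)dst;
+    char* const oend = op + dstCapacity;
+    size_t const resetResult = ZSTDv04_resetDCtx(dctx);
+    if (ZSTDv04_isError(resetResult)) return resetResult;
+    while (1) {
+        size_t const toRead = ZSTDv04_nextSrcSizeToDecompress(dctx);
+        size_t decoded;
+        if (toRead == 0) break;                               /* frame fully decoded */
+        if (toRead > (size_t)(iend - ip)) return (size_t)-1;  /* truncated input */
+        decoded = ZSTDv04_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
+        if (ZSTDv04_isError(decoded)) return decoded;
+        ip += toRead;
+        op += decoded;                                        /* may be 0 when a header was decoded */
+    }
+    return (size_t)(op - (char*)dst);
+}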
+
+
+/* *************************************
+* Buffered Streaming
+***************************************/
+typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
+ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
+size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);
+
+size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
+size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);
+
+size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);
+
+/** ************************************************
+* Streaming decompression
+*
+* A ZBUFF_DCtx object is required to track streaming operation.
+* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+* Use ZBUFF_decompressInit() to start a new decompression operation.
+* ZBUFF_DCtx objects can be reused multiple times.
+*
+* Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary().
+* It must have the same content as the dictionary used during the compression phase.
+* Dictionary content must remain accessible during the decompression process.
+*
+* Use ZBUFF_decompressContinue() repeatedly to consume your input.
+* *srcSizePtr and *maxDstSizePtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
+* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
+* or 0 when a frame is completely decoded
+* or an error code, which can be tested using ZBUFF_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize
+* output : ZBUFF_recommendedDOutSize==128 KB; this block size is the internal unit, and it ensures it's always possible to write a full block when it's decoded.
+* input  : ZBUFF_recommendedDInSize==128 KB + 3; just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* **************************************************/
+unsigned ZBUFFv04_isError(size_t errorCode);
+const char* ZBUFFv04_getErrorName(size_t errorCode);
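+
+/* A minimal sketch of driving the buffered streaming API documented above. It assumes the
+   whole frame is available in one flat `src` buffer and that `dst` is large enough for the
+   fully regenerated content; the helper name and the (size_t)-1 error are illustrative. */
+static size_t ZBUFFv04_decompressAll_sketch(void* dst, size_t dstCapacity,
+                                            const void* src, size_t srcSize)
+{
+    ZBUFFv04_DCtx* const zbd = ZBUFFv04_createDCtx();
+    const char* ip = (const char*)src;
+    char* op = (char*)dst;
+    size_t hint;
+    if (zbd == NULL) return (size_t)-1;                       /* illustrative allocation error */
+    { size_t const initResult = ZBUFFv04_decompressInit(zbd);
+      if (ZBUFFv04_isError(initResult)) { ZBUFFv04_freeDCtx(zbd); return initResult; } }
+    hint = 1;                                                 /* any non-zero value : frame not finished */
+    while (hint != 0 && srcSize > 0) {
+        size_t srcUsed = srcSize;                             /* in : available ; out : consumed */
+        size_t dstUsed = dstCapacity - (size_t)(op - (char*)dst);   /* in : room ; out : written */
+        hint = ZBUFFv04_decompressContinue(zbd, op, &dstUsed, ip, &srcUsed);
+        if (ZBUFFv04_isError(hint)) break;
+        ip += srcUsed;  srcSize -= srcUsed;
+        op += dstUsed;
+    }
+    ZBUFFv04_freeDCtx(zbd);
+    return ZBUFFv04_isError(hint) ? hint : (size_t)(op - (char*)dst);
+}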
+
+
+/** The below functions provide recommended buffer sizes for Compression or Decompression operations.
+* These sizes are not compulsory, they just tend to offer better latency */
+size_t ZBUFFv04_recommendedDInSize(void);
+size_t ZBUFFv04_recommendedDOutSize(void);
+
+
+/* *************************************
+* Prefix - version detection
+***************************************/
+#define ZSTDv04_magicNumber 0xFD2FB524 /* v0.4 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V04_H_91868324769238 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.c b/vendor/github.com/DataDog/zstd/zstd_v05.c
new file mode 100644
index 000000000..caaf15f9b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.c
@@ -0,0 +1,4039 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include "zstd_v05.h"
+#include "error_private.h"
+
+
+/* ******************************************************************
+ mem.h
+ low-level memory access routines
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-**************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
+ *            It can generate buggy code on targets that require aligned access.
+ *            In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Only use this if there is no other way to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian()) {
+ MEM_write16(memPtr, val);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+ }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+ }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+ zstd - standard compression library
+ Header File for static linking only
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net
+*/
+#ifndef ZSTD_STATIC_H
+#define ZSTD_STATIC_H
+
+/* The prototypes defined within this file are considered experimental.
+ * They should not be used in the context of a DLL, as they may change in the future.
+ * Prefer static linking if you need them, to guard against breaking changes between versions.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-*************************************
+* Types
+***************************************/
+#define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11
+
+
+/*-*************************************
+* Advanced functions
+***************************************/
+/*- Advanced Decompression functions -*/
+
+/*! ZSTDv05_decompress_usingPreparedDCtx() :
+* Same as ZSTDv05_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.
+* It avoids reloading the dictionary each time.
+* `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict().
+* Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */
+size_t ZSTDv05_decompress_usingPreparedDCtx(
+ ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+/* **************************************
+* Streaming functions (direct mode)
+****************************************/
+size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);
+
+/*
+ Streaming decompression, direct mode (bufferless)
+
+ A ZSTDv05_DCtx object is required to track streaming operations.
+ Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it.
+ A ZSTDv05_DCtx object can be re-used multiple times.
+
+  The first typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams().
+  This operation is independent, and just needs enough input data to properly decode the frame header.
+  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
+  Result : 0 when successful; it means the ZSTDv05_parameters structure has been filled.
+           >0 : means there is not enough data in src. Provides the expected size needed to successfully decode the header.
+           or an errorCode, which can be tested using ZSTDv05_isError()
+
+ Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict()
+ Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx()
+
+ Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternatively.
+  ZSTDv05_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv05_decompressContinue().
+  ZSTDv05_decompressContinue() requires this exact number of bytes, or it will fail.
+ ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
+ They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
+
+ @result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'.
+ It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header.
+
+ A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+*/
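+
+/* A minimal sketch of the first step described above: probe the frame header with
+   ZSTDv05_getFrameParams() before sizing the decoding buffers. The helper name and the
+   windowLogPtr output parameter are illustrative; ZSTDv05_parameters and
+   ZSTDv05_getFrameParams() are assumed to come from "zstd_v05.h". */
+static size_t ZSTDv05_probeHeader_sketch(const void* src, size_t srcSize, unsigned* windowLogPtr)
+{
+    ZSTDv05_parameters params;
+    size_t const result = ZSTDv05_getFrameParams(&params, src, srcSize);
+    if (ZSTDv05_isError(result)) return result;   /* invalid or unsupported frame header */
+    if (result > 0) return result;                /* header incomplete : `result` bytes are expected in total */
+    *windowLogPtr = params.windowLog;             /* decoding needs on the order of (1 << windowLog) bytes of history */
+    return 0;
+}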
+
+
+/* **************************************
+* Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+    The user will have to keep track of the information required to regenerate the data, such as block sizes.
+
+    A few rules to respect :
+    - Uncompressed block size must be <= 128 KB
+    - Compressing or decompressing requires a context structure
+      + Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx()
+    - It is necessary to init the context before starting
+      + compression : ZSTDv05_compressBegin()
+      + decompression : ZSTDv05_decompressBegin()
+      + variants _usingDict() are also allowed
+      + copyCCtx() and copyDCtx() work too
+    - When a block is considered not compressible enough, the ZSTDv05_compressBlock() result will be zero.
+      In that case, nothing is produced into `dst`.
+      + The user must test for such an outcome and deal with the uncompressed data directly
+      + ZSTDv05_decompressBlock() doesn't accept uncompressed data as input !!
+*/
+
+size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
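+
+/* A minimal sketch of the block rules above: the context is initialised with
+   ZSTDv05_decompressBegin() before decoding a single compressed block. The caller is assumed
+   to know cSize and to have handled blocks stored uncompressed separately, since
+   ZSTDv05_decompressBlock() rejects raw data; the (size_t)-1 error is illustrative. */
+static size_t ZSTDv05_decodeOneBlock_sketch(void* dst, size_t dstCapacity,
+                                            const void* cBlock, size_t cSize)
+{
+    size_t result;
+    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
+    if (dctx == NULL) return (size_t)-1;          /* illustrative allocation error */
+    result = ZSTDv05_decompressBegin(dctx);
+    if (!ZSTDv05_isError(result))
+        result = ZSTDv05_decompressBlock(dctx, dst, dstCapacity, cBlock, cSize);
+    ZSTDv05_freeDCtx(dctx);
+    return result;
+}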
+
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTD_STATIC_H */
+
+
+/*
+ zstd_internal - common functions to include
+ Header File for include
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+*/
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+
+
+/*-*************************************
+* Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTDv05_DICT_MAGIC 0xEC30A435
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+
+static const size_t ZSTDv05_blockHeaderSize = 3;
+static const size_t ZSTDv05_frameHeaderSize_min = 5;
+#define ZSTDv05_frameHeaderSize_max 5 /* define, for static allocation */
+
+#define BITv057 128
+#define BITv056 64
+#define BITv055 32
+#define BITv054 16
+#define BITv051 2
+#define BITv050 1
+
+#define IS_HUFv05 0
+#define IS_PCH 1
+#define IS_RAW 2
+#define IS_RLE 3
+
+#define MINMATCH 4
+#define REPCODE_STARTVALUE 1
+
+#define Litbits 8
+#define MLbits 7
+#define LLbits 6
+#define Offbits 5
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML ((1<<MLbits) - 1)
+#define MaxLL ((1<<LLbits) - 1)
+#define MaxOff ((1<<Offbits)- 1)
+#define MLFSEv05Log 10
+#define LLFSEv05Log 10
+#define OffFSEv05Log 9
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define FSEv05_ENCODING_RAW 0
+#define FSEv05_ENCODING_RLE 1
+#define FSEv05_ENCODING_STATIC 2
+#define FSEv05_ENCODING_DYNAMIC 3
+
+
+#define HufLog 12
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define WILDCOPY_OVERLENGTH 8
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTDv05_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv05_wildcopy() :
+* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
+
+
+/*-*******************************************
+* Private interfaces
+*********************************************/
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* offCode;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* litLengthStart;
+ BYTE* litLength;
+ BYTE* matchLengthStart;
+ BYTE* matchLength;
+ BYTE* dumpsStart;
+ BYTE* dumps;
+ /* opt */
+ U32* matchLengthFreq;
+ U32* litLengthFreq;
+ U32* litFreq;
+ U32* offCodeFreq;
+ U32 matchLengthSum;
+ U32 litLengthSum;
+ U32 litSum;
+ U32 offCodeSum;
+} seqStore_t;
+
+
+
+#endif   /* ZSTD_CCOMMON_H_MODULE */
+/* ******************************************************************
+ FSEv05 : Finite State Entropy coder
+ header file
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv05_H
+#define FSEv05_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+* Includes
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+
+
+/*-****************************************
+* FSEv05 simple functions
+******************************************/
+size_t FSEv05_decompress(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize);
+/*!
+FSEv05_decompress():
+ Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'maxDstSize'.
+ return : size of regenerated data (<= maxDstSize)
+ or an error code, which can be tested using FSEv05_isError()
+
+ ** Important ** : FSEv05_decompress() doesn't decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+
+
+/* *****************************************
+* Tool functions
+******************************************/
+/* Error Management */
+unsigned FSEv05_isError(size_t code); /* tells if a return value is an error code */
+const char* FSEv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+
+
+/* *****************************************
+* FSEv05 detailed API
+******************************************/
+/* *** DECOMPRESSION *** */
+
+/*!
+FSEv05_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ return : size read from 'rBuffer'
+ or an errorCode, which can be tested using FSEv05_isError()
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*!
+Constructor and Destructor of type FSEv05_DTable
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSEv05_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv05_DTable* FSEv05_createDTable(unsigned tableLog);
+void FSEv05_freeDTable(FSEv05_DTable* dt);
+
+/*!
+FSEv05_buildDTable():
+ Builds 'dt', which must be already allocated, using FSEv05_createDTable()
+ @return : 0,
+ or an errorCode, which can be tested using FSEv05_isError() */
+size_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*!
+FSEv05_decompress_usingDTable():
+ Decompress compressed source @cSrc of size @cSrcSize using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= @dstCapacity)
+ or an errorCode, which can be tested using FSEv05_isError() */
+size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt);
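+
+/* A minimal sketch of the detailed decompression pipeline above: read the compactly saved
+   normalized counters, build a DTable, then decode the remaining payload with it. The
+   256-symbol working buffer and the (size_t)-1 allocation error are illustrative assumptions. */
+static size_t FSEv05_decodeWithDTable_sketch(void* dst, size_t dstCapacity,
+                                             const void* cSrc, size_t cSrcSize)
+{
+    short norm[256];
+    unsigned maxSymbolValue = 255;
+    unsigned tableLog = 0;
+    FSEv05_DTable* dt;
+    size_t result;
+    size_t const hSize = FSEv05_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+    if (FSEv05_isError(hSize)) return hSize;
+    dt = FSEv05_createDTable(tableLog);
+    if (dt == NULL) return (size_t)-1;            /* illustrative allocation error */
+    result = FSEv05_buildDTable(dt, norm, maxSymbolValue, tableLog);
+    if (!FSEv05_isError(result))
+        result = FSEv05_decompress_usingDTable(dst, dstCapacity,
+                                               (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+    FSEv05_freeDTable(dt);
+    return result;
+}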
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSEv05_H */
+/* ******************************************************************
+ bitstream
+ Part of FSEv05 library
+ header file (to include)
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITv05STREAM_H_MODULE
+#define BITv05STREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which highly benefit from being inlined.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined in a .h file, to be included.
+*/
+
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BITv05_DStream_t;
+
+typedef enum { BITv05_DStream_unfinished = 0,
+ BITv05_DStream_endOfBuffer = 1,
+ BITv05_DStream_completed = 2,
+ BITv05_DStream_overflow = 3 } BITv05_DStream_status; /* result of BITv05_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD);
+MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD);
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
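+
+/* A minimal sketch of the read-backward pattern behind the API above: a caller-known layout of
+   nbFields fixed-width fields is read back while the container is refilled between reads. The
+   layout itself and the final ERROR(GENERIC) "stream not fully consumed" check are illustrative. */
+MEM_STATIC size_t BITv05_readFields_sketch(unsigned* fields, unsigned nbFields, unsigned nbBits,
+                                           const void* srcBuffer, size_t srcSize)
+{
+    BITv05_DStream_t bitD;
+    unsigned n;
+    size_t const initResult = BITv05_initDStream(&bitD, srcBuffer, srcSize);
+    if (FSEv05_isError(initResult)) return initResult;
+    for (n = 0; n < nbFields; n++) {
+        fields[n] = (unsigned)BITv05_readBits(&bitD, nbBits);   /* nbBits should stay small (<= 25) */
+        BITv05_reloadDStream(&bitD);                            /* refill the bit container */
+    }
+    return BITv05_endOfDStream(&bitD) ? 0 : ERROR(GENERIC);     /* expect the stream to be fully consumed */
+}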
+
+
+
+/*-**************************************************************
+* Helper functions
+****************************************************************/
+MEM_STATIC unsigned BITv05_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*!BITv05_initDStream
+* Initialize a BITv05_DStream_t.
+* @bitD : a pointer to an already allocated BITv05_DStream_t structure
+* @srcBuffer must point at the beginning of a bitStream
+* @srcSize must be the exact size of the bitStream
+* @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(size_t)) { /* normal case */
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
+ } else {
+ U32 contain32;
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
+ default: break;
+ }
+ contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
+ bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv05_lookBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
+{
+ const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
+{
+ size_t value = BITv05_lookBits(bitD, nbBits);
+ BITv05_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BITv05_readBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
+{
+ size_t value = BITv05_lookBitsFast(bitD, nbBits);
+ BITv05_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BITv05_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BITv05_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer;
+ return BITv05_DStream_completed;
+ }
+ {
+ U32 nbBytes = bitD->bitsConsumed >> 3;
+ BITv05_DStream_status result = BITv05_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BITv05_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BITv05_endOfDStream
+* @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITv05STREAM_H_MODULE */
+/* ******************************************************************
+ FSEv05 : Finite State Entropy coder
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv05_STATIC_H
+#define FSEv05_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* It is possible to statically allocate an FSEv05 DTable as a table of unsigned using the macro below */
+#define FSEv05_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
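+
+/* For instance (illustrative tableLog value and name), a DTable can be reserved statically and
+   later initialised with FSEv05_buildDTable() before use: */
+static FSEv05_DTable FSEv05_staticDTableSpace_sketch[FSEv05_DTABLE_SIZE_U32(11)];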
+
+
+/* *****************************************
+* FSEv05 advanced API
+*******************************************/
+size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits);
+/* build a fake FSEv05_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, unsigned char symbolValue);
+/* build a fake FSEv05_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+* FSEv05 symbol decompression API
+*******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSEv05_DState_t;
+
+
+static void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt);
+
+static unsigned char FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
+
+static unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr);
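+/* Editorial note : illustrative sketch only (not part of upstream zstd). Typical use of the
+   symbol-level API above : one bitstream, one state, decoding a known number of symbols.
+   The real decoder further below interleaves two states for speed; all names here besides
+   the FSEv05/BITv05 functions are hypothetical. */
+#if 0
+static size_t MY_decodeWithOneState(BYTE* dst, size_t nbSymbols,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const FSEv05_DTable* dt)
+{
+    BITv05_DStream_t bitD;
+    FSEv05_DState_t  state;
+    size_t n;
+    size_t const initErr = BITv05_initDStream(&bitD, cSrc, cSrcSize);
+    if (FSEv05_isError(initErr)) return initErr;
+    FSEv05_initDState(&state, &bitD, dt);
+    for (n = 0; n < nbSymbols; n++) {
+        dst[n] = FSEv05_decodeSymbol(&state, &bitD);   /* read nbBits, update state */
+        if (BITv05_reloadDStream(&bitD) > BITv05_DStream_completed) break;   /* overflow => corrupted input */
+    }
+    return n;   /* number of symbols actually produced */
+}
+#endif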
+
+
+
+/* *****************************************
+* FSEv05 unsafe API
+*******************************************/
+static unsigned char FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+/* decompression */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSEv05_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSEv05_decode_t; /* size == U32 */
+
+MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr;
+ DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog);
+ BITv05_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr)
+{
+ const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
+{
+ const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BITv05_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
+{
+ const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ const U32 nbBits = DInfo.nbBits;
+ BYTE symbol = DInfo.symbol;
+ size_t lowBits = BITv05_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSEv05_STATIC_H */
+/* ******************************************************************
+ FSEv05 : Finite State Entropy coder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSEv05_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv05_MAX_MEMORY_USAGE 14
+#define FSEv05_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv05_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSEv05_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSEv05_FUNCTION_TYPE BYTE
+#define FSEv05_FUNCTION_EXTENSION
+#define FSEv05_DECODE_TYPE FSEv05_decode_t
+
+
+#endif /* !FSEv05_COMMONDEFS_ONLY */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSEv05_MAX_TABLELOG (FSEv05_MAX_MEMORY_USAGE-2)
+#define FSEv05_MAX_TABLESIZE (1U<<FSEv05_MAX_TABLELOG)
+#define FSEv05_MAXTABLESIZE_MASK (FSEv05_MAX_TABLESIZE-1)
+#define FSEv05_DEFAULT_TABLELOG (FSEv05_DEFAULT_MEMORY_USAGE-2)
+#define FSEv05_MIN_TABLELOG 5
+
+#define FSEv05_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX
+#error "FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Complex types
+****************************************************************/
+typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSEv05_FUNCTION_EXTENSION
+# error "FSEv05_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv05_FUNCTION_TYPE
+# error "FSEv05_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv05_CAT(X,Y) X##Y
+#define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y)
+#define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y)
+
+
+/* Function templates */
+static U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+
+
+FSEv05_DTable* FSEv05_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX;
+ return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv05_freeDTable (FSEv05_DTable* dt)
+{
+ free(dt);
+}
+
+size_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ FSEv05_DTableHeader DTableH;
+ void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */
+ FSEv05_DECODE_TYPE* const tableDecode = (FSEv05_DECODE_TYPE*) (tdPtr);
+ const U32 tableSize = 1 << tableLog;
+ const U32 tableMask = tableSize-1;
+ const U32 step = FSEv05_tableStep(tableSize);
+ U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1];
+ U32 position = 0;
+ U32 highThreshold = tableSize-1;
+ const S16 largeLimit= (S16)(1 << (tableLog-1));
+ U32 noLarge = 1;
+ U32 s;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) ); /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
+ DTableH.tableLog = (U16)tableLog;
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) noLarge=0;
+ symbolNext[s] = normalizedCounter[s];
+ } }
+
+ /* Spread symbols */
+ for (s=0; s<=maxSymbolValue; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSEv05_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+ /* Build Decoding table */
+ {
+ U32 i;
+ for (i=0; i<tableSize; i++) {
+ FSEv05_FUNCTION_TYPE symbol = (FSEv05_FUNCTION_TYPE)(tableDecode[i].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[i].nbBits = (BYTE) (tableLog - BITv05_highbit32 ((U32)nextState) );
+ tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+ } }
+
+ DTableH.fastMode = (U16)noLarge;
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ return 0;
+}
+
+
+#ifndef FSEv05_COMMONDEFS_ONLY
+/*-****************************************
+* FSEv05 helper functions
+******************************************/
+unsigned FSEv05_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSEv05 NCount encoding-decoding
+****************************************************************/
+static short FSEv05_abs(short a) { return a<0 ? -a : a; }
+
+
+size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSEv05_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr)) {
+ if (previous0) {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0+=24;
+ if (ip < iend-5) {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 16;
+ bitCount+=16;
+ } }
+ while ((bitStream & 3) == 3) {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ {
+ const short max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ } else {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSEv05_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold) {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ } }
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+
+
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<=maxSymbolValue; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSEv05_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv05_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BITv05_DStream_t bitD;
+ FSEv05_DState_t state1;
+ FSEv05_DState_t state2;
+ size_t errorCode;
+
+ /* Init */
+ errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
+ if (FSEv05_isError(errorCode)) return errorCode;
+
+ FSEv05_initDState(&state1, &bitD, dt);
+ FSEv05_initDState(&state2, &bitD, dt);
+
+#define FSEv05_GETSYMBOL(statePtr) fast ? FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op<olimit) ; op+=4) {
+ op[0] = FSEv05_GETSYMBOL(&state1);
+
+ if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv05_reloadDStream(&bitD);
+
+ op[1] = FSEv05_GETSYMBOL(&state2);
+
+ if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSEv05_GETSYMBOL(&state1);
+
+ if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv05_reloadDStream(&bitD);
+
+ op[3] = FSEv05_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BITv05_reloadDStream(&bitD) >= FSEv05_DStream_partiallyFilled; Ends at exactly BITv05_DStream_completed */
+ while (1) {
+ if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) )
+ break;
+
+ *op++ = FSEv05_GETSYMBOL(&state1);
+
+ if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) )
+ break;
+
+ *op++ = FSEv05_GETSYMBOL(&state2);
+ }
+
+ /* end ? */
+ if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2))
+ return op-ostart;
+
+ if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
+
+ return ERROR(corruption_detected);
+}
+
+
+size_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv05_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSEv05_MAX_SYMBOL_VALUE+1];
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE;
+ size_t errorCode;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSEv05 decoding mode */
+ errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSEv05_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSEv05_isError(errorCode)) return errorCode;
+
+ /* always return, even if it is an error code */
+ return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif /* FSEv05_COMMONDEFS_ONLY */
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ header file
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFF0_H
+#define HUFF0_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+* Huff0 simple functions
+******************************************/
+size_t HUFv05_decompress(void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize);
+/*!
+HUFv05_decompress():
+ Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstSize'.
+ @dstSize : must be the **exact** size of original (uncompressed) data.
+ Note : in contrast with FSEv05, HUFv05_decompress can regenerate
+ RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ because it knows size to regenerate.
+ @return : size of regenerated data (== dstSize)
+ or an error code, which can be tested using HUFv05_isError()
+*/
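+/* Editorial note : illustrative call sequence only (not part of upstream zstd); `dstBuffer`,
+   `originalSize`, `cSrcBuffer` and `cSrcSize` are hypothetical and must come from the caller's
+   framing metadata, since @dstSize has to be the exact regenerated size. */
+#if 0
+{
+    size_t const r = HUFv05_decompress(dstBuffer, originalSize, cSrcBuffer, cSrcSize);
+    if (HUFv05_isError(r)) { /* corrupted input, or originalSize too small */ }
+    /* on success, r == originalSize */
+}
+#endif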
+
+
+/* ****************************************
+* Tool functions
+******************************************/
+/* Error Management */
+unsigned HUFv05_isError(size_t code); /* tells if a return value is an error code */
+const char* HUFv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFF0_H */
+/* ******************************************************************
+ Huff0 : Huffman codec, part of New Generation Entropy library
+ header file, for static linking only
+ Copyright (C) 2013-2016, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUF0_STATIC_H
+#define HUF0_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* static allocation of Huff0's DTable */
+#define HUFv05_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog))
+#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv05_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv05_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+ unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
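+/* Editorial note : illustrative sketch only (not part of upstream zstd). The X2 macro above
+   declares a single-symbol DTable on the stack and sets its first cell to maxTableLog;
+   `myDTable` is a hypothetical name. */
+#if 0
+{
+    HUFv05_CREATE_STATIC_DTABLEX2(myDTable, HUFv05_MAX_TABLELOG);   /* U16 cells, cell 0 = maxTableLog */
+    /* fill with HUFv05_readDTableX2(), then decode with HUFv05_decompress4X2_usingDTable() */
+}
+#endif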
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
+
+
+/* ****************************************
+* Huff0 detailed API
+******************************************/
+/*!
+HUFv05_decompress() does the following:
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build the Huffman table from its saved description, using HUFv05_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUFv05_decompressSXn_usingDTable
+*/
+size_t HUFv05_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+size_t HUFv05_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv05_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
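+/* Editorial note : illustrative sketch only (not part of upstream zstd). Manual use of the
+   detailed API for the single-symbol path; the convenience wrapper HUFv05_decompress4X2()
+   defined later in this file does essentially the same thing. `dst`, `dstSize`, `cSrc` and
+   `cSrcSize` are hypothetical caller-provided values. */
+#if 0
+{
+    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+    size_t const hSize = HUFv05_readDTableX2(DTable, cSrc, cSrcSize);      /* 2. build table from header */
+    if (HUFv05_isError(hSize) || hSize >= cSrcSize) { /* handle error */ }
+    size_t const dSize = HUFv05_decompress4X2_usingDTable(dst, dstSize,    /* 3. decode 4 segments */
+                                            (const BYTE*)cSrc + hSize, cSrcSize - hSize, DTable);
+}
+#endif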
+
+
+/* single stream variants */
+
+size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+
+size_t HUFv05_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUF0_STATIC_H */
+/* ******************************************************************
+ Huff0 : Huffman coder, part of New Generation Entropy library
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSEv05+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include <stdio.h> /* printf (debug) */
+
+
+/* **************************************************************
+* Constants
+****************************************************************/
+#define HUFv05_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUFv05_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */
+#define HUFv05_DEFAULT_TABLELOG HUFv05_MAX_TABLELOG /* tableLog by default, when not specified */
+#define HUFv05_MAX_SYMBOL_VALUE 255
+#if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG)
+# error "HUFv05_MAX_TABLELOG is too large !"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+unsigned HUFv05_isError(size_t code) { return ERR_isError(code); }
+const char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+#define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* *******************************************************
+* Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2; /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4; /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUFv05_readStats
+ Read compact Huffman tree, saved by HUFv05_writeCTable
+ @huffWeight : destination buffer
+ @return : size read from `src`
+*/
+static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ U32 tableLog;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+ U32 n;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ if (iSize >= (242)) { /* RLE */
+ static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else { /* Incompressible */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSEv05 (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSEv05_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+ weightTotal = 0;
+ for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ tableLog = BITv05_highbit32(weightTotal) + 1;
+ if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ { /* determine last weight */
+ U32 total = 1 << tableLog;
+ U32 rest = total - weightTotal;
+ U32 verif = 1 << BITv05_highbit32(rest);
+ U32 lastWeight = BITv05_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ *tableLogPtr = tableLog;
+ return iSize+1;
+}
+
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+
+size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ size_t iSize;
+ U32 nbSymbols = 0;
+ U32 n;
+ U32 nextRankStart;
+ void* const dtPtr = DTable + 1;
+ HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr;
+
+ HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
+ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv05_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
+ DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUFv05_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize;
+}
+
+static BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog)
+{
+ const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ BITv05_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
+ HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) {
+ HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd))
+ HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+size_t HUFv05_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const U32 dtLog = DTable[0];
+ const void* dtPtr = DTable;
+ const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1;
+ BITv05_DStream_t bitD;
+
+ if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall);
+ { size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);
+ if (HUFv05_isError(errorCode)) return errorCode; }
+
+ HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+ /* check */
+ if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+size_t HUFv05_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BITv05_DStream_t bitD1;
+ BITv05_DStream_t bitD2;
+ BITv05_DStream_t bitD3;
+ BITv05_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+ if (HUFv05_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv05_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+
+size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+ size_t errorCode;
+
+ errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+static void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUFv05_DEltX4 DElt;
+ U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+ U32 s;
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }
+}
+
+typedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ } else {
+ U32 i;
+ const U32 end = start + length;
+ HUFv05_DEltX4 DElt;
+
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ for (i = start; i < end; i++)
+ DTable[i] = DElt;
+ }
+ rankVal[weight] += length;
+ }
+}
+
+size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ const U32 memLog = DTable[0];
+ size_t iSize;
+ void* dtPtr = DTable;
+ HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
+
+ HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */
+ if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+ //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv05_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ {
+ U32 w, nextRankStart = 0;
+ for (w=1; w<=maxW; w++) {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ {
+ U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 w = weightList[s];
+ U32 r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ {
+ const U32 minBits = tableLog+1 - maxW;
+ U32 nextRankVal = 0;
+ U32 w, consumed;
+ const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32* rankVal0 = rankVal[0];
+ for (w=1; w<=maxW; w++) {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ }
+ for (consumed = minBits; consumed <= memLog - minBits; consumed++) {
+ U32* rankValPtr = rankVal[consumed];
+ for (w = 1; w <= maxW; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } }
+
+ HUFv05_fillDTableX4(dt, memLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ return iSize;
+}
+
+
+static U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BITv05_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BITv05_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ } }
+ return 1;
+}
+
+
+#define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
+ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) {
+ HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2))
+ HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+
+size_t HUFv05_decompress1X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const unsigned* DTable)
+{
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const U32 dtLog = DTable[0];
+ const void* const dtPtr = DTable;
+ const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
+ size_t errorCode;
+
+ /* Init */
+ BITv05_DStream_t bitD;
+ errorCode = BITv05_initDStream(&bitD, istart, cSrcSize);
+ if (HUFv05_isError(errorCode)) return errorCode;
+
+ /* finish bitStreams one by one */
+ HUFv05_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
+
+ /* check */
+ if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUFv05_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+size_t HUFv05_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const unsigned* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ {
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BITv05_DStream_t bitD1;
+ BITv05_DStream_t bitD2;
+ BITv05_DStream_t bitD3;
+ BITv05_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+ if (HUFv05_isError(errorCode)) return errorCode;
+ errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+ if (HUFv05_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv05_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv05_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUFv05_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL };
+ /* estimate decompression time */
+ U32 Q;
+ const U32 D256 = (U32)(dstSize >> 8);
+ U32 Dtime[3];
+ U32 algoNb = 0;
+ int n;
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize >= dstSize) return ERROR(corruption_detected); /* invalid, or not compressed; the not-compressed case is expected to be handled by the caller */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ /* decoder timing evaluation */
+ Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ for (n=0; n<3; n++)
+ Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+ Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+ if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+ //return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+ //return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
+}
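+
+/* Worked example of the selector heuristic above (illustrative only, not part of the
+ * original source) : with cSrcSize = 6000 and dstSize = 16384 :
+ *    Q        = 6000*16/16384 = 5     (integer division)
+ *    D256     = 16384 >> 8    = 64
+ *    Dtime[0] =  714 + 128*64 = 8906                       (single-symbol, 4X2)
+ *    Dtime[1] = 1418 +  74*64 = 6154 ; +(6154>>4) = 6538   (double-symbol, 4X4)
+ * 6538 < 8906, so HUFv05_decompress4X4() is selected for this input. */
+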
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTDv05_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTDv05_HEAPMODE
+# define ZSTDv05_HEAPMODE 1
+#endif
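+
+/* Example (illustrative) : ZSTDv05_HEAPMODE can be overridden at compile time,
+ * e.g. `-DZSTDv05_HEAPMODE=0`, to make ZSTDv05_decompress() place its context
+ * on the stack instead of allocating it on the heap. */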
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug only : printf */
+
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+
+/*-*************************************
+* Local types
+***************************************/
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+
+/* *******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/* *************************************
+* Error Management
+***************************************/
+/*! ZSTDv05_isError() :
+* tells if a return value is an error code */
+unsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); }
+
+
+/*! ZSTDv05_getErrorName() :
+* provides error code string (useful for debugging) */
+const char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* *************************************************************
+* Context management
+***************************************************************/
+typedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader,
+ ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage;
+
+struct ZSTDv05_DCtx_s
+{
+ FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)];
+ FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)];
+ FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)];
+ unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)];
+ const void* previousDstEnd;
+ const void* base;
+ const void* vBase;
+ const void* dictEnd;
+ size_t expected;
+ size_t headerSize;
+ ZSTDv05_parameters params;
+ blockType_t bType; /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+ ZSTDv05_dStage stage;
+ U32 flagStaticTables;
+ const BYTE* litPtr;
+ size_t litSize;
+ BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];
+}; /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */
+
+size_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */
+size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }
+
+size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)
+{
+ dctx->expected = ZSTDv05_frameHeaderSize_min;
+ dctx->stage = ZSTDv05ds_getFrameHeaderSize;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ dctx->vBase = NULL;
+ dctx->dictEnd = NULL;
+ dctx->hufTableX4[0] = HufLog;
+ dctx->flagStaticTables = 0;
+ return 0;
+}
+
+ZSTDv05_DCtx* ZSTDv05_createDCtx(void)
+{
+ ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTDv05_decompressBegin(dctx);
+ return dctx;
+}
+
+size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx)
+{
+ free(dctx);
+ return 0; /* reserved as a potential error code in the future */
+}
+
+void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx)
+{
+ memcpy(dstDCtx, srcDCtx,
+ sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max)); /* no need to copy workspace */
+}
+
+
+/* *************************************************************
+* Decompression section
+***************************************************************/
+
+/* Frame format description
+ Frame Header - [ Block Header - Block ] - Frame End
+ 1) Frame Header
+ - 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h)
+ - 1 byte - Window Descriptor
+ 2) Block Header
+ - 3 bytes, starting with a 2-bits descriptor
+ Uncompressed, Compressed, Frame End, unused
+ 3) Block
+ See Block Format Description
+ 4) Frame End
+ - 3 bytes, compatible with Block Header
+*/
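+
+/* Illustrative sketch (not part of the original source) : reading the Frame Header
+ * fields described above, mirroring ZSTDv05_getFrameParams() further below :
+ *
+ *     const BYTE* const p = (const BYTE*)src;                  // at least 5 bytes available
+ *     U32 const magic = MEM_readLE32(p);                       // must equal ZSTDv05_MAGICNUMBER
+ *     U32 const windowLog = (p[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;
+ *     // the upper 4 bits of p[4] are reserved and must be zero
+ */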
+
+/* Block format description
+
+ Block = Literal Section - Sequences Section
+ Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+ 1) Literal Section
+
+ 1.1) Header : 1-5 bytes
+ flags: 2 bits
+ 00 compressed by Huff0
+ 01 unused
+ 10 is Raw (uncompressed)
+ 11 is Rle
+ Note : using 01 => Huff0 with precomputed table ?
+ Note : delta map ? => compressed ?
+
+ 1.1.1) Huff0-compressed literal block : 3-5 bytes
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ srcSize < 16KB => 4 bytes (2-2-14-14)
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RAW<<6) + (0<<4) + size
+ 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RLE<<6) + (0<<4) + size
+ 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ srcSize < 16KB => 4 bytes (2-2-14-14)
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1- CTable available (stored into workspace ?)
+ 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+ 1.2) Literal block content
+
+ 1.2.1) Huff0 block, using sizes from header
+ See Huff0 format
+
+ 1.2.2) Huff0 block, using prepared table
+
+ 1.2.3) Raw content
+
+ 1.2.4) single byte
+
+
+ 2) Sequences section
+ TO DO
+*/
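+
+/* Illustrative sketch (not part of the original source) : decoding the 3-byte Huff0
+ * literal header described in 1.1.1, as done in ZSTDv05_decodeLiteralsBlock() below :
+ *
+ *     U32 const lhSize       = (istart[0] >> 4) & 3;           // size format; 0 or 1 => 3-byte header
+ *     U32 const singleStream = istart[0] & 16;
+ *     size_t const litSize   = ((istart[0] & 15) << 6) + (istart[1] >> 2);   // 10-bit regenerated size
+ *     size_t const litCSize  = ((istart[1] &  3) << 8) + istart[2];          // 10-bit compressed size
+ */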
+
+
+/** ZSTDv05_decodeFrameHeader_Part1() :
+* decode the 1st part of the Frame Header, which tells Frame Header size.
+* srcSize must be == ZSTDv05_frameHeaderSize_min.
+* @return : the full size of the Frame Header */
+static size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
+{
+ U32 magicNumber;
+ if (srcSize != ZSTDv05_frameHeaderSize_min)
+ return ERROR(srcSize_wrong);
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+ zc->headerSize = ZSTDv05_frameHeaderSize_min;
+ return zc->headerSize;
+}
+
+
+size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize)
+{
+ U32 magicNumber;
+ if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max;
+ magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+ memset(params, 0, sizeof(*params));
+ params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;
+ if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */
+ return 0;
+}
+
+/** ZSTDv05_decodeFrameHeader_Part2() :
+* decode the full Frame Header.
+* srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1().
+* @return : 0, or an error code, which can be tested using ZSTDv05_isError() */
+static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
+{
+ size_t result;
+ if (srcSize != zc->headerSize)
+ return ERROR(srcSize_wrong);
+ result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
+ if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ return result;
+}
+
+
+static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3)
+ return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+
+static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (dst==NULL) return ERROR(dstSize_tooSmall);
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/*! ZSTDv05_decodeLiteralsBlock() :
+ @return : nb of bytes read from src (< srcSize ) */
+static size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ const BYTE* const istart = (const BYTE*) src;
+
+ /* any compressed block with literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(istart[0]>> 6)
+ {
+ case IS_HUFv05:
+ {
+ size_t litSize, litCSize, singleStream=0;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+ switch(lhSize)
+ {
+        case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ singleStream = istart[0] & 16;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize=4;
+ litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+ litCSize = ((istart[2] & 63) << 8) + istart[3];
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize=5;
+ litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+ litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
+ break;
+ }
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ if (HUFv05_isError(singleStream ?
+ HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+ HUFv05_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+ return ERROR(corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case IS_PCH:
+ {
+ size_t errorCode;
+ size_t litSize, litCSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ if (lhSize != 1) /* only case supported for now : small litSize, single stream */
+ return ERROR(corruption_detected);
+ if (!dctx->flagStaticTables)
+ return ERROR(dictionary_corrupted);
+
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
+ if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case IS_RAW:
+ {
+ size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+        case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+ lhSize=1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+ case IS_RLE:
+ {
+ size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+        case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+ break;
+ }
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ return ERROR(corruption_detected); /* impossible */
+ }
+}
+
+
+static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,
+ const void* src, size_t srcSize, U32 flagStaticTable)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ unsigned LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < MIN_SEQUENCES_SIZE)
+ return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = *ip++;
+ if (*nbSeq==0) return 1;
+ if (*nbSeq >= 128) {
+ if (ip >= iend) return ERROR(srcSize_wrong);
+ *nbSeq = ((nbSeq[0]-128)<<8) + *ip++;
+ }
+
+ if (ip >= iend) return ERROR(srcSize_wrong);
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2) {
+ if (ip+3 > iend) return ERROR(srcSize_wrong);
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ } else {
+ if (ip+2 > iend) return ERROR(srcSize_wrong);
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ case FSEv05_ENCODING_RLE :
+ LLlog = 0;
+ FSEv05_buildDTable_rle(DTableLL, *ip++);
+ break;
+ case FSEv05_ENCODING_RAW :
+ LLlog = LLbits;
+ FSEv05_buildDTable_raw(DTableLL, LLbits);
+ break;
+ case FSEv05_ENCODING_STATIC:
+ if (!flagStaticTable) return ERROR(corruption_detected);
+ break;
+ case FSEv05_ENCODING_DYNAMIC :
+ default : /* impossible */
+ { unsigned max = MaxLL;
+ headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSEv05_buildDTable(DTableLL, norm, max, LLlog);
+ } }
+
+ switch(Offtype)
+ {
+ case FSEv05_ENCODING_RLE :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+ break;
+ case FSEv05_ENCODING_RAW :
+ Offlog = Offbits;
+ FSEv05_buildDTable_raw(DTableOffb, Offbits);
+ break;
+ case FSEv05_ENCODING_STATIC:
+ if (!flagStaticTable) return ERROR(corruption_detected);
+ break;
+ case FSEv05_ENCODING_DYNAMIC :
+ default : /* impossible */
+ { unsigned max = MaxOff;
+ headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSEv05_buildDTable(DTableOffb, norm, max, Offlog);
+ } }
+
+ switch(MLtype)
+ {
+ case FSEv05_ENCODING_RLE :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSEv05_buildDTable_rle(DTableML, *ip++);
+ break;
+ case FSEv05_ENCODING_RAW :
+ MLlog = MLbits;
+ FSEv05_buildDTable_raw(DTableML, MLbits);
+ break;
+ case FSEv05_ENCODING_STATIC:
+ if (!flagStaticTable) return ERROR(corruption_detected);
+ break;
+ case FSEv05_ENCODING_DYNAMIC :
+ default : /* impossible */
+ { unsigned max = MaxML;
+ headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSEv05_buildDTable(DTableML, norm, max, MLlog);
+ } } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+} seq_t;
+
+typedef struct {
+ BITv05_DStream_t DStream;
+ FSEv05_DState_t stateLL;
+ FSEv05_DState_t stateOffb;
+ FSEv05_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+
+static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSEv05_peakSymbol(&(seqState->stateLL));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ if (litLength == MaxLL) {
+ U32 add = *dumps++;
+ if (add < 255) litLength += add;
+ else {
+ litLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no risk : dumps is always followed by seq tables > 1 byte */
+ if (litLength&1) litLength>>=1, dumps += 3;
+ else litLength = (U16)(litLength)>>1, dumps += 2;
+ }
+ if (dumps > de) { litLength = MaxLL+255; } /* late correction, to avoid using uninitialized memory */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+
+ /* Offset */
+ {
+ static const U32 offsetPrefix[MaxOff+1] = {
+ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+ 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+ 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+ U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
+ U32 nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits);
+ if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset; /* repcode, cmove */
+ if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */
+ FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* update */
+ }
+
+ /* Literal length update */
+ FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); /* update */
+ if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
+
+ /* MatchLength */
+ matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML) {
+ U32 add = *dumps++;
+ if (add < 255) matchLength += add;
+ else {
+            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no risk : dumps is always followed by seq tables > 1 byte */
+ if (matchLength&1) matchLength>>=1, dumps += 3;
+ else matchLength = (U16)(matchLength)>>1, dumps += 2;
+ }
+ if (dumps > de) { matchLength = MaxML+255; } /* late correction, to avoid using uninitialized memory */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+
+#if 0 /* debug */
+ {
+ static U64 totalDecoded = 0;
+ printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n",
+ (U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset);
+ totalDecoded += litLength + matchLength;
+ }
+#endif
+}
+
+
+static size_t ZSTDv05_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ BYTE* const oLitEnd = op + sequence.litLength;
+ const size_t sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const litEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */
+
+ /* copy Literals */
+ ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase))
+ return ERROR(corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ {
+ size_t length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ if (op > oend_8 || sequence.matchLength < MINMATCH) {
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+ }
+ } }
+ /* Requirement: op <= oend_8 */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ const int sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTDv05_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTDv05_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_8) {
+ ZSTDv05_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd)
+ *op++ = *match++;
+ } else {
+ ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+
+static size_t ZSTDv05_decompressSequences(
+ ZSTDv05_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength=0;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ int nbSeq=0;
+ const BYTE* dumps = NULL;
+ unsigned* DTableLL = dctx->LLTable;
+ unsigned* DTableML = dctx->MLTable;
+ unsigned* DTableOffb = dctx->OffTable;
+ const BYTE* const base = (const BYTE*) (dctx->base);
+ const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, seqSize, dctx->flagStaticTables);
+ if (ZSTDv05_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ sequence.offset = REPCODE_STARTVALUE;
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = REPCODE_STARTVALUE;
+ errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+ FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTDv05_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if (nbSeq) return ERROR(corruption_detected);
+ }
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+
+static void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+
+static size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ size_t litCSize;
+
+ if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong);
+
+ /* Decode literals sub-block */
+ litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize);
+ if (ZSTDv05_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+
+ return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTDv05_checkContinuity(dctx, dst);
+ return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+/*! ZSTDv05_decompress_continueDCtx
+* dctx must have been properly initialized */
+static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ blockProperties_t blockProperties;
+ memset(&blockProperties, 0, sizeof(blockProperties));
+
+ /* Frame Header */
+ { size_t frameHeaderSize;
+ if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
+ frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
+ if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
+ if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize);
+ if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t decodedSize=0;
+ size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTDv05_blockHeaderSize;
+ remainingSize -= ZSTDv05_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTDv05_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+
+size_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ ZSTDv05_copyDCtx(dctx, refDCtx);
+ ZSTDv05_checkContinuity(dctx, dst);
+ return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize);
+ ZSTDv05_checkContinuity(dctx, dst);
+ return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
+}
+
+size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1)
+ size_t regenSize;
+ ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx();
+ if (dctx==NULL) return ERROR(memory_allocation);
+ regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
+ ZSTDv05_freeDCtx(dctx);
+ return regenSize;
+#else
+ ZSTDv05_DCtx dctx;
+ return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
+#endif
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTDv05_frameHeaderSize_min) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTDv05_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTDv05_blockHeaderSize;
+ remainingSize -= ZSTDv05_blockHeaderSize;
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * BLOCKSIZE;
+}
+
+/* ******************************
+* Streaming Decompression API
+********************************/
+size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+ ZSTDv05_checkContinuity(dctx, dst);
+
+ /* Decompress : frame header; part 1 */
+ switch (dctx->stage)
+ {
+ case ZSTDv05ds_getFrameHeaderSize :
+ /* get frame header size */
+ if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
+ dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
+ if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize;
+ memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min);
+ if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */
+ dctx->expected = 0; /* not necessary to copy more */
+ /* fallthrough */
+ case ZSTDv05ds_decodeFrameHeader:
+ /* get frame header */
+ { size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize);
+ if (ZSTDv05_isError(result)) return result;
+ dctx->expected = ZSTDv05_blockHeaderSize;
+ dctx->stage = ZSTDv05ds_decodeBlockHeader;
+ return 0;
+ }
+ case ZSTDv05ds_decodeBlockHeader:
+ {
+ /* Decode block header */
+ blockProperties_t bp;
+ size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp);
+ if (ZSTDv05_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end) {
+ dctx->expected = 0;
+ dctx->stage = ZSTDv05ds_getFrameHeaderSize;
+ }
+ else {
+ dctx->expected = blockSize;
+ dctx->bType = bp.blockType;
+ dctx->stage = ZSTDv05ds_decompressBlock;
+ }
+ return 0;
+ }
+ case ZSTDv05ds_decompressBlock:
+ {
+ /* Decompress : block content */
+ size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ dctx->stage = ZSTDv05ds_decodeBlockHeader;
+ dctx->expected = ZSTDv05_blockHeaderSize;
+ dctx->previousDstEnd = (char*)dst + rSize;
+ return rSize;
+ }
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+}
+
+
+static void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue=MaxOff, offcodeLog;
+ short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+
+ hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize);
+ if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + hSize;
+ dictSize -= hSize;
+
+ offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
+ if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted);
+ errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+ if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + offcodeHeaderSize;
+ dictSize -= offcodeHeaderSize;
+
+ matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
+ if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted);
+ errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+ if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + matchlengthHeaderSize;
+ dictSize -= matchlengthHeaderSize;
+
+ litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
+ if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted);
+ if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+ if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+
+ dctx->flagStaticTables = 1;
+ return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
+}
+
+static size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ size_t eSize;
+ U32 magic = MEM_readLE32(dict);
+ if (magic != ZSTDv05_DICT_MAGIC) {
+ /* pure content mode */
+ ZSTDv05_refDictContent(dctx, dict, dictSize);
+ return 0;
+ }
+ /* load entropy tables */
+ dict = (const char*)dict + 4;
+ dictSize -= 4;
+ eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize);
+ if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted);
+
+ /* reference dictionary content */
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ ZSTDv05_refDictContent(dctx, dict, dictSize);
+
+ return 0;
+}
+
+
+size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ size_t errorCode;
+ errorCode = ZSTDv05_decompressBegin(dctx);
+ if (ZSTDv05_isError(errorCode)) return errorCode;
+
+ if (dict && dictSize) {
+ errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize);
+ if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+ }
+
+ return 0;
+}
+
+/*
+ Buffered version of Zstd compression library
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests or to provide feedback, if you can endure the risk of future changes.
+ */
+
+
+
+/* *************************************
+* Constants
+***************************************/
+static size_t ZBUFFv05_blockHeaderSize = 3;
+
+
+
+/* *** Compression *** */
+
+static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ size_t length = MIN(maxDstSize, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+
+
+
+/** ************************************************
+* Streaming decompression
+*
+* A ZBUFFv05_DCtx object is required to track streaming operation.
+* Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
+* Use ZBUFFv05_decompressInit() to start a new decompression operation.
+* ZBUFFv05_DCtx objects can be reused multiple times.
+*
+* Use ZBUFFv05_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *maxDstSizePtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
+* return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
+* or 0 when a frame is completely decoded
+* or an error code, which can be tested using ZBUFFv05_isError().
+*
+* Hint : recommended buffer sizes (not compulsory)
+*  output : 128 KB block size is the internal unit; it ensures it's always possible to write a full block when it's decoded.
+* input : just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
+* **************************************************/
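+
+/* Usage sketch (illustrative only, kept disabled) : a minimal loop decompressing an
+ * already-loaded buffer through the streaming API described above. The helper name and
+ * buffer handling are placeholders, not part of the original source; error handling is
+ * reduced to early returns. */
+#if 0
+static size_t ZBUFFv05_example_decompressAll(void* dst, size_t dstCapacity,
+                                             const void* src, size_t srcSize)
+{
+    ZBUFFv05_DCtx* const zbd = ZBUFFv05_createDCtx();
+    const char* ip = (const char*)src;
+    char* op = (char*)dst;
+    size_t hint = 1;
+    if (zbd == NULL) return ERROR(memory_allocation);
+    ZBUFFv05_decompressInit(zbd);
+    while (hint != 0) {   /* 0 means a frame is completely decoded */
+        size_t srcLeft = srcSize - (size_t)(ip - (const char*)src);
+        size_t dstLeft = dstCapacity - (size_t)(op - (char*)dst);
+        hint = ZBUFFv05_decompressContinue(zbd, op, &dstLeft, ip, &srcLeft);
+        if (ZBUFFv05_isError(hint)) { ZBUFFv05_freeDCtx(zbd); return hint; }
+        ip += srcLeft;    /* srcLeft now holds the nb of bytes read from src */
+        op += dstLeft;    /* dstLeft now holds the nb of bytes written to dst */
+        if ((srcLeft == 0) && (dstLeft == 0)) break;   /* no progress : more input (or output room) needed */
+    }
+    ZBUFFv05_freeDCtx(zbd);
+    return (size_t)(op - (char*)dst);
+}
+#endif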
+
+typedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader,
+ ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage;
+
+/* *** Resource management *** */
+
+#define ZSTDv05_frameHeaderSize_max 5 /* too magical, should come from reference */
+struct ZBUFFv05_DCtx_s {
+ ZSTDv05_DCtx* zc;
+ ZSTDv05_parameters params;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t hPos;
+ ZBUFFv05_dStage stage;
+ unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max];
+}; /* typedef'd to ZBUFFv05_DCtx within "zstd_buffered.h" */
+
+
+ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void)
+{
+ ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx));
+ if (zbc==NULL) return NULL;
+ memset(zbc, 0, sizeof(*zbc));
+ zbc->zc = ZSTDv05_createDCtx();
+ zbc->stage = ZBUFFv05ds_init;
+ return zbc;
+}
+
+size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc)
+{
+ if (zbc==NULL) return 0; /* support free on null */
+ ZSTDv05_freeDCtx(zbc->zc);
+ free(zbc->inBuff);
+ free(zbc->outBuff);
+ free(zbc);
+ return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize)
+{
+ zbc->stage = ZBUFFv05ds_readHeader;
+ zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0;
+ return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize);
+}
+
+size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc)
+{
+ return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0);
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+ const char* const istart = (const char*)src;
+ const char* ip = istart;
+ const char* const iend = istart + *srcSizePtr;
+ char* const ostart = (char*)dst;
+ char* op = ostart;
+ char* const oend = ostart + *maxDstSizePtr;
+ U32 notDone = 1;
+
+ while (notDone) {
+ switch(zbc->stage)
+ {
+ case ZBUFFv05ds_init :
+ return ERROR(init_missing);
+
+ case ZBUFFv05ds_readHeader :
+ /* read header from src */
+ {
+ size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr);
+ if (ZSTDv05_isError(headerSize)) return headerSize;
+ if (headerSize) {
+ /* not enough input to decode header : tell how many bytes would be necessary */
+ memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
+ zbc->hPos += *srcSizePtr;
+ *maxDstSizePtr = 0;
+ zbc->stage = ZBUFFv05ds_loadHeader;
+ return headerSize - zbc->hPos;
+ }
+ zbc->stage = ZBUFFv05ds_decodeHeader;
+ break;
+ }
+ /* fall-through */
+ case ZBUFFv05ds_loadHeader:
+ /* complete header from src */
+ {
+ size_t headerSize = ZBUFFv05_limitCopy(
+ zbc->headerBuffer + zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos,
+ src, *srcSizePtr);
+ zbc->hPos += headerSize;
+ ip += headerSize;
+ headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
+ if (ZSTDv05_isError(headerSize)) return headerSize;
+ if (headerSize) {
+ /* not enough input to decode header : tell how many bytes would be necessary */
+ *maxDstSizePtr = 0;
+ return headerSize - zbc->hPos;
+ }
+ // zbc->stage = ZBUFFv05ds_decodeHeader; break; /* useless : stage follows */
+ }
+ /* fall-through */
+ case ZBUFFv05ds_decodeHeader:
+ /* apply header to create / resize buffers */
+ {
+ size_t neededOutSize = (size_t)1 << zbc->params.windowLog;
+ size_t neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */
+ if (zbc->inBuffSize < neededInSize) {
+ free(zbc->inBuff);
+ zbc->inBuffSize = neededInSize;
+ zbc->inBuff = (char*)malloc(neededInSize);
+ if (zbc->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ if (zbc->outBuffSize < neededOutSize) {
+ free(zbc->outBuff);
+ zbc->outBuffSize = neededOutSize;
+ zbc->outBuff = (char*)malloc(neededOutSize);
+ if (zbc->outBuff == NULL) return ERROR(memory_allocation);
+ } }
+ if (zbc->hPos) {
+ /* some data already loaded into headerBuffer : transfer into inBuff */
+ memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
+ zbc->inPos = zbc->hPos;
+ zbc->hPos = 0;
+ zbc->stage = ZBUFFv05ds_load;
+ break;
+ }
+ zbc->stage = ZBUFFv05ds_read;
+ /* fall-through */
+ case ZBUFFv05ds_read:
+ {
+ size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+ if (neededInSize==0) { /* end of frame */
+ zbc->stage = ZBUFFv05ds_init;
+ notDone = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) {
+ /* directly decode from src */
+ size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
+ zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+ ip, neededInSize);
+ if (ZSTDv05_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize) break; /* this was just a header */
+ zbc->outEnd = zbc->outStart + decodedSize;
+ zbc->stage = ZBUFFv05ds_flush;
+ break;
+ }
+ if (ip==iend) { notDone = 0; break; } /* no more input */
+ zbc->stage = ZBUFFv05ds_load;
+ }
+ /* fall-through */
+ case ZBUFFv05ds_load:
+ {
+ size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+ size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */
+ size_t loadedSize;
+ if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */
+ loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
+ ip += loadedSize;
+ zbc->inPos += loadedSize;
+ if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
+ {
+ size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
+ zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+ zbc->inBuff, neededInSize);
+ if (ZSTDv05_isError(decodedSize)) return decodedSize;
+ zbc->inPos = 0; /* input is consumed */
+ if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; } /* this was just a header */
+ zbc->outEnd = zbc->outStart + decodedSize;
+ zbc->stage = ZBUFFv05ds_flush;
+ // break; /* ZBUFFv05ds_flush follows */
+ }
+ }
+ /* fall-through */
+ case ZBUFFv05ds_flush:
+ {
+ size_t toFlushSize = zbc->outEnd - zbc->outStart;
+ size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
+ op += flushedSize;
+ zbc->outStart += flushedSize;
+ if (flushedSize == toFlushSize) {
+ zbc->stage = ZBUFFv05ds_read;
+ if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
+ zbc->outStart = zbc->outEnd = 0;
+ break;
+ }
+ /* cannot flush everything */
+ notDone = 0;
+ break;
+ }
+ default: return ERROR(GENERIC); /* impossible */
+ } }
+
+ *srcSizePtr = ip-istart;
+ *maxDstSizePtr = op-ostart;
+
+ { size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+ if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint+= ZBUFFv05_blockHeaderSize; /* get next block header too */
+ nextSrcSizeHint -= zbc->inPos; /* already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+
+
+/* *************************************
+* Tool functions
+***************************************/
+unsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); }
+const char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+size_t ZBUFFv05_recommendedDInSize(void) { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size*/ ; }
+size_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.h b/vendor/github.com/DataDog/zstd/zstd_v05.h
new file mode 100644
index 000000000..4a979854b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv05_H
+#define ZSTDv05_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stddef.h> /* size_t */
+#include "mem.h" /* U64, U32 */
+
+
+/* *************************************
+* Simple functions
+***************************************/
+/*! ZSTDv05_decompress() :
+  `compressedSize` : must be the _exact_ size of the compressed blob, otherwise decompression will fail.
+ `dstCapacity` must be large enough, equal or larger than originalSize.
+ @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
+size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
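+
+/* Usage sketch (illustrative only; buffer names are placeholders) :
+ *     size_t const rSize = ZSTDv05_decompress(dstBuffer, dstCapacity, cSrc, cSrcSize);
+ *     if (ZSTDv05_isError(rSize)) { ... }    // see ZSTDv05_getErrorName() for details
+ */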
+
+ /**
+ ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format
+ srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+ cSize (output parameter) : the number of bytes that would be read to decompress this frame
+     or an error code if it fails (which can be tested using ZSTDv05_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
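+
+/* Usage sketch (illustrative only) :
+ *     size_t cSize; unsigned long long dBound;
+ *     ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
+ *     if (ZSTDv05_isError(cSize)) { ... }    // cSize carries the error code on failure
+ */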
+
+/* *************************************
+* Helper functions
+***************************************/
+/* Error Management */
+unsigned ZSTDv05_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+const char* ZSTDv05_getErrorName(size_t code); /*!< provides readable string for an error code */
+
+
+/* *************************************
+* Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
+ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
+size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx); /*!< @return : errorCode */
+
+/** ZSTDv05_decompressDCtx() :
+* Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
+size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+* Simple Dictionary API
+*************************/
+/*! ZSTDv05_decompress_usingDict() :
+* Decompression using a pre-defined Dictionary content (see dictBuilder).
+* Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
+* Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */
+size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
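+
+/* Usage sketch (illustrative only; dictBuffer/dictSize are placeholders) :
+ *     ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
+ *     size_t const rSize = ZSTDv05_decompress_usingDict(dctx, dst, dstCapacity,
+ *                                                       src, srcSize, dictBuffer, dictSize);
+ *     ZSTDv05_freeDCtx(dctx);
+ */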
+
+/*-************************
+* Advanced Streaming API
+***************************/
+typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
+typedef struct {
+ U64 srcSize;
+ U32 windowLog; /* the only useful information to retrieve */
+ U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
+} ZSTDv05_parameters;
+size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);
+
+size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
+void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
+size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
+size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+* ZBUFF API
+*************************/
+typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
+ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
+size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);
+
+size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
+size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);
+
+size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+* Streaming decompression
+*
+* A ZBUFFv05_DCtx object is required to track streaming operations.
+* Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
+* Use ZBUFFv05_decompressInit() to start a new decompression operation,
+* or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFFv05_DCtx objects can be reused multiple times.
+*
+* Use ZBUFFv05_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)
+* or 0 when a frame is completely decoded
+* or an error code, which can be tested using ZBUFFv05_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
+* output : ZBUFFv05_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when it is decoded.
+* input  : ZBUFFv05_recommendedDInSize == 128 KB + 3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
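+
+/* Editor's note : an illustrative streaming-decompression loop following the
+   description above; it is not part of the original header. readInput() and
+   writeOutput() are hypothetical caller-supplied I/O routines, and a single
+   frame is assumed. Kept under #if 0 so it has no effect on the build. */
+#if 0
+static int example_stream_decompress(void)
+{
+    ZBUFFv05_DCtx* const zbd = ZBUFFv05_createDCtx();
+    char inBuff[128 * 1024];    /* any size works; ZBUFFv05_recommendedDInSize() is optimal */
+    char outBuff[128 * 1024];   /* any size works; ZBUFFv05_recommendedDOutSize() is optimal */
+    size_t readSize;
+    if (zbd == NULL) return -1;
+    ZBUFFv05_decompressInit(zbd);   /* returns an error code; check omitted in this sketch */
+    while ((readSize = readInput(inBuff, sizeof(inBuff))) != 0) {
+        size_t consumed = 0;
+        while (consumed < readSize) {
+            size_t srcSize = readSize - consumed;    /* in : bytes available ; out : bytes read */
+            size_t dstCapacity = sizeof(outBuff);    /* in : capacity ; out : bytes written */
+            size_t const hint = ZBUFFv05_decompressContinue(zbd,
+                                        outBuff, &dstCapacity,
+                                        inBuff + consumed, &srcSize);
+            if (ZBUFFv05_isError(hint)) { ZBUFFv05_freeDCtx(zbd); return -1; }
+            writeOutput(outBuff, dstCapacity);
+            consumed += srcSize;
+            if (hint == 0) { ZBUFFv05_freeDCtx(zbd); return 0; }   /* frame fully decoded */
+        }
+    }
+    ZBUFFv05_freeDCtx(zbd);
+    return 0;
+}
+#endif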
+
+
+/* *************************************
+* Tool functions
+***************************************/
+unsigned ZBUFFv05_isError(size_t errorCode);
+const char* ZBUFFv05_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+* These sizes are just hints, and tend to offer better latency */
+size_t ZBUFFv05_recommendedDInSize(void);
+size_t ZBUFFv05_recommendedDOutSize(void);
+
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define ZSTDv05_MAGICNUMBER 0xFD2FB525 /* v0.5 */
+
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv05_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.c b/vendor/github.com/DataDog/zstd/zstd_v06.c
new file mode 100644
index 000000000..a695cbb8a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.c
@@ -0,0 +1,4152 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include "zstd_v06.h"
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+#include <stdlib.h> /* malloc, free, qsort */
+#include "error_private.h"
+
+
+
+/* ******************************************************************
+ mem.h
+ low-level memory access routines
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-**************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets that depend on alignment.
+ *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6)
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
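+
+/* Editor's note (illustrative, not part of the original source) : as mentioned above,
+   the default can be overridden externally, e.g. with a hypothetical build line such as
+       cc -DMEM_FORCE_MEMORY_ACCESS=1 -c zstd_v06.c                                       */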
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard, by lying about structure alignment.
+Only use if there is no other choice to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __packed directives are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap32(in);
+#else
+ return ((in << 24) & 0xff000000 ) |
+ ((in << 8) & 0x00ff0000 ) |
+ ((in >> 8) & 0x0000ff00 ) |
+ ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap64(in);
+#else
+ return ((in << 56) & 0xff00000000000000ULL) |
+ ((in << 40) & 0x00ff000000000000ULL) |
+ ((in << 24) & 0x0000ff0000000000ULL) |
+ ((in << 8) & 0x000000ff00000000ULL) |
+ ((in >> 8) & 0x00000000ff000000ULL) |
+ ((in >> 24) & 0x0000000000ff0000ULL) |
+ ((in >> 40) & 0x000000000000ff00ULL) |
+ ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian()) {
+ MEM_write16(memPtr, val);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ return MEM_swap32(MEM_read32(memPtr));
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ return MEM_swap64(MEM_read64(memPtr));
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+ zstd - standard compression library
+ Header File for static linking only
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net
+*/
+#ifndef ZSTDv06_STATIC_H
+#define ZSTDv06_STATIC_H
+
+/* The prototypes defined within this file are considered experimental.
+ * They should not be used in the context of a DLL, as they may change in the future.
+ * Prefer static linking if you need them, to stay in control of breaking changes between versions.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*- Advanced Decompression functions -*/
+
+/*! ZSTDv06_decompress_usingPreparedDCtx() :
+* Same as ZSTDv06_decompress_usingDict(), but using a reference context `preparedDCtx`, where the dictionary has already been loaded.
+* It avoids reloading the dictionary each time.
+* `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict().
+* Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
+ZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx(
+ ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+
+#define ZSTDv06_FRAMEHEADERSIZE_MAX 13 /* for static allocation */
+static const size_t ZSTDv06_frameHeaderSize_min = 5;
+static const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX;
+
+ZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx);
+
+/*
+ Streaming decompression, direct mode (bufferless)
+
+ A ZSTDv06_DCtx object is required to track streaming operations.
+ Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it.
+ A ZSTDv06_DCtx object can be re-used multiple times.
+
+ First optional operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input.
+ It can provide the minimum size of rolling buffer required to properly decompress data,
+ and optionally the final size of uncompressed content.
+  (Note : content size is optional information that may not be present. 0 means : content size unknown)
+  Frame parameters are extracted from the beginning of the compressed frame.
+  The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work)
+  If `srcSize` is too small for the operation to succeed, the function will return the minimum size it requires to produce a result.
+  Result : 0 when successful, meaning the ZSTDv06_frameParams structure has been filled.
+          >0 : there is not enough data in `src`; the value is the size expected to successfully decode the header.
+          or an errorCode, which can be tested using ZSTDv06_isError()
+
+ Start decompression, with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict().
+ Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx().
+
+ Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternatively.
+  ZSTDv06_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv06_decompressContinue().
+ ZSTDv06_decompressContinue() requires this exact amount of bytes, or it will fail.
+ ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
+ They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
+
+ @result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity)
+ It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header.
+
+ A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+*/
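+
+/* Editor's note : an illustrative bufferless loop following the description above;
+   it is not part of the original header. fillInput() is a hypothetical
+   caller-supplied routine, decoded blocks are kept contiguous in `dst` as the
+   note above requires, and the 128 KB scratch buffer mirrors
+   ZSTDv06_BLOCKSIZE_MAX defined below. Kept under #if 0 so it has no effect. */
+#if 0
+static int example_bufferless_decompress(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity)
+{
+    static char src[128 * 1024];   /* large enough for any single requested read */
+    char* op = (char*)dst;         /* previous blocks stay contiguous, prior to the current one */
+    char* const oend = (char*)dst + dstCapacity;
+    size_t toRead;
+    ZSTDv06_decompressBegin(dctx);
+    while ((toRead = ZSTDv06_nextSrcSizeToDecompress(dctx)) != 0) {
+        size_t produced;
+        if (toRead > sizeof(src)) return -1;                /* defensive check */
+        if (fillInput(src, toRead) != toRead) return -1;    /* the exact amount is required */
+        produced = ZSTDv06_decompressContinue(dctx, op, (size_t)(oend - op), src, toRead);
+        if (ZSTDv06_isError(produced)) return -1;
+        op += produced;             /* 0 only means a header was decoded */
+    }
+    return 0;   /* frame fully decoded; (op - (char*)dst) is the regenerated size */
+}
+#endif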
+
+
+/* **************************************
+* Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+    The user will have to keep track of the information required to regenerate data, such as the compressed and content sizes.
+
+ A few rules to respect :
+ - Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB)
+ - Compressing or decompressing requires a context structure
+ + Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx()
+ - It is necessary to init context before starting
+ + compression : ZSTDv06_compressBegin()
+ + decompression : ZSTDv06_decompressBegin()
+ + variants _usingDict() are also allowed
+ + copyCCtx() and copyDCtx() work too
+ - When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero.
+ In which case, nothing is produced into `dst`.
+ + User must test for such outcome and deal directly with uncompressed data
+ + ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !!
+*/
+
+#define ZSTDv06_BLOCKSIZE_MAX (128 * 1024) /* define, for static allocation */
+ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
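+
+/* Editor's note : an illustrative single-block decode following the rules above;
+   it is not part of the original header. The caller is assumed to have kept the
+   block's compressed size externally, as required. Kept under #if 0. */
+#if 0
+static size_t example_decompress_one_block(void* dst, size_t dstCapacity,
+                                           const void* cBlock, size_t cBlockSize)
+{
+    ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
+    size_t regenerated;
+    if (dctx == NULL) return 0;
+    ZSTDv06_decompressBegin(dctx);      /* the context must be initialized first */
+    regenerated = ZSTDv06_decompressBlock(dctx, dst, dstCapacity, cBlock, cBlockSize);
+    ZSTDv06_freeDCtx(dctx);
+    return regenerated;                 /* test with ZSTDv06_isError() */
+}
+#endif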
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTDv06_STATIC_H */
+/*
+ zstd_internal - common functions to include
+ Header File for include
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : https://www.zstd.net
+*/
+#ifndef ZSTDv06_CCOMMON_H_MODULE
+#define ZSTDv06_CCOMMON_H_MODULE
+
+
+/*-*************************************
+* Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTDv06_DICT_MAGIC 0xEC30A436
+
+#define ZSTDv06_REP_NUM 3
+#define ZSTDv06_REP_INIT ZSTDv06_REP_NUM
+#define ZSTDv06_REP_MOVE (ZSTDv06_REP_NUM-1)
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12
+static const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 };
+
+#define ZSTDv06_BLOCKHEADERSIZE 3 /* because C standard does not allow a static const value to be defined using another static const value .... :( */
+static const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE;
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+
+#define IS_HUF 0
+#define IS_PCH 1
+#define IS_RAW 2
+#define IS_RLE 3
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+#define REPCODE_STARTVALUE 1
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+
+#define FSEv06_ENCODING_RAW 0
+#define FSEv06_ENCODING_RLE 1
+#define FSEv06_ENCODING_STATIC 2
+#define FSEv06_ENCODING_DYNAMIC 3
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1 };
+static const U32 LL_defaultNormLog = 6;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1 };
+static const U32 ML_defaultNormLog = 6;
+
+static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+static const U32 OF_defaultNormLog = 5;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTDv06_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv06_wildcopy() :
+* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
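+
+/* Editor's note : an illustrative caller-side contract for ZSTDv06_wildcopy(); it is
+   not part of the original source. Since the copy may run up to 7 bytes (8 if
+   length==0) past `length`, the fast path requires WILDCOPY_OVERLENGTH spare bytes
+   in the destination. Kept under #if 0. */
+#if 0
+static void example_wildcopy_use(BYTE* op, BYTE* const oend, const BYTE* ip, size_t length)
+{
+    if ((size_t)(oend - op) >= length + WILDCOPY_OVERLENGTH)
+        ZSTDv06_wildcopy(op, ip, (ptrdiff_t)length);   /* fast path : overwrite slack available */
+    else
+        memcpy(op, ip, length);                        /* close to buffer end : exact copy */
+}
+#endif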
+
+
+
+/*-*******************************************
+* Private interfaces
+*********************************************/
+typedef struct {
+ U32 off;
+ U32 len;
+} ZSTDv06_match_t;
+
+typedef struct {
+ U32 price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTDv06_REP_INIT];
+} ZSTDv06_optimal_t;
+
+typedef struct { U32 unused; } ZSTDv06_stats_t;
+
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* litStart;
+ BYTE* lit;
+ U16* litLengthStart;
+ U16* litLength;
+ BYTE* llCodeStart;
+ U16* matchLengthStart;
+ U16* matchLength;
+ BYTE* mlCodeStart;
+ U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+ U32 longLengthPos;
+ /* opt */
+ ZSTDv06_optimal_t* priceTable;
+ ZSTDv06_match_t* matchTable;
+ U32* matchLengthFreq;
+ U32* litLengthFreq;
+ U32* litFreq;
+ U32* offCodeFreq;
+ U32 matchLengthSum;
+ U32 matchSum;
+ U32 litLengthSum;
+ U32 litSum;
+ U32 offCodeSum;
+ U32 log2matchLengthSum;
+ U32 log2matchSum;
+ U32 log2litLengthSum;
+ U32 log2litSum;
+ U32 log2offCodeSum;
+ U32 factor;
+ U32 cachedPrice;
+ U32 cachedLitLength;
+ const BYTE* cachedLiterals;
+ ZSTDv06_stats_t stats;
+} seqStore_t;
+
+void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+
+
+#endif /* ZSTDv06_CCOMMON_H_MODULE */
+/* ******************************************************************
+ FSE : Finite State Entropy codec
+ Public Prototypes declaration
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef FSEv06_H
+#define FSEv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSEv06_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= maxDstSize),
+ or an error code, which can be tested using FSEv06_isError() .
+
+  ** Important ** : FSEv06_decompress() does not decompress non-compressible or RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+size_t FSEv06_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+size_t FSEv06_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+unsigned FSEv06_isError(size_t code); /* tells if a return value is an error code */
+const char* FSEv06_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+
+FSEv06_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide the normalized distribution using an external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSEv06_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSEv06_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSEv06_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSEv06_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv06_DTable* FSEv06_createDTable(unsigned tableLog);
+void FSEv06_freeDTable(FSEv06_DTable* dt);
+
+/*! FSEv06_buildDTable():
+ Builds 'dt', which must be already allocated, using FSEv06_createDTable().
+ return : 0, or an errorCode, which can be tested using FSEv06_isError() */
+size_t FSEv06_buildDTable (FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSEv06_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSEv06_isError() */
+size_t FSEv06_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSEv06_readNCount() if it was saved using FSEv06_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSEv06_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSEv06_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if the useful information is smaller than that.
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
+
+The next step is to build the decompression tables 'FSEv06_DTable' from 'normalizedCounter'.
+This is performed by the function FSEv06_buildDTable().
+The space required by 'FSEv06_DTable' must be already allocated using FSEv06_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
+
+`FSEv06_DTable` can then be used to decompress `cSrc`, with FSEv06_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSEv06_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError(). (ex: dst buffer too small)
+*/
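+
+/* Editor's note : an illustrative end-to-end sequence for the tutorial above; it is
+   not part of the original header. The worst-case table of 256 symbols mentioned
+   above is assumed, and error handling is reduced to FSEv06_isError() checks.
+   Kept under #if 0 so it has no effect on the build. */
+#if 0
+static size_t example_fse_decompress(void* dst, size_t dstCapacity,
+                                     const void* cSrc, size_t cSrcSize)
+{
+    short normalizedCounter[256];
+    unsigned maxSymbolValue = 255;   /* in : maximum allowed ; out : actual maximum */
+    unsigned tableLog;
+    FSEv06_DTable* dt;
+    size_t regenerated;
+
+    size_t const headerSize = FSEv06_readNCount(normalizedCounter, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+    if (FSEv06_isError(headerSize)) return headerSize;
+
+    dt = FSEv06_createDTable(tableLog);
+    if (dt == NULL) return ERROR(GENERIC);
+    {   size_t const buildResult = FSEv06_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);
+        if (FSEv06_isError(buildResult)) { FSEv06_freeDTable(dt); return buildResult; }
+    }
+
+    /* the remaining cSrcSize - headerSize bytes must be exactly the FSE-compressed stream */
+    regenerated = FSEv06_decompress_usingDTable(dst, dstCapacity,
+                                                (const char*)cSrc + headerSize,
+                                                cSrcSize - headerSize, dt);
+    FSEv06_freeDTable(dt);
+    return regenerated;   /* test with FSEv06_isError() */
+}
+#endif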
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSEv06_H */
+/* ******************************************************************
+ bitstream
+ Part of FSE library
+ header file (to include)
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined in a .h file, to be included.
+*/
+
+
+/*=========================================
+* Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+# include <immintrin.h> /* support for bextr (experimental) */
+#endif
+
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BITv06_DStream_t;
+
+typedef enum { BITv06_DStream_unfinished = 0,
+ BITv06_DStream_endOfBuffer = 1,
+ BITv06_DStream_completed = 2,
+ BITv06_DStream_overflow = 3 } BITv06_DStream_status; /* result of BITv06_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD);
+MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD);
+
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ unsigned r;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ return r;
+# endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BITv06_initDStream() :
+* Initialize a BITv06_DStream_t.
+* `bitD` : a pointer to an already allocated BITv06_DStream_t structure.
+* `srcSize` must be the *exact* size of the bitStream, in bytes.
+* @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
+ } else {
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
+ bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+
+MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits)
+{
+ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv06_lookBitsFast() :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits)
+{
+ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BITv06_lookBits(bitD, nbBits);
+ BITv06_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BITv06_readBitsFast() :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BITv06_lookBitsFast(bitD, nbBits);
+ BITv06_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BITv06_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BITv06_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer;
+ return BITv06_DStream_completed;
+ }
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BITv06_DStream_status result = BITv06_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BITv06_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BITv06_endOfDStream() :
+* @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
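+
+/* Editor's note : an illustrative read-back loop for the DStream API above; it is
+   not part of the original header. The buffer is assumed to have been produced by
+   the matching bit-writer, holding a whole number of nbBits-wide fields plus the
+   end mark, with 1 <= nbBits <= 56. Kept under #if 0. */
+#if 0
+static size_t example_read_all_bits(const void* srcBuffer, size_t srcSize, unsigned nbBits)
+{
+    BITv06_DStream_t bitD;
+    size_t total = 0;
+    size_t const initResult = BITv06_initDStream(&bitD, srcBuffer, srcSize);
+    if (ERR_isError(initResult)) return initResult;
+    if ((nbBits == 0) || (nbBits > 56)) return ERROR(GENERIC);   /* keep each read within one reload */
+    while (!BITv06_endOfDStream(&bitD)) {                        /* bits are read backward, last field first */
+        total += BITv06_readBits(&bitD, nbBits);
+        if (BITv06_reloadDStream(&bitD) == BITv06_DStream_overflow)
+            return ERROR(GENERIC);                               /* input did not contain whole fields */
+    }
+    return total;
+}
+#endif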
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+ FSE : Finite State Entropy coder
+ header file for static linking (only)
+ Copyright (C) 2013-2015, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv06_STATIC_H
+#define FSEv06_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSEv06_NCOUNTBOUND 512
+#define FSEv06_BLOCKBOUND(size) (size + (size>>7))
+#define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+#define FSEv06_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
+
+
+/* *****************************************
+* FSE advanced API
+*******************************************/
+size_t FSEv06_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+/* same as FSEv06_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
+
+size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits);
+/* build a fake FSEv06_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, unsigned char symbolValue);
+/* build a fake FSEv06_DTable, designed to always generate the same symbolValue */
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSEv06_DState_t;
+
+
+static void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt);
+
+static unsigned char FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSEv06_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSEv06_decode_t; /* size == U32 */
+
+MEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr;
+ DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog);
+ BITv06_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr)
+{
+ FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+ FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BITv06_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+ FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BITv06_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSEv06_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+ FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BITv06_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+
+
+#ifndef FSEv06_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv06_MAX_MEMORY_USAGE 14
+#define FSEv06_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv06_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSEv06_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSEv06_FUNCTION_TYPE BYTE
+#define FSEv06_FUNCTION_EXTENSION
+#define FSEv06_DECODE_TYPE FSEv06_decode_t
+
+
+#endif /* !FSEv06_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSEv06_MAX_TABLELOG (FSEv06_MAX_MEMORY_USAGE-2)
+#define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG)
+#define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1)
+#define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2)
+#define FSEv06_MIN_TABLELOG 5
+
+#define FSEv06_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX
+#error "FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSEv06_STATIC_H */
+/*
+ Common functions of New Generation Entropy library
+ Copyright (C) 2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+
+/*-****************************************
+* FSE Error Management
+******************************************/
+unsigned FSEv06_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+* HUF Error Management
+****************************************************************/
+static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static short FSEv06_abs(short a) { return a<0 ? -a : a; }
+
+size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr)) {
+ if (previous0) {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0+=24;
+ if (ip < iend-5) {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 16;
+ bitCount+=16;
+ } }
+ while ((bitStream & 3) == 3) {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ { short const max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ } else {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSEv06_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold) {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ } } /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+/* ******************************************************************
+ FSE : Finite State Entropy decoder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSEv06_isError ERR_isError
+#define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)];
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSEv06_FUNCTION_EXTENSION
+# error "FSEv06_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv06_FUNCTION_TYPE
+# error "FSEv06_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv06_CAT(X,Y) X##Y
+#define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y)
+#define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y)
+
+
+/* Function templates */
+FSEv06_DTable* FSEv06_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX;
+ return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv06_freeDTable (FSEv06_DTable* dt)
+{
+ free(dt);
+}
+
+size_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr);
+ U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSEv06_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSEv06_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSEv06_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSEv06_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSEv06_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSEv06_FUNCTION_TYPE const symbol = (FSEv06_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BITv06_highbit32 ((U32)nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+
+
+#ifndef FSEv06_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv06_decode_t* const cell = (FSEv06_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSEv06_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv06_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BITv06_DStream_t bitD;
+ FSEv06_DState_t state1;
+ FSEv06_DState_t state2;
+
+ /* Init */
+ { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize); /* last arg is the maximum compressed size */
+ if (FSEv06_isError(errorCode)) return errorCode; }
+
+ FSEv06_initDState(&state1, &bitD, dt);
+ FSEv06_initDState(&state2, &bitD, dt);
+
+#define FSEv06_GETSYMBOL(statePtr) fast ? FSEv06_decodeSymbolFast(statePtr, &bitD) : FSEv06_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BITv06_reloadDStream(&bitD)==BITv06_DStream_unfinished) && (op<olimit) ; op+=4) {
+ op[0] = FSEv06_GETSYMBOL(&state1);
+
+ if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv06_reloadDStream(&bitD);
+
+ op[1] = FSEv06_GETSYMBOL(&state2);
+
+ if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSEv06_GETSYMBOL(&state1);
+
+ if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv06_reloadDStream(&bitD);
+
+ op[3] = FSEv06_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BITv06_reloadDStream(&bitD) >= FSEv06_DStream_partiallyFilled; Ends at exactly BITv06_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+ *op++ = FSEv06_GETSYMBOL(&state1);
+
+ if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
+ *op++ = FSEv06_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+ *op++ = FSEv06_GETSYMBOL(&state2);
+
+ if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
+ *op++ = FSEv06_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv06_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv06_DTableHeader* DTableH = (const FSEv06_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSEv06_MAX_SYMBOL_VALUE+1];
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSE decoding mode */
+ { size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSEv06_isError(NCountLength)) return NCountLength;
+ if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ { size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSEv06_isError(errorCode)) return errorCode; }
+
+ return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */
+}
+
+
+
+#endif /* FSEv06_COMMONDEFS_ONLY */
+/* ******************************************************************
+ Huffman coder, part of New Generation Entropy library
+ header file
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv06_H
+#define HUFv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+* HUF simple functions
+******************************************/
+size_t HUFv06_decompress(void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize);
+/*
+HUFv06_decompress() :
+ Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstSize'.
+ `dstSize` : must be the **exact** size of original (uncompressed) data.
+ Note : in contrast with FSE, HUFv06_decompress can regenerate
+ RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ because it knows the size to regenerate.
+ @return : size of regenerated data (== dstSize)
+ or an error code, which can be tested using HUFv06_isError()
+*/
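+/* Illustrative usage sketch (not part of the upstream sources); it assumes the
+   caller stored the exact original size alongside the compressed blob, since
+   HUF needs the precise regenerated size :
+
+       size_t const r = HUFv06_decompress(dst, originalSize, cSrc, cSrcSize);
+       if (HUFv06_isError(r)) return r;    (otherwise r == originalSize)
+*/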
+
+
+/* ****************************************
+* Tool functions
+******************************************/
+size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFv06_H */
+/* ******************************************************************
+ Huffman codec, part of New Generation Entropy library
+ header file, for static linking only
+ Copyright (C) 2013-2016, Yann Collet
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv06_STATIC_H
+#define HUFv06_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUFv06_CTABLEBOUND 129
+#define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible data is pre-filtered with a fast heuristic */
+#define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's DTable */
+#define HUFv06_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog))
+#define HUFv06_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ unsigned short DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv06_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv06_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+ unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
+
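+/* Illustrative sketch (not part of the upstream sources) : pairing the static
+   allocation macro with the single-symbol decoder, mirroring what
+   HUFv06_decompress1X2() below does internally :
+
+       HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+       size_t const hSize = HUFv06_readDTableX2(DTable, cSrc, cSrcSize);
+       if (!HUFv06_isError(hSize) && hSize < cSrcSize)
+           HUFv06_decompress1X2_usingDTable(dst, dstSize,
+                                            (const BYTE*)cSrc + hSize, cSrcSize - hSize,
+                                            DTable);
+*/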
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
+
+
+
+/*!
+HUFv06_decompress() does the following:
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build the Huffman table from the saved header, using HUFv06_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUFv06_decompressSXn_usingDTable
+*/
+size_t HUFv06_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+size_t HUFv06_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+size_t HUFv06_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv06_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+/* single stream variants */
+size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+
+size_t HUFv06_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv06_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+
+/* **************************************************************
+* Constants
+****************************************************************/
+#define HUFv06_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUFv06_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUFv06_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUFv06_ABSOLUTEMAX_TABLELOG */
+#define HUFv06_DEFAULT_TABLELOG HUFv06_MAX_TABLELOG /* tableLog by default, when not specified */
+#define HUFv06_MAX_SYMBOL_VALUE 255
+#if (HUFv06_MAX_TABLELOG > HUFv06_ABSOLUTEMAX_TABLELOG)
+# error "HUFv06_MAX_TABLELOG is too large !"
+#endif
+
+
+
+/*! HUFv06_readStats() :
+ Read compact Huffman tree, saved by HUFv06_writeCTable().
+ `huffWeight` is destination buffer.
+ @return : size read from `src`
+*/
+MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ if (iSize >= (242)) { /* RLE */
+ static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else { /* Incompressible */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } } }
+ else { /* header compressed with FSE (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSEv06_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BITv06_highbit32(weightTotal) + 1;
+ if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BITv06_highbit32(rest);
+ U32 const lastWeight = BITv06_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
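+/* Worked example (illustrative, not part of the upstream sources) : for 3 symbols
+   with stored weights {2,1} (last weight implied), weightTotal = 2 + 1 = 3
+   (each weight w contributes (1<<w)>>1), so tableLog = highbit32(3)+1 = 2,
+   rest = 4-3 = 1, and the implied lastWeight = 1. Resulting code lengths
+   (tableLog+1-weight) : 1 bit for the weight-2 symbol, 2 bits for each
+   weight-1 symbol -- a valid Huffman tree with rankStats[1] == 2. */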
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFv06_STATIC_H */
+/* ******************************************************************
+ Huffman decoder, part of New Generation Entropy library
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+
+/* *******************************************************
+* HUF : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2; /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4; /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+
+size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1];
+ U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ size_t iSize;
+ U32 nbSymbols = 0;
+ U32 n;
+ U32 nextRankStart;
+ void* const dtPtr = DTable + 1;
+ HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr;
+
+ HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
+ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv06_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
+ DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
+
+ /* Prepare ranks */
+ nextRankStart = 0;
+ for (n=1; n<tableLog+1; n++) {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ }
+
+ /* fill DTable */
+ for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ const U32 length = (1 << w) >> 1;
+ U32 i;
+ HUFv06_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ }
+
+ return iSize;
+}
+
+
+static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog)
+{
+ const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ const BYTE c = dt[val].byte;
+ BITv06_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
+ HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) {
+ HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd))
+ HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+size_t HUFv06_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const U32 dtLog = DTable[0];
+ const void* dtPtr = DTable;
+ const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1;
+ BITv06_DStream_t bitD;
+
+ { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);
+ if (HUFv06_isError(errorCode)) return errorCode; }
+
+ HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+ /* check */
+ if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+size_t HUFv06_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U16* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BITv06_DStream_t bitD1;
+ BITv06_DStream_t bitD2;
+ BITv06_DStream_t bitD3;
+ BITv06_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BITv06_initDStream(&bitD1, istart1, length1);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD2, istart2, length2);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD3, istart3, length3);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD4, istart4, length4);
+ if (HUFv06_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is assumed to be already verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv06_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += errorCode;
+ cSrcSize -= errorCode;
+
+ return HUFv06_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUFv06_DEltX4 DElt;
+ U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }}
+}
+
+typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ } else {
+ HUFv06_DEltX4 DElt;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ { U32 u;
+ const U32 end = start + length;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ rankVal[weight] += length;
+ }
+}
+
+size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1];
+ sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1];
+ U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+ U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ const U32 memLog = DTable[0];
+ size_t iSize;
+ void* dtPtr = DTable;
+ HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1;
+
+ HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
+ if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+ //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv06_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = weightList[s];
+ U32 const r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is the beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = rankVal[0];
+ { int const rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < memLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUFv06_fillDTableX4(dt, memLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ return iSize;
+}
+
+
+static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BITv06_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BITv06_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ } }
+ return 1;
+}
+
+
+#define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
+ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) {
+ HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2))
+ HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+
+size_t HUFv06_decompress1X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+
+ const U32 dtLog = DTable[0];
+ const void* const dtPtr = DTable;
+ const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
+
+ /* Init */
+ BITv06_DStream_t bitD;
+ { size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize);
+ if (HUFv06_isError(errorCode)) return errorCode; }
+
+ /* decode */
+ HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
+
+ /* check */
+ if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUFv06_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+size_t HUFv06_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const U32* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable;
+ const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
+ const U32 dtLog = DTable[0];
+ size_t errorCode;
+
+ /* Init */
+ BITv06_DStream_t bitD1;
+ BITv06_DStream_t bitD2;
+ BITv06_DStream_t bitD3;
+ BITv06_DStream_t bitD4;
+ const size_t length1 = MEM_readLE16(istart);
+ const size_t length2 = MEM_readLE16(istart+2);
+ const size_t length3 = MEM_readLE16(istart+4);
+ size_t length4;
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+
+ length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ errorCode = BITv06_initDStream(&bitD1, istart1, length1);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD2, istart2, length2);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD3, istart3, length3);
+ if (HUFv06_isError(errorCode)) return errorCode;
+ errorCode = BITv06_initDStream(&bitD4, istart4, length4);
+ if (HUFv06_isError(errorCode)) return errorCode;
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is assumed to be already verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv06_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
+ if (HUFv06_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize;
+ cSrcSize -= hSize;
+
+ return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL };
+ U32 Dtime[3]; /* decompression time estimation */
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ /* decoder timing evaluation */
+ { U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 n; for (n=0; n<3; n++)
+ Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+ }
+
+ Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+ { U32 algoNb = 0;
+ if (Dtime[1] < Dtime[0]) algoNb = 1;
+ // if (Dtime[2] < Dtime[algoNb]) algoNb = 2; /* current speed of HUFv06_decompress4X6 is not good */
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+ }
+
+ //return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+ //return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
+}
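+/* Worked example of the selection heuristic above (illustrative, not part of the
+   upstream sources) : with cSrcSize = 61440 and dstSize = 131072, Q = 61440*16/131072 = 7
+   and D256 = 512, giving Dtime[0] = 897 + 128*512 = 66433 and Dtime[1] = 1515 + 75*512 = 39915;
+   after the cache-eviction penalty Dtime[1] becomes 42409, still below Dtime[0],
+   so the double-symbols decoder HUFv06_decompress4X4() is selected. */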
+/*
+ Common functions of Zstd compression library
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net/
+*/
+
+
+/*-****************************************
+* Version
+******************************************/
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+/*! ZSTDv06_isError() :
+* tells if a return value is an error code */
+unsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTDv06_getErrorName() :
+* provides error code string from function result (useful for debugging) */
+const char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+* ZBUFF Error Management
+****************************************************************/
+unsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTDv06_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTDv06_HEAPMODE
+# define ZSTDv06_HEAPMODE 1
+#endif
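+/* Illustrative note (not part of the upstream sources) : e.g. compiling with
+   -DZSTDv06_HEAPMODE=0 makes ZSTDv06_decompress() place its working context on the stack
+   instead of allocating it with malloc(). */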
+
+
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+
+/*-*************************************
+* Macros
+***************************************/
+#define ZSTDv06_isError ERR_isError /* for inlining */
+#define FSEv06_isError ERR_isError
+#define HUFv06_isError ERR_isError
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage;
+
+struct ZSTDv06_DCtx_s
+{
+ FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)];
+ FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)];
+ FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)];
+ unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)];
+ const void* previousDstEnd;
+ const void* base;
+ const void* vBase;
+ const void* dictEnd;
+ size_t expected;
+ size_t headerSize;
+ ZSTDv06_frameParams fParams;
+ blockType_t bType; /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+ ZSTDv06_dStage stage;
+ U32 flagRepeatTable;
+ const BYTE* litPtr;
+ size_t litSize;
+ BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
+}; /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */
+
+size_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */
+size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }
+
+size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)
+{
+ dctx->expected = ZSTDv06_frameHeaderSize_min;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ dctx->vBase = NULL;
+ dctx->dictEnd = NULL;
+ dctx->hufTableX4[0] = HufLog;
+ dctx->flagRepeatTable = 0;
+ return 0;
+}
+
+ZSTDv06_DCtx* ZSTDv06_createDCtx(void)
+{
+ ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTDv06_decompressBegin(dctx);
+ return dctx;
+}
+
+size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx)
+{
+ free(dctx);
+ return 0; /* reserved as a potential error code in the future */
+}
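+/* Illustrative context lifecycle (not part of the upstream sources) :
+
+       ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
+       if (dctx != NULL) {
+           (decompress using dctx, e.g. via ZSTDv06_decompressBegin() and the
+            continue-style entry points mentioned above)
+           ZSTDv06_freeDCtx(dctx);
+       }
+*/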
+
+void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx)
+{
+ memcpy(dstDCtx, srcDCtx,
+ sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max)); /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+* Decompression section
+***************************************************************/
+
+/* Frame format description
+ Frame Header - [ Block Header - Block ] - Frame End
+ 1) Frame Header
+ - 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h)
+ - 1 byte - Frame Descriptor
+ 2) Block Header
+ - 3 bytes, starting with a 2-bit descriptor
+ Uncompressed, Compressed, Frame End, unused
+ 3) Block
+ See Block Format Description
+ 4) Frame End
+ - 3 bytes, compatible with Block Header
+*/
+
+
+/* Frame descriptor
+
+ 1 byte, using :
+ bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN (see zstd_internal.h)
+ bit 4 : minmatch 4(0) or 3(1)
+ bit 5 : reserved (must be zero)
+ bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes
+
+ Optional : content size (0, 1, 2 or 8 bytes)
+ 0 : unknown
+ 1 : 0-255 bytes
+ 2 : 256 - 65535+256
+ 8 : up to 16 exa
+*/
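+/* Worked example (illustrative, not part of the upstream sources) :
+   a Frame Descriptor byte of 0x45 (binary 0100 0101) decodes as
+   windowLog = 5 + ZSTDv06_WINDOWLOG_ABSOLUTEMIN, minmatch bit = 0 (minmatch 4),
+   reserved bit = 0 (valid), and fcsId = 1, i.e. a 1-byte content size (0-255)
+   stored right after the descriptor. */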
+
+
+/* Compressed Block, format description
+
+ Block = Literal Section - Sequences Section
+ Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+ 1) Literal Section
+
+ 1.1) Header : 1-5 bytes
+ flags: 2 bits
+ 00 compressed by Huff0
+ 01 unused
+ 10 is Raw (uncompressed)
+ 11 is Rle
+ Note : using 01 => Huff0 with precomputed table ?
+ Note : delta map ? => compressed ?
+
+ 1.1.1) Huff0-compressed literal block : 3-5 bytes
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ srcSize < 16KB => 4 bytes (2-2-14-14)
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RAW<<6) + (0<<4) + size
+ 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RLE<<6) + (0<<4) + size
+ 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ srcSize < 16KB => 4 bytes (2-2-14-14)
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1- CTable available (stored into workspace ?)
+ 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+ 1.2) Literal block content
+
+ 1.2.1) Huff0 block, using sizes from header
+ See Huff0 format
+
+ 1.2.2) Huff0 block, using prepared table
+
+ 1.2.3) Raw content
+
+ 1.2.4) single byte
+
+
+ 2) Sequences section
+ TO DO
+*/
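+/* Worked example (illustrative, not part of the upstream sources) : a Huff0
+   literal section header in the 4-streams, 3-byte form (2-2-10-10) encoding
+   litSize = 300 and litCSize = 120 would be the bytes { 0x04, 0xB0, 0x78 } :
+   (0x04 & 15)<<6 = 256 plus 0xB0>>2 = 44 regenerates litSize = 300, and
+   (0xB0 & 3)<<8 = 0 plus 0x78 = 120 regenerates litCSize = 120,
+   matching the decoding performed in ZSTDv06_decodeLiteralsBlock() below. */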
+
+/** ZSTDv06_frameHeaderSize() :
+* srcSize must be >= ZSTDv06_frameHeaderSize_min.
+* @return : size of the Frame Header */
+static size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize)
+{
+ if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);
+ { U32 const fcsId = (((const BYTE*)src)[4]) >> 6;
+ return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; }
+}
+
+
+/** ZSTDv06_getFrameParams() :
+* decode Frame Header, or provide expected `srcSize`.
+* @return : 0, `fparamsPtr` is correctly filled,
+* >0, `srcSize` is too small, result is expected `srcSize`,
+* or an error code, which can be tested using ZSTDv06_isError() */
+size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min;
+ if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize);
+ if (srcSize < fhsize) return fhsize; }
+
+ memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+ { BYTE const frameDesc = ip[4];
+ fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN;
+ if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported); /* reserved 1 bit */
+ switch(frameDesc >> 6) /* fcsId */
+ {
+ default: /* impossible */
+ case 0 : fparamsPtr->frameContentSize = 0; break;
+ case 1 : fparamsPtr->frameContentSize = ip[5]; break;
+ case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break;
+ case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break;
+ } }
+ return 0;
+}
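+/* Illustrative caller pattern for the tri-state return value above (not part of
+   the upstream sources) :
+
+       ZSTDv06_frameParams fParams;
+       size_t const ret = ZSTDv06_getFrameParams(&fParams, buf, avail);
+       if (ZSTDv06_isError(ret))  reject the input;
+       else if (ret > 0)          read at least `ret` bytes, then retry;
+       else                       fParams is filled and ready to use;
+*/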
+
+
+/** ZSTDv06_decodeFrameHeader() :
+* `srcSize` must be the size provided by ZSTDv06_frameHeaderSize().
+* @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */
+static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
+{
+ size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
+ if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ return result;
+}
+
+
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+/*! ZSTDv06_getcBlockSize() :
+* Provides the size of compressed block from block header `src` */
+static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ U32 cSize;
+
+ if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+
+ bpPtr->blockType = (blockType_t)((*in) >> 6);
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+
+static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ if (dst==NULL) return ERROR(dstSize_tooSmall);
+ if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/*! ZSTDv06_decodeLiteralsBlock() :
+ @return : nb of bytes read from src (< srcSize ) */
+static size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ const BYTE* const istart = (const BYTE*) src;
+
+ /* any compressed block with literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(istart[0]>> 6)
+ {
+ case IS_HUF:
+ { size_t litSize, litCSize, singleStream=0;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ singleStream = istart[0] & 16;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize=4;
+ litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+ litCSize = ((istart[2] & 63) << 8) + istart[3];
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize=5;
+ litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+ litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
+ break;
+ }
+ if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ if (HUFv06_isError(singleStream ?
+ HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+ HUFv06_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+ return ERROR(corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case IS_PCH:
+ { size_t litSize, litCSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ if (lhSize != 1) /* only case supported for now : small litSize, single stream */
+ return ERROR(corruption_detected);
+ if (!dctx->flagRepeatTable)
+ return ERROR(dictionary_corrupted);
+
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ { size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
+ if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case IS_RAW:
+ { size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize is in [0..3] */
+ lhSize=1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+ case IS_RLE:
+ { size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+ break;
+ }
+ if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ return ERROR(corruption_detected); /* impossible */
+ }
+}
+
+
+/*! ZSTDv06_buildSeqTable() :
+ @return : nb bytes read from src,
+ or an error code if it fails, testable with ZSTDv06_isError()
+*/
+static size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
+{
+ switch(type)
+ {
+ case FSEv06_ENCODING_RLE :
+ if (!srcSize) return ERROR(srcSize_wrong);
+ if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+ FSEv06_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */
+ return 1;
+ case FSEv06_ENCODING_RAW :
+ FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog);
+ return 0;
+ case FSEv06_ENCODING_STATIC:
+ if (!flagRepeatTable) return ERROR(corruption_detected);
+ return 0;
+ default : /* impossible */
+ case FSEv06_ENCODING_DYNAMIC :
+ { U32 tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize);
+ if (FSEv06_isError(headerSize)) return ERROR(corruption_detected);
+ if (tableLog > maxLog) return ERROR(corruption_detected);
+ FSEv06_buildDTable(DTable, norm, max, tableLog);
+ return headerSize;
+ } }
+}
+
+
+static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
+ FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+
+ /* check */
+ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ { int nbSeq = *ip++;
+ if (!nbSeq) { *nbSeqPtr=0; return 1; }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ if (ip+2 > iend) return ERROR(srcSize_wrong);
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+ } else {
+ if (ip >= iend) return ERROR(srcSize_wrong);
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+ }
+
+ /* FSE table descriptors */
+ { U32 const LLtype = *ip >> 6;
+ U32 const Offtype = (*ip >> 4) & 3;
+ U32 const MLtype = (*ip >> 2) & 3;
+ ip++;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* Build DTables */
+ { size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
+ if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+ ip += bhSize;
+ }
+ { size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
+ if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+ ip += bhSize;
+ }
+ { size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
+ if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+ ip += bhSize;
+ } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+} seq_t;
+
+typedef struct {
+ BITv06_DStream_t DStream;
+ FSEv06_DState_t stateLL;
+ FSEv06_DState_t stateOffb;
+ FSEv06_DState_t stateML;
+ size_t prevOffset[ZSTDv06_REP_INIT];
+} seqState_t;
+
+
+
+static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ /* Literal length */
+ U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL));
+ U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML));
+ U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
+
+ U32 const llBits = LL_bits[llCode];
+ U32 const mlBits = ML_bits[mlCode];
+ U32 const ofBits = ofCode;
+ U32 const totalBits = llBits+mlBits+ofBits;
+
+ static const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+ static const U32 ML_base[MaxML+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800,
+ 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 };
+
+ static const U32 OF_base[MaxOff+1] = {
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
+ 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
+ 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 };
+
+ /* sequence */
+ { size_t offset;
+ if (!ofCode)
+ offset = 0;
+ else {
+ offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits); /* <= 26 bits */
+ if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));
+ }
+
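+ /* values below ZSTDv06_REP_NUM select one of the repeated offsets (with a 0/1 swap when litLength==0) ; larger values, after subtracting ZSTDv06_REP_MOVE, encode a brand new offset */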
+ if (offset < ZSTDv06_REP_NUM) {
+ if (llCode == 0 && offset <= 1) offset = 1-offset;
+
+ if (offset != 0) {
+ size_t temp = seqState->prevOffset[offset];
+ if (offset != 1) {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ }
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+
+ } else {
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ offset -= ZSTDv06_REP_MOVE;
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq->offset = offset;
+ }
+
+ seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream));
+
+ seq->litLength = LL_base[llCode] + ((llCode>15) ? BITv06_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() ||
+ (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream));
+
+ /* ANS state update */
+ FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */
+ FSEv06_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */
+ if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream)); /* <= 18 bits */
+ FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */
+}
+
+
+static size_t ZSTDv06_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+
+ /* copy Literals */
+ ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ if (op > oend_8 || sequence.matchLength < MINMATCH) {
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+ }
+ } }
+ /* Requirement: op <= oend_8 */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTDv06_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTDv06_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_8) {
+ ZSTDv06_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+
+static size_t ZSTDv06_decompressSequences(
+ ZSTDv06_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ FSEv06_DTable* DTableLL = dctx->LLTable;
+ FSEv06_DTable* DTableML = dctx->MLTable;
+ FSEv06_DTable* DTableOffb = dctx->OffTable;
+ const BYTE* const base = (const BYTE*) (dctx->base);
+ const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ int nbSeq;
+
+ /* Build Decoding Tables */
+ { size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);
+ if (ZSTDv06_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ dctx->flagRepeatTable = 0;
+ }
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ sequence.offset = REPCODE_STARTVALUE;
+ { U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }
+ { size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
+ FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {
+ nbSeq--;
+ ZSTDv06_decodeSequence(&sequence, &seqState);
+
+#if 0 /* debug */
+ static BYTE* start = NULL;
+ if (start==NULL) start = op;
+ size_t pos = (size_t)(op-start);
+ if ((pos >= 5810037) && (pos < 5810400))
+ printf("Dpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+ pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
+#endif
+
+ { size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ } }
+
+ /* check if reached exact end */
+ if (nbSeq) return ERROR(corruption_detected);
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+
+static void ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+
+static size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+
+ /* Decode literals sub-block */
+ { size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);
+ if (ZSTDv06_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+ return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTDv06_checkContinuity(dctx, dst);
+ return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+/*! ZSTDv06_decompressFrame() :
+* `dctx` must be properly initialized */
+static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstCapacity;
+ size_t remainingSize = srcSize;
+ blockProperties_t blockProperties = { bt_compressed, 0 };
+
+ /* check */
+ if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+ if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
+ if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+ if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t decodedSize=0;
+ size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTDv06_blockHeaderSize;
+ remainingSize -= ZSTDv06_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTDv06_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+
+size_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTDv06_copyDCtx(dctx, refDCtx);
+ ZSTDv06_checkContinuity(dctx, dst);
+ return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize);
+ ZSTDv06_checkContinuity(dctx, dst);
+ return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1)
+ size_t regenSize;
+ ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();
+ if (dctx==NULL) return ERROR(memory_allocation);
+ regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTDv06_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTDv06_DCtx dctx;
+ return ZSTDv06_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
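+
+/* Illustrative sketch (not part of the original sources) : one-shot decompression of a
+   complete v0.6 frame through ZSTDv06_decompress(). Parameter names are hypothetical ;
+   the source buffer must contain exactly one frame and dstCapacity must be large enough. */
+#if 0
+static size_t exampleDecompressFrame(void* dst, size_t dstCapacity,
+                                     const void* cSrc, size_t cSrcSize)
+{
+    size_t const dSize = ZSTDv06_decompress(dst, dstCapacity, cSrc, cSrcSize);
+    if (ZSTDv06_isError(dSize)) return 0;   /* corrupted input, or dstCapacity too small */
+    return dSize;                           /* number of bytes regenerated into dst */
+}
+#endif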
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ blockProperties_t blockProperties = { bt_compressed, 0 };
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+ if (ZSTDv06_isError(frameHeaderSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
+ return;
+ }
+ if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTDv06_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTDv06_blockHeaderSize;
+ remainingSize -= ZSTDv06_blockHeaderSize;
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;
+}
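+
+/* Illustrative sketch (not part of the original sources) : using ZSTDv06_findFrameSizeInfoLegacy()
+   to learn how many compressed bytes one frame spans and how large the destination buffer must be.
+   Names are hypothetical ; note that dBound is only an upper bound. */
+#if 0
+static void exampleSizeOneFrame(const void* cSrc, size_t cSrcSize)
+{
+    size_t frameCSize;
+    unsigned long long dBound;
+    ZSTDv06_findFrameSizeInfoLegacy(cSrc, cSrcSize, &frameCSize, &dBound);
+    if (ZSTDv06_isError(frameCSize) || (dBound == ZSTD_CONTENTSIZE_ERROR)) return;  /* invalid or truncated frame */
+    /* frameCSize : compressed bytes consumed by this frame ;
+       dBound    : upper bound of the regenerated size, i.e. a safe allocation size */
+}
+#endif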
+
+/*_******************************
+* Streaming Decompression API
+********************************/
+size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+ if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst);
+
+ /* Decompress : frame header; part 1 */
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
+ dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+ if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize;
+ memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min);
+ if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) {
+ dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+ }
+ dctx->expected = 0; /* not necessary to copy more */
+ /* fall-through */
+ case ZSTDds_decodeFrameHeader:
+ { size_t result;
+ memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
+ result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
+ if (ZSTDv06_isError(result)) return result;
+ dctx->expected = ZSTDv06_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+ }
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp);
+ if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+ if (bp.blockType == bt_end) {
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ } else {
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->stage = ZSTDds_decompressBlock;
+ }
+ return 0;
+ }
+ case ZSTDds_decompressBlock:
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTDv06_blockHeaderSize;
+ dctx->previousDstEnd = (char*)dst + rSize;
+ return rSize;
+ }
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+}
+
+
+static void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+static size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize;
+
+ hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize);
+ if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + hSize;
+ dictSize -= hSize;
+
+ { short offcodeNCount[MaxOff+1];
+ U32 offcodeMaxValue=MaxOff, offcodeLog;
+ offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
+ if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+ if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ dict = (const char*)dict + offcodeHeaderSize;
+ dictSize -= offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
+ if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+ if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ dict = (const char*)dict + matchlengthHeaderSize;
+ dictSize -= matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
+ if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+ if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ }
+
+ dctx->flagRepeatTable = 1;
+ return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
+}
+
+static size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ size_t eSize;
+ U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTDv06_DICT_MAGIC) {
+ /* pure content mode */
+ ZSTDv06_refDictContent(dctx, dict, dictSize);
+ return 0;
+ }
+ /* load entropy tables */
+ dict = (const char*)dict + 4;
+ dictSize -= 4;
+ eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize);
+ if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted);
+
+ /* reference dictionary content */
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ ZSTDv06_refDictContent(dctx, dict, dictSize);
+
+ return 0;
+}
+
+
+size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ { size_t const errorCode = ZSTDv06_decompressBegin(dctx);
+ if (ZSTDv06_isError(errorCode)) return errorCode; }
+
+ if (dict && dictSize) {
+ size_t const errorCode = ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize);
+ if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted);
+ }
+
+ return 0;
+}
+
+/*
+ Buffered version of Zstd compression library
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net/
+*/
+
+
+/*-***************************************************************************
+* Streaming decompression howto
+*
+* A ZBUFFv06_DCtx object is required to track streaming operations.
+* Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
+* Use ZBUFFv06_decompressInit() to start a new decompression operation,
+* or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFFv06_DCtx objects can be re-init multiple times.
+*
+* Use ZBUFFv06_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+* or 0 when a frame is completely decoded,
+* or an error code, which can be tested using ZBUFFv06_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
+* output : ZBUFFv06_recommendedDOutSize == 128 KB ; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
+* input : ZBUFFv06_recommendedDInSize == 128KB + 3;
+* just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
+* *******************************************************************************/
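+
+/* Illustrative sketch (not part of the original sources) : a minimal streaming loop following
+   the howto above. readInput()/writeOutput() are hypothetical I/O callbacks standing in for
+   whatever the caller uses ; buffer sizes follow the recommended values and the block is kept disabled. */
+#if 0
+static void exampleStreamingDecompress(void)
+{
+    char inBuff[128*1024 + 3];      /* ~ ZBUFFv06_recommendedDInSize() */
+    char outBuff[128*1024];         /* ~ ZBUFFv06_recommendedDOutSize() */
+    ZBUFFv06_DCtx* const zbd = ZBUFFv06_createDCtx();
+    if (zbd==NULL) return;
+    ZBUFFv06_decompressInit(zbd);
+    while (1) {
+        size_t const readSize = readInput(inBuff, sizeof(inBuff));    /* hypothetical */
+        size_t pos = 0;
+        if (readSize == 0) break;                    /* no more input */
+        while (pos < readSize) {
+            size_t srcSize = readSize - pos;
+            size_t dstSize = sizeof(outBuff);
+            size_t const hint = ZBUFFv06_decompressContinue(zbd, outBuff, &dstSize, inBuff+pos, &srcSize);
+            if (ZBUFFv06_isError(hint)) goto _end;
+            pos += srcSize;                          /* srcSize now holds nb of bytes consumed */
+            writeOutput(outBuff, dstSize);           /* hypothetical ; dstSize = nb of bytes produced */
+            if (hint == 0) goto _end;                /* frame completely decoded */
+        }
+    }
+_end:
+    ZBUFFv06_freeDCtx(zbd);
+}
+#endif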
+
+typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
+ ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage;
+
+/* *** Resource management *** */
+struct ZBUFFv06_DCtx_s {
+ ZSTDv06_DCtx* zd;
+ ZSTDv06_frameParams fParams;
+ ZBUFFv06_dStage stage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t blockSize;
+ BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
+ size_t lhSize;
+}; /* typedef'd to ZBUFFv06_DCtx within "zstd_buffered.h" */
+
+
+ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)
+{
+ ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx));
+ if (zbd==NULL) return NULL;
+ memset(zbd, 0, sizeof(*zbd));
+ zbd->zd = ZSTDv06_createDCtx();
+ zbd->stage = ZBUFFds_init;
+ return zbd;
+}
+
+size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd)
+{
+ if (zbd==NULL) return 0; /* support free on null */
+ ZSTDv06_freeDCtx(zbd->zd);
+ free(zbd->inBuff);
+ free(zbd->outBuff);
+ free(zbd);
+ return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize)
+{
+ zbd->stage = ZBUFFds_loadHeader;
+ zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
+ return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize);
+}
+
+size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd)
+{
+ return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0);
+}
+
+
+
+MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t length = MIN(dstCapacity, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr)
+{
+ const char* const istart = (const char*)src;
+ const char* const iend = istart + *srcSizePtr;
+ const char* ip = istart;
+ char* const ostart = (char*)dst;
+ char* const oend = ostart + *dstCapacityPtr;
+ char* op = ostart;
+ U32 notDone = 1;
+
+ while (notDone) {
+ switch(zbd->stage)
+ {
+ case ZBUFFds_init :
+ return ERROR(init_missing);
+
+ case ZBUFFds_loadHeader :
+ { size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
+ if (hSize != 0) {
+ size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */
+ if (ZSTDv06_isError(hSize)) return hSize;
+ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */
+ memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
+ zbd->lhSize += iend-ip;
+ *dstCapacityPtr = 0;
+ return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* Consume header */
+ { size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv06_frameHeaderSize_min */
+ size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
+ if (ZSTDv06_isError(h1Result)) return h1Result;
+ if (h1Size < zbd->lhSize) { /* long header */
+ size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+ size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
+ if (ZSTDv06_isError(h2Result)) return h2Result;
+ } }
+
+ /* Frame header instructs buffer sizes */
+ { size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX);
+ zbd->blockSize = blockSize;
+ if (zbd->inBuffSize < blockSize) {
+ free(zbd->inBuff);
+ zbd->inBuffSize = blockSize;
+ zbd->inBuff = (char*)malloc(blockSize);
+ if (zbd->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ { size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
+ if (zbd->outBuffSize < neededOutSize) {
+ free(zbd->outBuff);
+ zbd->outBuffSize = neededOutSize;
+ zbd->outBuff = (char*)malloc(neededOutSize);
+ if (zbd->outBuff == NULL) return ERROR(memory_allocation);
+ } } }
+ zbd->stage = ZBUFFds_read;
+ /* fall-through */
+ case ZBUFFds_read:
+ { size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+ if (neededInSize==0) { /* end of frame */
+ zbd->stage = ZBUFFds_init;
+ notDone = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
+ zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+ ip, neededInSize);
+ if (ZSTDv06_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize) break; /* this was just a header */
+ zbd->outEnd = zbd->outStart + decodedSize;
+ zbd->stage = ZBUFFds_flush;
+ break;
+ }
+ if (ip==iend) { notDone = 0; break; } /* no more input */
+ zbd->stage = ZBUFFds_load;
+ }
+ /* fall-through */
+ case ZBUFFds_load:
+ { size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+ size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
+ size_t loadedSize;
+ if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */
+ loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
+ ip += loadedSize;
+ zbd->inPos += loadedSize;
+ if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ { size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
+ zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+ zbd->inBuff, neededInSize);
+ if (ZSTDv06_isError(decodedSize)) return decodedSize;
+ zbd->inPos = 0; /* input is consumed */
+ if (!decodedSize) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
+ zbd->outEnd = zbd->outStart + decodedSize;
+ zbd->stage = ZBUFFds_flush;
+ /* break omitted : ZBUFFds_flush stage follows */
+ }
+ }
+ /* fall-through */
+ case ZBUFFds_flush:
+ { size_t const toFlushSize = zbd->outEnd - zbd->outStart;
+ size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
+ op += flushedSize;
+ zbd->outStart += flushedSize;
+ if (flushedSize == toFlushSize) {
+ zbd->stage = ZBUFFds_read;
+ if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
+ zbd->outStart = zbd->outEnd = 0;
+ break;
+ }
+ /* cannot flush everything */
+ notDone = 0;
+ break;
+ }
+ default: return ERROR(GENERIC); /* impossible */
+ } }
+
+ /* result */
+ *srcSizePtr = ip-istart;
+ *dstCapacityPtr = op-ostart;
+ { size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+ if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize; /* get following block header too */
+ nextSrcSizeHint -= zbd->inPos; /* already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+
+
+/* *************************************
+* Tool functions
+***************************************/
+size_t ZBUFFv06_recommendedDInSize(void) { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size*/ ; }
+size_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.h b/vendor/github.com/DataDog/zstd/zstd_v06.h
new file mode 100644
index 000000000..07818571d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv06_H
+#define ZSTDv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*====== Dependency ======*/
+#include <stddef.h> /* size_t */
+
+
+/*====== Export for Windows ======*/
+/*!
+* ZSTDv06_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+*/
+#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
+# define ZSTDLIBv06_API __declspec(dllexport)
+#else
+# define ZSTDLIBv06_API
+#endif
+
+
+/* *************************************
+* Simple functions
+***************************************/
+/*! ZSTDv06_decompress() :
+ `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
+ `dstCapacity` must be large enough, equal or larger than originalSize.
+ @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
+ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
+
+/**
+ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
+ srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+ cSize (output parameter) : the number of bytes that would be read to decompress this frame
+ or an error code if it fails (which can be tested using ZSTDv06_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+*/
+void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
+
+/* *************************************
+* Helper functions
+***************************************/
+ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */
+
+/* Error Management */
+ZSTDLIBv06_API unsigned ZSTDv06_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code); /*!< provides readable string for an error code */
+
+
+/* *************************************
+* Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
+ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
+ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx); /*!< @return : errorCode */
+
+/** ZSTDv06_decompressDCtx() :
+* Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
+ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+* Dictionary API
+*************************/
+/*! ZSTDv06_decompress_usingDict() :
+* Decompression using a pre-defined Dictionary content (see dictBuilder).
+* Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
+* Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
+ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/*-************************
+* Advanced Streaming API
+***************************/
+struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
+typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;
+
+ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIBv06_API void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);
+
+ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
+ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+
+/* *************************************
+* ZBUFF API
+***************************************/
+
+typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
+ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
+ZSTDLIBv06_API size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);
+
+ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
+ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+* Streaming decompression howto
+*
+* A ZBUFFv06_DCtx object is required to track streaming operations.
+* Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
+* Use ZBUFFv06_decompressInit() to start a new decompression operation,
+* or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFFv06_DCtx objects can be re-init multiple times.
+*
+* Use ZBUFFv06_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+* or 0 when a frame is completely decoded,
+* or an error code, which can be tested using ZBUFFv06_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
+* output : ZBUFFv06_recommendedDOutSize == 128 KB ; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
+* input : ZBUFFv06_recommendedDInSize == 128KB + 3;
+* just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
+* *******************************************************************************/
+
+
+/* *************************************
+* Tool functions
+***************************************/
+ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
+ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+* These sizes are just hints, they tend to offer better latency */
+ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
+ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define ZSTDv06_MAGICNUMBER 0xFD2FB526 /* v0.6 */
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTDv06_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.c b/vendor/github.com/DataDog/zstd/zstd_v07.c
new file mode 100644
index 000000000..6b9488931
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.c
@@ -0,0 +1,4535 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+#include <stdlib.h> /* malloc, free, qsort */
+
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+#endif
+#include "xxhash.h" /* XXH64_* */
+#include "zstd_v07.h"
+
+#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
+#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
+#define ZSTDv07_STATIC_LINKING_ONLY
+
+#include "error_private.h"
+
+
+#ifdef ZSTDv07_STATIC_LINKING_ONLY
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+/*--- Constants ---*/
+#define ZSTDv07_MAGIC_SKIPPABLE_START 0x184D2A50U
+
+#define ZSTDv07_WINDOWLOG_MAX_32 25
+#define ZSTDv07_WINDOWLOG_MAX_64 27
+#define ZSTDv07_WINDOWLOG_MAX ((U32)(MEM_32bits() ? ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64))
+#define ZSTDv07_WINDOWLOG_MIN 18
+#define ZSTDv07_CHAINLOG_MAX (ZSTDv07_WINDOWLOG_MAX+1)
+#define ZSTDv07_CHAINLOG_MIN 4
+#define ZSTDv07_HASHLOG_MAX ZSTDv07_WINDOWLOG_MAX
+#define ZSTDv07_HASHLOG_MIN 12
+#define ZSTDv07_HASHLOG3_MAX 17
+#define ZSTDv07_SEARCHLOG_MAX (ZSTDv07_WINDOWLOG_MAX-1)
+#define ZSTDv07_SEARCHLOG_MIN 1
+#define ZSTDv07_SEARCHLENGTH_MAX 7
+#define ZSTDv07_SEARCHLENGTH_MIN 3
+#define ZSTDv07_TARGETLENGTH_MIN 4
+#define ZSTDv07_TARGETLENGTH_MAX 999
+
+#define ZSTDv07_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
+static const size_t ZSTDv07_frameHeaderSize_min = 5;
+static const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTDv07_skippableHeaderSize = 8; /* magic number + skippable frame length */
+
+
+/* custom memory allocation functions */
+typedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTDv07_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem;
+
+
+/*--- Advanced Decompression functions ---*/
+
+/*! ZSTDv07_estimateDCtxSize() :
+ * Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */
+ZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void);
+
+/*! ZSTDv07_createDCtx_advanced() :
+ * Create a ZSTD decompression context using external alloc and free functions */
+ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem);
+
+/*! ZSTDv07_sizeofDCtx() :
+ * Gives the amount of memory used by a given ZSTDv07_DCtx */
+ZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx);
+
+
+/* ******************************************************************
+* Buffer-less streaming functions (synchronous mode)
+********************************************************************/
+
+ZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIBv07_API void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx);
+
+ZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/*
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTDv07_DCtx object is required to track streaming operations.
+ Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it.
+ A ZSTDv07_DCtx object can be re-used multiple times.
+
+ First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input.
+ It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`),
+ and optionally the final size of uncompressed content.
+ (Note : content size is an optional info that may not be present. 0 means : content size unknown)
+ Frame parameters are extracted from the beginning of compressed frame.
+ The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work)
+ If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result.
+ Result : 0 when successful, it means the ZSTDv07_frameParams structure has been filled.
+ >0 : means there is not enough data in `src`. Provides the expected size to successfully decode the header.
+ errorCode, which can be tested using ZSTDv07_isError()
+
+ Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict().
+ Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx().
+
+ Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively.
+ ZSTDv07_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv07_decompressContinue().
+ ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail.
+
+ @result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header.
+
+ ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
+ They should preferably be located contiguously, prior to current block.
+ Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
+ ZSTDv07_decompressContinue() is very sensitive to contiguity,
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference.
+
+ A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow the integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frame is following:
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTDv07_decompressContinue() always returns 0.
+ For skippable frames ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
+ It also returns Frame Size as fparamsPtr->frameContentSize.
+*/
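+
+/* Illustrative sketch (not part of the original sources) : the buffer-less loop described above,
+   feeding ZSTDv07_decompressContinue() exactly the amount announced by ZSTDv07_nextSrcSizeToDecompress().
+   It assumes `src` holds one complete frame and `dst` is a single contiguous buffer large enough
+   for the regenerated content ; both assumptions and the helper name are hypothetical. */
+#if 0
+static size_t exampleBufferlessDecompress(ZSTDv07_DCtx* dctx,
+                                          void* dst, size_t dstCapacity,
+                                          const void* src, size_t srcSize)
+{
+    const unsigned char* ip = (const unsigned char*)src;
+    unsigned char* op = (unsigned char*)dst;
+    unsigned char* const oend = op + dstCapacity;
+    size_t remaining = srcSize;
+    ZSTDv07_decompressBegin(dctx);
+    while (1) {
+        size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(dctx);
+        size_t decoded;
+        if (neededInSize == 0) break;                     /* frame fully decoded */
+        if (neededInSize > remaining) return ERROR(srcSize_wrong);
+        decoded = ZSTDv07_decompressContinue(dctx, op, (size_t)(oend-op), ip, neededInSize);
+        if (ZSTDv07_isError(decoded)) return decoded;
+        ip += neededInSize; remaining -= neededInSize;
+        op += decoded;                                    /* 0 when only a header was decoded */
+    }
+    return (size_t)(op - (unsigned char*)dst);
+}
+#endif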
+
+
+/* **************************************
+* Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ The user will have to keep track of the information required to regenerate data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx()
+ - It is necessary to init context before starting
+ + compression : ZSTDv07_compressBegin()
+ + decompression : ZSTDv07_decompressBegin()
+ + variants _usingDict() are also allowed
+ + copyCCtx() and copyDCtx() work too
+ - Block size is limited, it must be <= ZSTDv07_getBlockSizeMax()
+ + If you need to compress more, cut data into multiple blocks
+ + Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large.
+ - When a block is considered not compressible enough, the result of ZSTDv07_compressBlock() will be zero,
+ in which case nothing is produced into `dst`.
+ + The user must test for such an outcome and deal directly with the uncompressed data
+ + ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!!
+ + In case of multiple successive blocks, the decoder must be informed of the uncompressed block's existence to maintain proper history.
+ Use ZSTDv07_insertBlock() in such a case (see the sketch after the declarations below).
+*/
+
+#define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) /* define, for static allocation */
+ZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */
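+
+/*
+  Illustrative sketch of block-level decoding. It assumes an application-defined
+  framing layer providing, for each of `nbBlocks` blocks, its start `blockPtr[n]`,
+  its size `blockSize[n]`, and a flag `blockIsRaw[n]` set when the compressor
+  returned 0 and the block was therefore stored uncompressed; `op`/`oend` are the
+  current and end positions in the destination buffer :
+
+    size_t n;
+    ZSTDv07_decompressBegin(dctx);
+    for (n = 0; n < nbBlocks; n++) {
+        if (blockIsRaw[n]) {
+            memcpy(op, blockPtr[n], blockSize[n]);                  // raw copy; decompressBlock() must not see it
+            ZSTDv07_insertBlock(dctx, op, blockSize[n]);            // keep the decoder history consistent
+            op += blockSize[n];
+        } else {
+            size_t const dSize = ZSTDv07_decompressBlock(dctx, op, (size_t)(oend - op),
+                                                         blockPtr[n], blockSize[n]);
+            if (ZSTDv07_isError(dSize)) return dSize;
+            op += dSize;
+        }
+    }
+*/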
+
+
+#endif /* ZSTDv07_STATIC_LINKING_ONLY */
+
+
+/* ******************************************************************
+ mem.h
+ low-level memory access routines
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+# define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef signed short S16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-**************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable (no compiler extension needed) but violates the C standard.
+ * It can generate buggy code on targets that depend on alignment.
+ * In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
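+
+/* Example (illustrative) : MEM_FORCE_MEMORY_ACCESS can also be set from the build command line,
+   e.g. with a gcc-compatible compiler :
+     cc -DMEM_FORCE_MEMORY_ACCESS=1 -c ...
+*/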
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Only use if there is no other choice to achieve the best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __packed declarations are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap32(in);
+#else
+ return ((in << 24) & 0xff000000 ) |
+ ((in << 8) & 0x00ff0000 ) |
+ ((in >> 8) & 0x0000ff00 ) |
+ ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap64(in);
+#else
+ return ((in << 56) & 0xff00000000000000ULL) |
+ ((in << 40) & 0x00ff000000000000ULL) |
+ ((in << 24) & 0x0000ff0000000000ULL) |
+ ((in << 8) & 0x000000ff00000000ULL) |
+ ((in >> 8) & 0x00000000ff000000ULL) |
+ ((in >> 24) & 0x0000000000ff0000ULL) |
+ ((in >> 40) & 0x000000000000ff00ULL) |
+ ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian()) {
+ MEM_write16(memPtr, val);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ return MEM_swap32(MEM_read32(memPtr));
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+/* ******************************************************************
+ bitstream
+ Part of FSE library
+ header file (to include)
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined into a .h to be included.
+*/
+
+
+/*=========================================
+* Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+# include <immintrin.h> /* support for bextr (experimental) */
+#endif
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+} BITv07_DStream_t;
+
+typedef enum { BITv07_DStream_unfinished = 0,
+ BITv07_DStream_endOfBuffer = 1,
+ BITv07_DStream_completed = 2,
+ BITv07_DStream_overflow = 3 } BITv07_DStream_status; /* result of BITv07_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);
+MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);
+
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
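+
+/*
+  Minimal sketch of a raw decoding loop, assuming `src`/`srcSize` hold a complete
+  bitstream written by the matching encoder, that every symbol uses `nbBits` bits,
+  and that the caller provides `out`/`outSize` :
+
+    BITv07_DStream_t bitD;
+    size_t n = 0;
+    size_t const initResult = BITv07_initDStream(&bitD, src, srcSize);
+    if (ERR_isError(initResult)) return initResult;               // srcSize must be the exact stream size
+    while ( (BITv07_reloadDStream(&bitD) <= BITv07_DStream_endOfBuffer) && (n < outSize) ) {
+        out[n++] = (BYTE)BITv07_readBits(&bitD, nbBits);          // bits are read backward, from the end of the buffer
+    }
+    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
+*/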
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BITv07_highbit32 (U32 val)
+{
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BITv07_initDStream() :
+* Initialize a BITv07_DStream_t.
+* `bitD` : a pointer to an already allocated BITv07_DStream_t structure.
+* `srcSize` must be the *exact* size of the bitStream, in bytes.
+* @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->start = (const char*)srcBuffer;
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+
+MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits)
+{
+ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv07_lookBitsFast() :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits)
+{
+ U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+ return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BITv07_lookBits(bitD, nbBits);
+ BITv07_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BITv07_readBitsFast() :
+* unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BITv07_lookBitsFast(bitD, nbBits);
+ BITv07_skipBits(bitD, nbBits);
+ return value;
+}
+
+MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */
+ return BITv07_DStream_overflow;
+
+ if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BITv07_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer;
+ return BITv07_DStream_completed;
+ }
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BITv07_DStream_status result = BITv07_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BITv07_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+ return result;
+ }
+}
+
+/*! BITv07_endOfDStream() :
+* @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+ FSE : Finite State Entropy codec
+ Public Prototypes declaration
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef FSEv07_H
+#define FSEv07_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+
+/*! FSEv07_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= dstCapacity),
+ or an error code, which can be tested using FSEv07_isError() .
+
+ ** Important ** : FSEv07_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+size_t FSEv07_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/* Error Management */
+unsigned FSEv07_isError(size_t code); /* tells if a return value is an error code */
+const char* FSEv07_getErrorName(size_t code); /* provides error code string (useful for debugging) */
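+
+/*
+  Minimal sketch, assuming `cSrc`/`cSrcSize` point to an FSE-compressed block and
+  `dst`/`dstCapacity` are provided by the caller :
+
+    size_t const dSize = FSEv07_decompress(dst, dstCapacity, cSrc, cSrcSize);
+    if (FSEv07_isError(dSize)) return dSize;     // e.g. dstCapacity too small, or corrupted input
+    // `dSize` bytes have been regenerated into `dst`
+*/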
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSEv07_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide the normalized distribution using an external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSEv07_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSEv07_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSEv07_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSEv07_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv07_DTable* FSEv07_createDTable(unsigned tableLog);
+void FSEv07_freeDTable(FSEv07_DTable* dt);
+
+/*! FSEv07_buildDTable():
+ Builds 'dt', which must be already allocated, using FSEv07_createDTable().
+ return : 0, or an errorCode, which can be tested using FSEv07_isError() */
+size_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSEv07_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSEv07_isError() */
+size_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If the block is uncompressed, use memcpy() instead.
+ If the block is a single repeated byte, use memset() instead.)
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
+
+The next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.
+This is performed by the function FSEv07_buildDTable().
+The space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
+
+`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)
+*/
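+
+/*
+  Minimal sketch of the tutorial above, assuming `cSrc`/`cSrcSize` start with the
+  saved normalized counters, immediately followed by the FSE-compressed bitstream,
+  and that `dst`/`dstCapacity` are provided by the caller :
+
+    short counting[256];                          // enough for maxSymbolValue up to 255
+    unsigned tableLog;
+    unsigned maxSymbolValue = 255;
+    FSEv07_DTable* dt;
+    size_t dSize;
+
+    size_t const NCountLength = FSEv07_readNCount(counting, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+    if (FSEv07_isError(NCountLength)) return NCountLength;
+
+    dt = FSEv07_createDTable(tableLog);
+    { size_t const err = FSEv07_buildDTable(dt, counting, maxSymbolValue, tableLog);
+      if (FSEv07_isError(err)) { FSEv07_freeDTable(dt); return err; } }
+
+    dSize = FSEv07_decompress_usingDTable(dst, dstCapacity,
+                                          (const char*)cSrc + NCountLength, cSrcSize - NCountLength, dt);
+    FSEv07_freeDTable(dt);
+    // dSize is the regenerated size, or an error code (test with FSEv07_isError())
+*/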
+
+
+#ifdef FSEv07_STATIC_LINKING_ONLY
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSEv07_NCOUNTBOUND 512
+#define FSEv07_BLOCKBOUND(size) (size + (size>>7))
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using the macros below */
+#define FSEv07_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
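+/* example (sketch) : static allocation of a DTable able to hold any tableLog up to FSEv07_MAX_TABLELOG :
+     FSEv07_DTable dt[ FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG) ];   // usable in place of FSEv07_createDTable()
+*/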
+
+
+/* *****************************************
+* FSE advanced API
+*******************************************/
+size_t FSEv07_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+/**< same as FSEv07_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
+
+unsigned FSEv07_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSEv07_optimalTableLog(), which uses `minus==2` */
+
+size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);
+/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSEv07_DState_t;
+
+
+static void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);
+
+static unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
+
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSEv07_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSEv07_decode_t; /* size == U32 */
+
+MEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;
+ DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);
+ BITv07_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)
+{
+ FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+ FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BITv07_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+ FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BITv07_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSEv07_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+ FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+
+
+#ifndef FSEv07_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv07_MAX_MEMORY_USAGE 14
+#define FSEv07_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv07_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#define FSEv07_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSEv07_FUNCTION_TYPE BYTE
+#define FSEv07_FUNCTION_EXTENSION
+#define FSEv07_DECODE_TYPE FSEv07_decode_t
+
+
+#endif /* !FSEv07_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSEv07_MAX_TABLELOG (FSEv07_MAX_MEMORY_USAGE-2)
+#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)
+#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)
+#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)
+#define FSEv07_MIN_TABLELOG 5
+
+#define FSEv07_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX
+# error "FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#endif /* FSEv07_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* FSEv07_H */
+/* ******************************************************************
+ Huffman coder, part of New Generation Entropy library
+ header file
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv07_H_298734234
+#define HUFv07_H_298734234
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* *** simple functions *** */
+/**
+HUFv07_decompress() :
+ Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated buffer 'dst', of minimum size 'dstSize'.
+ `dstSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ Note : in contrast with FSE, HUFv07_decompress can regenerate
+ RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ because it knows the size to regenerate.
+ @return : size of regenerated data (== dstSize),
+ or an error code, which can be tested using HUFv07_isError()
+*/
+size_t HUFv07_decompress(void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize);
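+
+/*
+  Minimal sketch, assuming the exact original size `originalSize` is known to the
+  caller (e.g. transmitted by an application-level header) and `dst` can hold it :
+
+    size_t const dSize = HUFv07_decompress(dst, originalSize, cSrc, cSrcSize);
+    if (HUFv07_isError(dSize)) return dSize;     // corrupted input, or wrong originalSize
+    // exactly `originalSize` bytes have been regenerated into `dst`
+*/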
+
+
+/* ****************************************
+* Tool functions
+******************************************/
+#define HUFv07_BLOCKSIZE_MAX (128 * 1024)
+
+/* Error Management */
+unsigned HUFv07_isError(size_t code); /**< tells if a return value is an error code */
+const char* HUFv07_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+
+#ifdef HUFv07_STATIC_LINKING_ONLY
+
+
+/* *** Constants *** */
+#define HUFv07_TABLELOG_ABSOLUTEMAX 16 /* absolute limit of HUFv07_TABLELOG_MAX. Beyond that value, code does not work */
+#define HUFv07_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUFv07_TABLELOG_ABSOLUTEMAX */
+#define HUFv07_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
+#define HUFv07_SYMBOLVALUE_MAX 255
+#if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX)
+# error "HUFv07_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUFv07_DTable;
+#define HUFv07_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }
+#define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+ HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+
+size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
+size_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+
+size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+
+
+/* ****************************************
+* HUF detailed API
+******************************************/
+/*!
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and regenerate 'CTable' using external methods.
+*/
+/* FSEv07_count() : find it within "fse.h" */
+
+/*! HUFv07_readStats() :
+ Read compact Huffman tree, saved by HUFv07_writeCTable().
+ `huffWeight` is destination buffer.
+ @return : size read from `src`, or an error code.
+ Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() . */
+size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+
+/*
+HUFv07_decompress() does the following:
+1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+2. build the Huffman table from its saved description, using HUFv07_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUFv07_decompressSXn_usingDTable
+*/
+
+/** HUFv07_selectDecoder() :
+* Tells which decoder is likely to decode faster,
+* based on a set of pre-determined metrics.
+* @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
+size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
+
+size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
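+
+/*
+  Illustrative sketch of the advanced path, assuming the original size `dstSize` is
+  known and the compressed stream `cSrc`/`cSrcSize` starts with its serialized table :
+
+    HUFv07_CREATE_STATIC_DTABLEX4(dtable, HUFv07_TABLELOG_MAX);
+    size_t const hSize = HUFv07_readDTableX4(dtable, cSrc, cSrcSize);       // read the table description
+    if (HUFv07_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    return HUFv07_decompress4X4_usingDTable(dst, dstSize,
+                                            (const char*)cSrc + hSize, cSrcSize - hSize, dtable);
+*/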
+
+
+/* single stream variants */
+size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+
+size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+
+
+#endif /* HUFv07_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFv07_H_298734234 */
+/*
+ Common functions of New Generation Entropy library
+ Copyright (C) 2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+
+
+/*-****************************************
+* FSE Error Management
+******************************************/
+unsigned FSEv07_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+* HUF Error Management
+****************************************************************/
+unsigned HUFv07_isError(size_t code) { return ERR_isError(code); }
+
+const char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }
+
+size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) return ERROR(srcSize_wrong);
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) && (charnum<=*maxSVPtr)) {
+ if (previous0) {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0+=24;
+ if (ip < iend-5) {
+ ip+=2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 16;
+ bitCount+=16;
+ } }
+ while ((bitStream & 3) == 3) {
+ n0+=3;
+ bitStream>>=2;
+ bitCount+=2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ else
+ bitStream >>= 2;
+ }
+ { short const max = (short)((2*threshold-1)-remaining);
+ short count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = (short)(bitStream & (threshold-1));
+ bitCount += nbBits-1;
+ } else {
+ count = (short)(bitStream & (2*threshold-1));
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= FSEv07_abs(count);
+ normalizedCounter[charnum++] = count;
+ previous0 = !count;
+ while (remaining < threshold) {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ } } /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
+ if (remaining != 1) return ERROR(GENERIC);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+ return ip-istart;
+}
+
+
+/*! HUFv07_readStats() :
+ Read compact Huffman tree, saved by HUFv07_writeCTable().
+ `huffWeight` is destination buffer.
+ @return : size read from `src`, or an error code.
+ Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() .
+*/
+size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */
+
+ if (iSize >= 128) { /* special header */
+ if (iSize >= (242)) { /* RLE */
+ static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+ oSize = l[iSize-242];
+ memset(huffWeight, 1, hwSize);
+ iSize = 0;
+ }
+ else { /* Incompressible */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } } }
+ else { /* header compressed with FSE (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSEv07_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BITv07_highbit32(weightTotal) + 1;
+ if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BITv07_highbit32(rest);
+ U32 const lastWeight = BITv07_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
+/* ******************************************************************
+ FSE : Finite State Entropy decoder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSEv07_isError ERR_isError
+#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSEv07_FUNCTION_EXTENSION
+# error "FSEv07_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv07_FUNCTION_TYPE
+# error "FSEv07_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv07_CAT(X,Y) X##Y
+#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)
+#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)
+
+
+/* Function templates */
+FSEv07_DTable* FSEv07_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;
+ return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv07_freeDTable (FSEv07_DTable* dt)
+{
+ free(dt);
+}
+
+size_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);
+ U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSEv07_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSEv07_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSEv07_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U16 nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+
+
+#ifndef FSEv07_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv07_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BITv07_DStream_t bitD;
+ FSEv07_DState_t state1;
+ FSEv07_DState_t state2;
+
+ /* Init */
+ { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */
+ if (FSEv07_isError(errorCode)) return errorCode; }
+
+ FSEv07_initDState(&state1, &bitD, dt);
+ FSEv07_initDState(&state2, &bitD, dt);
+
+#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {
+ op[0] = FSEv07_GETSYMBOL(&state1);
+
+ if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv07_reloadDStream(&bitD);
+
+ op[1] = FSEv07_GETSYMBOL(&state2);
+
+ if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSEv07_GETSYMBOL(&state1);
+
+ if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BITv07_reloadDStream(&bitD);
+
+ op[3] = FSEv07_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BITv07_reloadDStream(&bitD) >= BITv07_DStream_endOfBuffer; ends at exactly BITv07_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+ *op++ = FSEv07_GETSYMBOL(&state1);
+
+ if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
+ *op++ = FSEv07_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+ *op++ = FSEv07_GETSYMBOL(&state2);
+
+ if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
+ *op++ = FSEv07_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSEv07_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSEv07_DTableHeader* DTableH = (const FSEv07_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSEv07_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand that this table will be properly initialized later */
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;
+
+ if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
+
+ /* normal FSE decoding mode */
+ { size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSEv07_isError(NCountLength)) return NCountLength;
+ if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ { size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog);
+ if (FSEv07_isError(errorCode)) return errorCode; }
+
+ return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */
+}
+
+
+
+#endif /* FSEv07_COMMONDEFS_ONLY */
+
+/* ******************************************************************
+ Huffman decoder, part of New Generation Entropy library
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+# define inline __inline
+#else
+# define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table)
+{
+ DTableDesc dtd;
+ memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
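+
+/* note : the first cell of a HUFv07_DTable stores its descriptor (maxTableLog,
+ * tableType, tableLog); the actual decoding entries start at DTable+1.
+ * HUFv07_getDTableDesc() extracts the descriptor with memcpy() to avoid
+ * strict-aliasing issues, and HUFv07_STATIC_ASSERT(sizeof(DTableDesc) ==
+ * sizeof(HUFv07_DTable)) below guarantees it fits in exactly one table cell. */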
+
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+
+typedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2; /* single-symbol decoding */
+
+size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
+{
+ BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1];
+ U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr;
+
+ HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable));
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv07_isError(iSize)) return iSize;
+
+ /* Table header */
+ { DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Prepare ranks */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<tableLog+1; n++) {
+ U32 current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ } }
+
+ /* fill DTable */
+ { U32 n;
+ for (n=0; n<nbSymbols; n++) {
+ U32 const w = huffWeight[n];
+ U32 const length = (1 << w) >> 1;
+ U32 i;
+ HUFv07_DEltX2 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (i = rankVal[w]; i < rankVal[w] + length; i++)
+ dt[i] = D;
+ rankVal[w] += length;
+ } }
+
+ return iSize;
+}
+
+
+static BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BITv07_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BITv07_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ *ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
+ HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {
+ HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to the end */
+ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))
+ HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, hence no need to reload */
+ while (p < pEnd)
+ HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+static size_t HUFv07_decompress1X2_usingDTable_internal(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
+ BITv07_DStream_t bitD;
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+
+ HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+ /* check */
+ if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+size_t HUFv07_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);
+ if (HUFv07_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
+ return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+static size_t HUFv07_decompress4X2_usingDTable_internal(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable + 1;
+ const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
+
+ /* Init */
+ BITv07_DStream_t bitD1;
+ BITv07_DStream_t bitD2;
+ BITv07_DStream_t bitD3;
+ BITv07_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+    /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv07_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv07_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
+ if (!endSignal) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+size_t HUFv07_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize);
+ if (HUFv07_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
+ return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4; /* double-symbols decoding */
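+/* note : each HUFv07_DEltX4 cell packs up to two decoded bytes in `sequence`
+ * (stored little-endian, so the first decoded byte is the low byte), the number
+ * of bits consumed in `nbBits`, and in `length` how many of the two bytes are
+ * valid (1 or 2). HUFv07_decodeSymbolX4() always copies 2 bytes but advances
+ * the output pointer by `length` only. */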
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+static void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUFv07_DEltX4 DElt;
+ U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ }}
+}
+
+typedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+
+static void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ } else {
+ HUFv07_DEltX4 DElt;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ { U32 u;
+ const U32 end = start + length;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ rankVal[weight] += length;
+ }
+}
+
+size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
+{
+ BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1];
+ sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1];
+ U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 };
+ U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 };
+ U32* const rankStart = rankStart0+1;
+ rankVal_t rankVal;
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr;
+
+ HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable)); /* if compilation fails here, assertion is false */
+ if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* not necessary, even though some analyzers complain ... */
+
+ iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUFv07_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = weightList[s];
+ U32 const r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUFv07_fillDTableX4(dt, maxTableLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
+
+
+static U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BITv07_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+static U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
+{
+ const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BITv07_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ } }
+ return 1;
+}
+
+
+#define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
+ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) {
+ HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
+ HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2))
+ HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+
+static size_t HUFv07_decompress1X4_usingDTable_internal(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ BITv07_DStream_t bitD;
+
+ /* Init */
+ { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
+ if (HUFv07_isError(errorCode)) return errorCode;
+ }
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+size_t HUFv07_decompress1X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize);
+ if (HUFv07_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
+ return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+static size_t HUFv07_decompress4X4_usingDTable_internal(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1;
+ const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
+
+ /* Init */
+ BITv07_DStream_t bitD1;
+ BITv07_DStream_t bitD2;
+ BITv07_DStream_t bitD3;
+ BITv07_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+ { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
+ if (HUFv07_isError(errorCode)) return errorCode; }
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+ for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
+ HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
+ HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1);
+ HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2);
+ HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3);
+ HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+ endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+    /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+ HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+ HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+ HUFv07_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+size_t HUFv07_decompress4X4_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUFv07_decompress4X4_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize);
+ if (HUFv07_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
+ return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ return dtd.tableType ? HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+ HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUFv07_DTable* DTable)
+{
+ DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+ return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+ HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+/** HUFv07_selectDecoder() :
+* Tells which decoder is likely to decode faster,
+* based on a set of pre-determined metrics.
+* @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ /* decoder timing evaluation */
+ U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
+
+ return DTime1 < DTime0;
+}
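+
+/* Illustrative example (sizes chosen arbitrarily) : with dstSize = 100 KB and
+ * cSrcSize = 40 KB, Q = 40960*16/102400 = 6 and D256 = 102400>>8 = 400, so per
+ * the algoTime[] table above DTime0 = 883 + 128*400 = 52083 and
+ * DTime1 = 1437 + 74*400 = 31037; after the +12.5% penalty (DTime1 >> 3),
+ * DTime1 = 34916 < DTime0, hence the double-symbol decoder (return value 1)
+ * is preferred at this compression ratio. */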
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 };
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+ }
+
+ //return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
+ //return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
+}
+
+size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+}
+
+size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
+
+ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+}
+
+size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+}
+/*
+ Common functions of Zstd compression library
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net/
+*/
+
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+/*! ZSTDv07_isError() :
+* tells if a return value is an error code */
+unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTDv07_getErrorName() :
+* provides error code string from function result (useful for debugging) */
+const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+
+/* **************************************************************
+* ZBUFF Error Management
+****************************************************************/
+unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+
+
+static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
+{
+ void* address = malloc(size);
+ (void)opaque;
+ /* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */
+ return address;
+}
+
+static void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
+{
+ (void)opaque;
+ /* if (address) printf("free %p opaque=%p \n", address, opaque); */
+ free(address);
+}
+/*
+ zstd_internal - common functions to include
+ Header File for include
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : https://www.zstd.net
+*/
+#ifndef ZSTDv07_CCOMMON_H_MODULE
+#define ZSTDv07_CCOMMON_H_MODULE
+
+
+/*-*************************************
+* Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTDv07_OPT_NUM (1<<12)
+#define ZSTDv07_DICT_MAGIC 0xEC30A437 /* v0.7 */
+
+#define ZSTDv07_REP_NUM 3
+#define ZSTDv07_REP_INIT ZSTDv07_REP_NUM
+#define ZSTDv07_REP_MOVE (ZSTDv07_REP_NUM-1)
+static const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10
+static const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTDv07_BLOCKHEADERSIZE 3   /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */
+static const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE;
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+
+#define FSEv07_ENCODING_RAW 0
+#define FSEv07_ENCODING_RLE 1
+#define FSEv07_ENCODING_STATIC 2
+#define FSEv07_ENCODING_DYNAMIC 3
+
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1 };
+static const U32 LL_defaultNormLog = 6;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1 };
+static const U32 ML_defaultNormLog = 6;
+
+static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+static const U32 OF_defaultNormLog = 5;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTDv07_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv07_wildcopy() :
+* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
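+
+/* note : since ZSTDv07_wildcopy() may write up to WILDCOPY_OVERLENGTH bytes past
+ * `dst + length`, destination buffers must reserve that much slack; e.g. the
+ * decompression context below sizes its literal buffer as
+ * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH. */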
+
+
+/*-*******************************************
+* Private interfaces
+*********************************************/
+typedef struct ZSTDv07_stats_s ZSTDv07_stats_t;
+
+typedef struct {
+ U32 off;
+ U32 len;
+} ZSTDv07_match_t;
+
+typedef struct {
+ U32 price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTDv07_REP_INIT];
+} ZSTDv07_optimal_t;
+
+struct ZSTDv07_stats_s { U32 unused; };
+
+typedef struct {
+ void* buffer;
+ U32* offsetStart;
+ U32* offset;
+ BYTE* offCodeStart;
+ BYTE* litStart;
+ BYTE* lit;
+ U16* litLengthStart;
+ U16* litLength;
+ BYTE* llCodeStart;
+ U16* matchLengthStart;
+ U16* matchLength;
+ BYTE* mlCodeStart;
+ U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+ U32 longLengthPos;
+ /* opt */
+ ZSTDv07_optimal_t* priceTable;
+ ZSTDv07_match_t* matchTable;
+ U32* matchLengthFreq;
+ U32* litLengthFreq;
+ U32* litFreq;
+ U32* offCodeFreq;
+ U32 matchLengthSum;
+ U32 matchSum;
+ U32 litLengthSum;
+ U32 litSum;
+ U32 offCodeSum;
+ U32 log2matchLengthSum;
+ U32 log2matchSum;
+ U32 log2litLengthSum;
+ U32 log2litSum;
+ U32 log2offCodeSum;
+ U32 factor;
+ U32 cachedPrice;
+ U32 cachedLitLength;
+ const BYTE* cachedLiterals;
+ ZSTDv07_stats_t stats;
+} seqStore_t;
+
+void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+
+/* custom memory allocation functions */
+static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
+
+#endif /* ZSTDv07_CCOMMON_H_MODULE */
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTDv07_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTDv07_HEAPMODE
+# define ZSTDv07_HEAPMODE 1
+#endif
+
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
+#endif
+
+
+/*-*************************************
+* Macros
+***************************************/
+#define ZSTDv07_isError ERR_isError /* for inlining */
+#define FSEv07_isError ERR_isError
+#define HUFv07_isError ERR_isError
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTDv07_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage;
+
+struct ZSTDv07_DCtx_s
+{
+ FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)];
+ FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)];
+ FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)];
+ HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */
+ const void* previousDstEnd;
+ const void* base;
+ const void* vBase;
+ const void* dictEnd;
+ size_t expected;
+ U32 rep[3];
+ ZSTDv07_frameParams fParams;
+ blockType_t bType; /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+ ZSTDv07_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ XXH64_state_t xxhState;
+ size_t headerSize;
+ U32 dictID;
+ const BYTE* litPtr;
+ ZSTDv07_customMem customMem;
+ size_t litSize;
+ BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
+}; /* typedef'd to ZSTDv07_DCtx within "zstd_static.h" */
+
+int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx);
+
+size_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); }
+
+size_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); }
+
+size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx)
+{
+ dctx->expected = ZSTDv07_frameHeaderSize_min;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ dctx->vBase = NULL;
+ dctx->dictEnd = NULL;
+ dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001);
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; }
+ return 0;
+}
+
+ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem)
+{
+ ZSTDv07_DCtx* dctx;
+
+ if (!customMem.customAlloc && !customMem.customFree)
+ customMem = defaultCustomMem;
+
+ if (!customMem.customAlloc || !customMem.customFree)
+ return NULL;
+
+ dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx));
+ if (!dctx) return NULL;
+ memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem));
+ ZSTDv07_decompressBegin(dctx);
+ return dctx;
+}
+
+ZSTDv07_DCtx* ZSTDv07_createDCtx(void)
+{
+ return ZSTDv07_createDCtx_advanced(defaultCustomMem);
+}
+
+size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ dctx->customMem.customFree(dctx->customMem.opaque, dctx);
+ return 0; /* reserved as a potential error code in the future */
+}
+
+void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx)
+{
+ memcpy(dstDCtx, srcDCtx,
+ sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max)); /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+* Decompression section
+***************************************************************/
+
+/* Frame format description
+ Frame Header - [ Block Header - Block ] - Frame End
+ 1) Frame Header
+ - 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h)
+ - 1 byte - Frame Descriptor
+ 2) Block Header
+      - 3 bytes, starting with a 2-bit descriptor
+ Uncompressed, Compressed, Frame End, unused
+ 3) Block
+ See Block Format Description
+ 4) Frame End
+ - 3 bytes, compatible with Block Header
+*/
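+
+/* Illustrative block header (bytes chosen arbitrarily) : the 3-byte header
+ * { 0x00, 0x01, 0x00 } decodes, per ZSTDv07_getcBlockSize() below, to
+ * blockType = 0x00>>6 = 0 (bt_compressed) and
+ * cSize = 0x00 + (0x01<<8) + ((0x00 & 7)<<16) = 256 compressed bytes. */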
+
+
+/* Frame Header :
+
+ 1 byte - FrameHeaderDescription :
+ bit 0-1 : dictID (0, 1, 2 or 4 bytes)
+ bit 2 : checksumFlag
+ bit 3 : reserved (must be zero)
+ bit 4 : reserved (unused, can be any value)
+ bit 5 : Single Segment (if 1, WindowLog byte is not present)
+ bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8)
+                         if (SkippedWindowLog && !FrameContentFieldSize) FrameContentFieldSize=1;
+
+ Optional : WindowLog (0 or 1 byte)
+ bit 0-2 : octal Fractional (1/8th)
+ bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB)
+
+ Optional : dictID (0, 1, 2 or 4 bytes)
+ Automatic adaptation
+ 0 : no dictID
+ 1 : 1 - 255
+ 2 : 256 - 65535
+ 4 : all other values
+
+ Optional : content size (0, 1, 2, 4 or 8 bytes)
+ 0 : unknown (fcfs==0 and swl==0)
+ 1 : 0-255 bytes (fcfs==0 and swl==1)
+ 2 : 256 - 65535+256 (fcfs==1)
+ 4 : 0 - 4GB-1 (fcfs==2)
+ 8 : 0 - 16EB-1 (fcfs==3)
+*/
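+
+/* Illustrative example (descriptor chosen arbitrarily) : a FrameHeaderDescription
+ * byte of 0x01 means a 1-byte dictID (bits 0-1 = 1), no checksum (bit 2 = 0),
+ * not a Single Segment (bit 5 = 0), and no content size field (bits 6-7 = 0).
+ * Since Single Segment is off, a WindowLog byte follows, so the full header is
+ * magic(4) + descriptor(1) + WindowLog(1) + dictID(1) = 7 bytes; a WindowLog
+ * byte of 0x58 gives windowSize = 1 << ((0x58>>3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN)
+ * = 1<<21 = 2 MB, since its low 3 bits (the 1/8th fraction) are zero. */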
+
+
+/* Compressed Block, format description
+
+ Block = Literal Section - Sequences Section
+ Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+ 1) Literal Section
+
+ 1.1) Header : 1-5 bytes
+ flags: 2 bits
+ 00 compressed by Huff0
+ 01 unused
+ 10 is Raw (uncompressed)
+ 11 is Rle
+ Note : using 01 => Huff0 with precomputed table ?
+ Note : delta map ? => compressed ?
+
+ 1.1.1) Huff0-compressed literal block : 3-5 bytes
+ srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RAW<<6) + (0<<4) + size
+ 12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+ size : 5 bits: (IS_RLE<<6) + (0<<4) + size
+ 12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+ size&255
+ 20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+ size>>8&255
+ size&255
+
+ 1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+ srcSize < 16KB => 4 bytes (2-2-14-14)
+ else => 5 bytes (2-2-18-18)
+ big endian convention
+
+ 1- CTable available (stored into workspace ?)
+ 2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+ 1.2) Literal block content
+
+ 1.2.1) Huff0 block, using sizes from header
+ See Huff0 format
+
+ 1.2.2) Huff0 block, using prepared table
+
+ 1.2.3) Raw content
+
+ 1.2.4) single byte
+
+
+ 2) Sequences section
+ TO DO
+*/
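+
+/* Illustrative literal-section header (bytes chosen arbitrarily) : the 3-byte
+ * header { 0x08, 0x41, 0x80 } decodes, per ZSTDv07_decodeLiteralsBlock() below,
+ * to a Huff0-compressed block (0x08>>6 = 0) using 4 streams (bit 4 clear),
+ * litSize = ((0x08 & 15)<<6) + (0x41>>2) = 512 + 16 = 528 regenerated literals,
+ * and litCSize = ((0x41 & 3)<<8) + 0x80 = 256 + 128 = 384 compressed bytes. */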
+
+/** ZSTDv07_frameHeaderSize() :
+* srcSize must be >= ZSTDv07_frameHeaderSize_min.
+* @return : size of the Frame Header */
+static size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize)
+{
+ if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);
+ { BYTE const fhd = ((const BYTE*)src)[4];
+ U32 const dictID= fhd & 3;
+ U32 const directMode = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId]
+ + (directMode && !ZSTDv07_fcs_fieldSize[fcsId]);
+ }
+}
+
+
+/** ZSTDv07_getFrameParams() :
+* decode Frame Header, or require larger `srcSize`.
+* @return : 0, `fparamsPtr` is correctly filled,
+* >0, `srcSize` is too small, result is expected `srcSize`,
+* or an error code, which can be tested using ZSTDv07_isError() */
+size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;
+ memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+ if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
+ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
+ if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */
+ fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
+ fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
+ return 0;
+ }
+ return ERROR(prefix_unknown);
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize);
+ if (srcSize < fhsize) return fhsize; }
+
+ { BYTE const fhdByte = ip[4];
+ size_t pos = 5;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const directMode = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX;
+ U32 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = 0;
+ if ((fhdByte & 0x08) != 0) /* reserved bits, which must be zero */
+ return ERROR(frameParameter_unsupported);
+ if (!directMode) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;
+ if (windowLog > ZSTDv07_WINDOWLOG_MAX)
+ return ERROR(frameParameter_unsupported);
+ windowSize = (1U << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
+ }
+
+ switch(dictIDSizeCode)
+ {
+ default: /* impossible */
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default: /* impossible */
+ case 0 : if (directMode) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (!windowSize) windowSize = (U32)frameContentSize;
+ if (windowSize > windowSizeMax)
+ return ERROR(frameParameter_unsupported);
+ fparamsPtr->frameContentSize = frameContentSize;
+ fparamsPtr->windowSize = windowSize;
+ fparamsPtr->dictID = dictID;
+ fparamsPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
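+
+/* Minimal usage sketch (hypothetical caller, for illustration only; the helper
+ * needMoreInput() is a placeholder, not part of this library) :
+ *
+ *     ZSTDv07_frameParams fp;
+ *     size_t const r = ZSTDv07_getFrameParams(&fp, buf, bufSize);
+ *     if (ZSTDv07_isError(r)) return r;       // not a recognizable frame
+ *     if (r > 0) return needMoreInput(r);     // r = total srcSize required
+ *     // r == 0 : fp is filled; fp.windowSize==0 means a skippable frame
+ */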
+
+
+/** ZSTDv07_getDecompressedSize() :
+* compatible with legacy mode
+* @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - decompressed size is not provided within frame header
+ - frame header unknown / not supported
+ - frame header not completely provided (`srcSize` too small) */
+unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)
+{
+ ZSTDv07_frameParams fparams;
+ size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
+ if (frResult!=0) return 0;
+ return fparams.frameContentSize;
+}
+
+
+/** ZSTDv07_decodeFrameHeader() :
+* `srcSize` must be the size provided by ZSTDv07_frameHeaderSize().
+* @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */
+static size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize)
+{
+ size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize);
+ if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
+ if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+ return result;
+}
+
+
+typedef struct
+{
+ blockType_t blockType;
+ U32 origSize;
+} blockProperties_t;
+
+/*! ZSTDv07_getcBlockSize() :
+* Provides the size of compressed block from block header `src` */
+static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ U32 cSize;
+
+ if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+
+ bpPtr->blockType = (blockType_t)((*in) >> 6);
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+
+static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/*! ZSTDv07_decodeLiteralsBlock() :
+ @return : nb of bytes read from src (< srcSize ) */
+static size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ const BYTE* const istart = (const BYTE*) src;
+
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch((litBlockType_t)(istart[0]>> 6))
+ {
+ case lbt_huffman:
+ { size_t litSize, litCSize, singleStream=0;
+ U32 lhSize = (istart[0] >> 4) & 3;
+ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ singleStream = istart[0] & 16;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize=4;
+ litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+ litCSize = ((istart[2] & 63) << 8) + istart[3];
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize=5;
+ litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+ litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
+ break;
+ }
+ if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ if (HUFv07_isError(singleStream ?
+ HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+ HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+ return ERROR(corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case lbt_repeat:
+ { size_t litSize, litCSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ if (lhSize != 1) /* only case supported for now : small litSize, single stream */
+ return ERROR(corruption_detected);
+ if (dctx->litEntropy==0)
+ return ERROR(dictionary_corrupted);
+
+ /* 2 - 2 - 10 - 10 */
+ lhSize=3;
+ litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+ litCSize = ((istart[1] & 3) << 8) + istart[2];
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ { size_t const errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable);
+ if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+ case lbt_raw:
+ { size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
+ lhSize=1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+ case lbt_rle:
+ { size_t litSize;
+ U32 lhSize = ((istart[0]) >> 4) & 3;
+ switch(lhSize)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
+ lhSize = 1;
+ litSize = istart[0] & 31;
+ break;
+ case 2:
+ litSize = ((istart[0] & 15) << 8) + istart[1];
+ break;
+ case 3:
+ litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+ if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+ break;
+ }
+ if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ return ERROR(corruption_detected); /* impossible */
+ }
+}
+
+
+/*! ZSTDv07_buildSeqTable() :
+ @return : nb bytes read from src,
+ or an error code if it fails, testable with ZSTDv07_isError()
+*/
+static size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
+{
+ switch(type)
+ {
+ case FSEv07_ENCODING_RLE :
+ if (!srcSize) return ERROR(srcSize_wrong);
+ if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+ FSEv07_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */
+ return 1;
+ case FSEv07_ENCODING_RAW :
+ FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog);
+ return 0;
+ case FSEv07_ENCODING_STATIC:
+ if (!flagRepeatTable) return ERROR(corruption_detected);
+ return 0;
+ default : /* impossible */
+ case FSEv07_ENCODING_DYNAMIC :
+ { U32 tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize);
+ if (FSEv07_isError(headerSize)) return ERROR(corruption_detected);
+ if (tableLog > maxLog) return ERROR(corruption_detected);
+ FSEv07_buildDTable(DTable, norm, max, tableLog);
+ return headerSize;
+ } }
+}
+
+
+static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
+ FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+
+ /* check */
+ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ { int nbSeq = *ip++;
+ if (!nbSeq) { *nbSeqPtr=0; return 1; }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ if (ip+2 > iend) return ERROR(srcSize_wrong);
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+ } else {
+ if (ip >= iend) return ERROR(srcSize_wrong);
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+ }
+
+ /* FSE table descriptors */
+ { U32 const LLtype = *ip >> 6;
+ U32 const OFtype = (*ip >> 4) & 3;
+ U32 const MLtype = (*ip >> 2) & 3;
+ ip++;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
+ if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
+ ip += llhSize;
+ }
+ { size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
+ if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected);
+ ip += ofhSize;
+ }
+ { size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
+ if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected);
+ ip += mlhSize;
+ } }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+} seq_t;
+
+typedef struct {
+ BITv07_DStream_t DStream;
+ FSEv07_DState_t stateLL;
+ FSEv07_DState_t stateOffb;
+ FSEv07_DState_t stateML;
+ size_t prevOffset[ZSTDv07_REP_INIT];
+} seqState_t;
+
+
+static seq_t ZSTDv07_decodeSequence(seqState_t* seqState)
+{
+ seq_t seq;
+
+ U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL));
+ U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML));
+ U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
+
+ U32 const llBits = LL_bits[llCode];
+ U32 const mlBits = ML_bits[mlCode];
+ U32 const ofBits = ofCode;
+ U32 const totalBits = llBits+mlBits+ofBits;
+
+ static const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+ static const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+ static const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+ /* sequence */
+ { size_t offset;
+ if (!ofCode)
+ offset = 0;
+ else {
+ offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits); /* <= (ZSTDv07_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));
+ }
+
+ if (ofCode <= 1) {
+ if ((llCode == 0) & (offset <= 1)) offset = 1-offset;
+ if (offset) {
+ size_t const temp = seqState->prevOffset[offset];
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } else {
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream));
+
+ seq.litLength = LL_base[llCode] + ((llCode>15) ? BITv07_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() ||
+ (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream));
+
+ /* ANS state update */
+ FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */
+ FSEv07_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */
+ if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream)); /* <= 18 bits */
+ FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */
+
+ return seq;
+}
+
+
+static
+size_t ZSTDv07_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+
+ /* copy Literals */
+ ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ if (op > oend_w || sequence.matchLength < MINMATCH) {
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+ }
+ } }
+ /* Requirement: op <= oend_w */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTDv07_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTDv07_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_w) {
+ ZSTDv07_wildcopy(op, match, oend_w - op);
+ match += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+
+static size_t ZSTDv07_decompressSequences(
+ ZSTDv07_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ FSEv07_DTable* DTableLL = dctx->LLTable;
+ FSEv07_DTable* DTableML = dctx->MLTable;
+ FSEv07_DTable* DTableOffb = dctx->OffTable;
+ const BYTE* const base = (const BYTE*) (dctx->base);
+ const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ int nbSeq;
+
+ /* Build Decoding Tables */
+ { size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize);
+ if (ZSTDv07_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ }
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) seqState.prevOffset[i] = dctx->rep[i]; }
+ { size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
+ FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {
+ nbSeq--;
+ { seq_t const sequence = ZSTDv07_decodeSequence(&seqState);
+ size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ } }
+
+ /* check if reached exact end */
+ if (nbSeq) return ERROR(corruption_detected);
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ //if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
+ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+
+static void ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+
+static size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
+
+ /* Decode literals sub-block */
+ { size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize);
+ if (ZSTDv07_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+ return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTDv07_checkContinuity(dctx, dst);
+ dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
+
+
+/** ZSTDv07_insertBlock() :
+ insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ ZSTDv07_checkContinuity(dctx, blockStart);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+{
+ if (length > dstCapacity) return ERROR(dstSize_tooSmall);
+ memset(dst, byte, length);
+ return length;
+}
+
+
+/*! ZSTDv07_decompressFrame() :
+* `dctx` must be properly initialized */
+static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t remainingSize = srcSize;
+
+ /* check */
+ if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+ if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
+ if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+ if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTDv07_blockHeaderSize;
+ remainingSize -= ZSTDv07_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ decodedSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (blockProperties.blockType == bt_end) break; /* bt_end */
+
+ if (ZSTDv07_isError(decodedSize)) return decodedSize;
+ if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+
+/*! ZSTDv07_decompress_usingPreparedDCtx() :
+*   Same as ZSTDv07_decompress_usingDict(), but using a reference context `refDCtx` where the dictionary has already been loaded.
+*   It avoids reloading the dictionary each time.
+*   `refDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().
+*   Requires 2 contexts : 1 for reference (`refDCtx`), which will not be modified, and 1 to run the decompression operation (`dctx`) */
+static size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTDv07_copyDCtx(dctx, refDCtx);
+ ZSTDv07_checkContinuity(dctx, dst);
+ return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize);
+ ZSTDv07_checkContinuity(dctx, dst);
+ return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1)
+ size_t regenSize;
+ ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
+ if (dctx==NULL) return ERROR(memory_allocation);
+ regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTDv07_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTDv07_DCtx dctx;
+ return ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
+/* ZSTD_errorFrameSizeInfoLegacy() :
+ assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+ *cSize = ret;
+ *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+
+ /* check */
+ if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+ if (ZSTDv07_isError(frameHeaderSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
+ return;
+ }
+ if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+ return;
+ }
+ if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTDv07_isError(cBlockSize)) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+ return;
+ }
+
+ ip += ZSTDv07_blockHeaderSize;
+ remainingSize -= ZSTDv07_blockHeaderSize;
+
+ if (blockProperties.blockType == bt_end) break;
+
+ if (cBlockSize > remainingSize) {
+ ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+ return;
+ }
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ nbBlocks++;
+ }
+
+ *cSize = ip - (const BYTE*)src;
+ *dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;
+}
+
+/*_******************************
+* Streaming Decompression API
+********************************/
+size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx)
+{
+ return dctx->stage == ZSTDds_skipFrame;
+}
+
+/** ZSTDv07_decompressContinue() :
+*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`),
+* or an error code, which can be tested using ZSTDv07_isError() */
+size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+ if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst);
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
+ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
+ memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
+ dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ }
+ dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+ if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize;
+ memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
+ if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) {
+ dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+ }
+ dctx->expected = 0; /* not necessary to copy more */
+ /* fall-through */
+ case ZSTDds_decodeFrameHeader:
+ { size_t result;
+ memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
+ result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
+ if (ZSTDv07_isError(result)) return result;
+ dctx->expected = ZSTDv07_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+ }
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp);
+ if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+ if (bp.blockType == bt_end) {
+ if (dctx->fParams.checksumFlag) {
+ U64 const h64 = XXH64_digest(&dctx->xxhState);
+ U32 const h32 = (U32)(h64>>11) & ((1<<22)-1);
+ const BYTE* const ip = (const BYTE*)src;
+ U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16);
+ if (check32 != h32) return ERROR(checksum_wrong);
+ }
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ } else {
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->stage = ZSTDds_decompressBlock;
+ }
+ return 0;
+ }
+ case ZSTDds_decompressBlock:
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTDv07_blockHeaderSize;
+ dctx->previousDstEnd = (char*)dst + rSize;
+ if (ZSTDv07_isError(rSize)) return rSize;
+ if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+ return rSize;
+ }
+ case ZSTDds_decodeSkippableHeader:
+ { memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+ }
+ case ZSTDds_skipFrame:
+ { dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+}
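+
+/* Illustrative driver loop for the streaming protocol above (a sketch, not part of the library).
+*  Assumptions : `readExact(p, n)` is a hypothetical helper that blocks until exactly `n` input
+*  bytes are stored at `p`, `inBuff` is large enough for any value returned by
+*  ZSTDv07_nextSrcSizeToDecompress(), and the whole frame fits into the contiguous
+*  output buffer `out`..`outEnd` :
+*
+*      ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
+*      BYTE* op = out;
+*      ZSTDv07_decompressBegin(dctx);
+*      for (;;) {
+*          size_t const toRead = ZSTDv07_nextSrcSizeToDecompress(dctx);
+*          size_t generated;
+*          if (toRead == 0) break;
+*          readExact(inBuff, toRead);
+*          generated = ZSTDv07_decompressContinue(dctx, op, (size_t)(outEnd - op), inBuff, toRead);
+*          if (ZSTDv07_isError(generated)) break;
+*          op += generated;
+*      }
+*      ZSTDv07_freeDCtx(dctx);
+*/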
+
+
+static size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+ dctx->base = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+ return 0;
+}
+
+static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ { size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize);
+ if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted);
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ U32 offcodeMaxValue=MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+ if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+ if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ { size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+ if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+ dictPtr += litlengthHeaderSize;
+ }
+
+ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+ dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+ dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+ dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+ dictPtr += 12;
+
+ dctx->litEntropy = dctx->fseEntropy = 1;
+ return dictPtr - (const BYTE*)dict;
+}
+
+static size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTDv07_DICT_MAGIC) {
+ return ZSTDv07_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + 4);
+
+ /* load entropy tables */
+ dict = (const char*)dict + 8;
+ dictSize -= 8;
+ { size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize);
+ if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+
+ /* reference dictionary content */
+ return ZSTDv07_refDictContent(dctx, dict, dictSize);
+}
+
+
+size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ { size_t const errorCode = ZSTDv07_decompressBegin(dctx);
+ if (ZSTDv07_isError(errorCode)) return errorCode; }
+
+ if (dict && dictSize) {
+ size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize);
+ if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted);
+ }
+
+ return 0;
+}
+
+
+struct ZSTDv07_DDict_s {
+ void* dict;
+ size_t dictSize;
+ ZSTDv07_DCtx* refContext;
+};  /* typedef'd to ZSTDv07_DDict within zstd_v07.h */
+
+static ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
+{
+ if (!customMem.customAlloc && !customMem.customFree)
+ customMem = defaultCustomMem;
+
+ if (!customMem.customAlloc || !customMem.customFree)
+ return NULL;
+
+ { ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict));
+ void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize);
+ ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem);
+
+ if (!dictContent || !ddict || !dctx) {
+ customMem.customFree(customMem.opaque, dictContent);
+ customMem.customFree(customMem.opaque, ddict);
+ customMem.customFree(customMem.opaque, dctx);
+ return NULL;
+ }
+
+ memcpy(dictContent, dict, dictSize);
+ { size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize);
+ if (ZSTDv07_isError(errorCode)) {
+ customMem.customFree(customMem.opaque, dictContent);
+ customMem.customFree(customMem.opaque, ddict);
+ customMem.customFree(customMem.opaque, dctx);
+ return NULL;
+ } }
+
+ ddict->dict = dictContent;
+ ddict->dictSize = dictSize;
+ ddict->refContext = dctx;
+ return ddict;
+ }
+}
+
+/*! ZSTDv07_createDDict() :
+* Create a digested dictionary, ready to start decompression without startup delay.
+* `dict` can be released after `ZSTDv07_DDict` creation */
+ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTDv07_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTDv07_createDDict_advanced(dict, dictSize, allocator);
+}
+
+size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict)
+{
+ ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree;
+ void* const opaque = ddict->refContext->customMem.opaque;
+ ZSTDv07_freeDCtx(ddict->refContext);
+ cFree(opaque, ddict->dict);
+ cFree(opaque, ddict);
+ return 0;
+}
+
+/*! ZSTDv07_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Use dictionary without significant overhead. */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTDv07_DDict* ddict)
+{
+ return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext,
+ dst, dstCapacity,
+ src, srcSize);
+}
+/*
+ Buffered version of Zstd compression library
+ Copyright (C) 2015-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd homepage : http://www.zstd.net/
+*/
+
+
+
+/*-***************************************************************************
+* Streaming decompression howto
+*
+* A ZBUFFv07_DCtx object is required to track streaming operations.
+* Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
+* Use ZBUFFv07_decompressInit() to start a new decompression operation,
+* or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFFv07_DCtx objects can be re-init multiple times.
+*
+* Use ZBUFFv07_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+* or 0 when a frame is completely decoded,
+* or an error code, which can be tested using ZBUFFv07_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
+*  output : ZBUFFv07_recommendedDOutSize == 128 KB; a block is the internal decoding unit, so a fully decoded block can always be written out.
+*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
+
+typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
+ ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage;
+
+/* *** Resource management *** */
+struct ZBUFFv07_DCtx_s {
+ ZSTDv07_DCtx* zd;
+ ZSTDv07_frameParams fParams;
+ ZBUFFv07_dStage stage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t blockSize;
+ BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
+ size_t lhSize;
+ ZSTDv07_customMem customMem;
+}; /* typedef'd to ZBUFFv07_DCtx within "zstd_buffered.h" */
+
+ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem);
+
+ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void)
+{
+ return ZBUFFv07_createDCtx_advanced(defaultCustomMem);
+}
+
+ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem)
+{
+ ZBUFFv07_DCtx* zbd;
+
+ if (!customMem.customAlloc && !customMem.customFree)
+ customMem = defaultCustomMem;
+
+ if (!customMem.customAlloc || !customMem.customFree)
+ return NULL;
+
+ zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx));
+ if (zbd==NULL) return NULL;
+ memset(zbd, 0, sizeof(ZBUFFv07_DCtx));
+ memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem));
+ zbd->zd = ZSTDv07_createDCtx_advanced(customMem);
+ if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; }
+ zbd->stage = ZBUFFds_init;
+ return zbd;
+}
+
+size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd)
+{
+ if (zbd==NULL) return 0; /* support free on null */
+ ZSTDv07_freeDCtx(zbd->zd);
+ if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
+ if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
+ zbd->customMem.customFree(zbd->customMem.opaque, zbd);
+ return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize)
+{
+ zbd->stage = ZBUFFds_loadHeader;
+ zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
+ return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize);
+}
+
+size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd)
+{
+ return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0);
+}
+
+
+/* internal util function */
+MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr)
+{
+ const char* const istart = (const char*)src;
+ const char* const iend = istart + *srcSizePtr;
+ const char* ip = istart;
+ char* const ostart = (char*)dst;
+ char* const oend = ostart + *dstCapacityPtr;
+ char* op = ostart;
+ U32 notDone = 1;
+
+ while (notDone) {
+ switch(zbd->stage)
+ {
+ case ZBUFFds_init :
+ return ERROR(init_missing);
+
+ case ZBUFFds_loadHeader :
+ { size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
+ if (ZSTDv07_isError(hSize)) return hSize;
+ if (hSize != 0) {
+ size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */
+ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */
+ memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
+ zbd->lhSize += iend-ip;
+ *dstCapacityPtr = 0;
+ return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* Consume header */
+ { size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv07_frameHeaderSize_min */
+ size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
+ if (ZSTDv07_isError(h1Result)) return h1Result;
+ if (h1Size < zbd->lhSize) { /* long header */
+ size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+ size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
+ if (ZSTDv07_isError(h2Result)) return h2Result;
+ } }
+
+ zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN);
+
+            /* Frame header instructs buffer sizes */
+ { size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX);
+ zbd->blockSize = blockSize;
+ if (zbd->inBuffSize < blockSize) {
+ zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
+ zbd->inBuffSize = blockSize;
+ zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
+ if (zbd->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ { size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+ if (zbd->outBuffSize < neededOutSize) {
+ zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
+ zbd->outBuffSize = neededOutSize;
+ zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize);
+ if (zbd->outBuff == NULL) return ERROR(memory_allocation);
+ } } }
+ zbd->stage = ZBUFFds_read;
+ /* pass-through */
+ /* fall-through */
+ case ZBUFFds_read:
+ { size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+ if (neededInSize==0) { /* end of frame */
+ zbd->stage = ZBUFFds_init;
+ notDone = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
+ size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
+ zbd->outBuff + zbd->outStart, (isSkipFrame ? 0 : zbd->outBuffSize - zbd->outStart),
+ ip, neededInSize);
+ if (ZSTDv07_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize && !isSkipFrame) break; /* this was just a header */
+ zbd->outEnd = zbd->outStart + decodedSize;
+ zbd->stage = ZBUFFds_flush;
+ break;
+ }
+ if (ip==iend) { notDone = 0; break; } /* no more input */
+ zbd->stage = ZBUFFds_load;
+ }
+ /* fall-through */
+ case ZBUFFds_load:
+ { size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+ size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
+ size_t loadedSize;
+ if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */
+ loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
+ ip += loadedSize;
+ zbd->inPos += loadedSize;
+ if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ { const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
+ size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
+ zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+ zbd->inBuff, neededInSize);
+ if (ZSTDv07_isError(decodedSize)) return decodedSize;
+ zbd->inPos = 0; /* input is consumed */
+ if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
+ zbd->outEnd = zbd->outStart + decodedSize;
+ zbd->stage = ZBUFFds_flush;
+ /* break; */
+ /* pass-through */
+ }
+ }
+ /* fall-through */
+ case ZBUFFds_flush:
+ { size_t const toFlushSize = zbd->outEnd - zbd->outStart;
+ size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
+ op += flushedSize;
+ zbd->outStart += flushedSize;
+ if (flushedSize == toFlushSize) {
+ zbd->stage = ZBUFFds_read;
+ if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
+ zbd->outStart = zbd->outEnd = 0;
+ break;
+ }
+ /* cannot flush everything */
+ notDone = 0;
+ break;
+ }
+ default: return ERROR(GENERIC); /* impossible */
+ } }
+
+ /* result */
+ *srcSizePtr = ip-istart;
+ *dstCapacityPtr = op-ostart;
+ { size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+ nextSrcSizeHint -= zbd->inPos; /* already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+
+
+/* *************************************
+* Tool functions
+***************************************/
+size_t ZBUFFv07_recommendedDInSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; }
+size_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.h b/vendor/github.com/DataDog/zstd/zstd_v07.h
new file mode 100644
index 000000000..a566c1d10
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv07_H_235446
+#define ZSTDv07_H_235446
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*====== Dependency ======*/
+#include <stddef.h> /* size_t */
+
+
+/*====== Export for Windows ======*/
+/*!
+* ZSTDv07_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+*/
+#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
+# define ZSTDLIBv07_API __declspec(dllexport)
+#else
+# define ZSTDLIBv07_API
+#endif
+
+
+/* *************************************
+* Simple API
+***************************************/
+/*! ZSTDv07_getDecompressedSize() :
+* @return : decompressed size if known, 0 otherwise.
+    note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to learn the precise failure cause.
+    note 2 : the decompressed size could be wrong or intentionally modified !
+             always ensure the result fits within the application's authorized limits */
+unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTDv07_decompress() :
+    `compressedSize` : must be the _exact_ size of the compressed input, otherwise decompression will fail.
+    `dstCapacity` must be equal to or larger than originalSize.
+ @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
+ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
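+
+/* Typical usage sketch (illustration only) : assumes the frame embeds its content size,
+*  and that malloc()/free() come from <stdlib.h> and puts() from <stdio.h> :
+*
+*      unsigned long long const rSize = ZSTDv07_getDecompressedSize(src, srcSize);
+*      void* const dst = (rSize != 0) ? malloc((size_t)rSize) : NULL;
+*      if (dst != NULL) {
+*          size_t const dSize = ZSTDv07_decompress(dst, (size_t)rSize, src, srcSize);
+*          if (ZSTDv07_isError(dSize)) puts(ZSTDv07_getErrorName(dSize));
+*          free(dst);
+*      }
+*/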
+
+/**
+ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
+ srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+ cSize (output parameter) : the number of bytes that would be read to decompress this frame
+      or an error code if it fails (which can be tested using ZSTDv07_isError())
+ dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+ or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+ note : assumes `cSize` and `dBound` are _not_ NULL.
+*/
+void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+ size_t* cSize, unsigned long long* dBound);
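+
+/* Usage sketch (illustration only) : walk a buffer that may hold several concatenated v0.7 frames.
+*  `buf` and `bufSize` are assumptions of the example, not part of this API :
+*
+*      size_t pos = 0;
+*      unsigned long long totalBound = 0;
+*      while (pos < bufSize) {
+*          size_t cSize; unsigned long long dBound;
+*          ZSTDv07_findFrameSizeInfoLegacy((const char*)buf + pos, bufSize - pos, &cSize, &dBound);
+*          if (ZSTDv07_isError(cSize)) break;
+*          totalBound += dBound;
+*          pos += cSize;
+*      }
+*/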
+
+/*====== Helper functions ======*/
+ZSTDLIBv07_API unsigned ZSTDv07_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code); /*!< provides readable string from an error code */
+
+
+/*-*************************************
+* Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
+ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
+ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx); /*!< @return : errorCode */
+
+/** ZSTDv07_decompressDCtx() :
+* Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
+ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-************************
+* Simple dictionary API
+***************************/
+/*! ZSTDv07_decompress_usingDict() :
+* Decompression using a pre-defined Dictionary content (see dictBuilder).
+* Dictionary must be identical to the one used during compression.
+*   Note : This function loads the dictionary, resulting in a significant startup time */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/*-**************************
+* Advanced Dictionary API
+****************************/
+/*! ZSTDv07_createDDict() :
+* Create a digested dictionary, ready to start decompression operation without startup delay.
+* `dict` can be released after creation */
+typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
+ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
+ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);
+
+/*! ZSTDv07_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTDv07_DDict* ddict);
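+
+/* Usage sketch (illustration only) : digest the dictionary once, then reuse it for many frames.
+*  `dictBuffer`/`dictSize`, the `frames[]`/`frameSizes[]` arrays and the output buffer are
+*  assumptions of the example, not part of this API :
+*
+*      ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dictBuffer, dictSize);
+*      ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
+*      size_t i;
+*      for (i = 0; i < nbFrames; i++) {
+*          size_t const r = ZSTDv07_decompress_usingDDict(dctx, dstBuf, dstCapacity,
+*                                                         frames[i], frameSizes[i], ddict);
+*          if (ZSTDv07_isError(r)) break;
+*      }
+*      ZSTDv07_freeDDict(ddict);
+*      ZSTDv07_freeDCtx(dctx);
+*/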
+
+typedef struct {
+ unsigned long long frameContentSize;
+ unsigned windowSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTDv07_frameParams;
+
+ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+
+
+
+
+/* *************************************
+* Streaming functions
+***************************************/
+typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
+ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
+ZSTDLIBv07_API size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);
+
+ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+* Streaming decompression howto
+*
+* A ZBUFFv07_DCtx object is required to track streaming operations.
+* Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
+* Use ZBUFFv07_decompressInit() to start a new decompression operation,
+* or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
+* Note that ZBUFFv07_DCtx objects can be re-init multiple times.
+*
+* Use ZBUFFv07_decompressContinue() repetitively to consume your input.
+* *srcSizePtr and *dstCapacityPtr can be any size.
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+* or 0 when a frame is completely decoded,
+* or an error code, which can be tested using ZBUFFv07_isError().
+*
+* Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
+*  output : ZBUFFv07_recommendedDOutSize == 128 KB; a block is the internal decoding unit, so a fully decoded block can always be written out.
+*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
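+
+/* Minimal decoding-loop sketch for the howto above (illustration only).
+*  `readInput()` and `writeOutput()` are hypothetical I/O helpers; `inBuff` and `outBuff` are
+*  assumed to be sized with the recommended helpers below :
+*
+*      ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
+*      size_t hint = 1;
+*      ZBUFFv07_decompressInit(zbd);
+*      while (hint != 0 && !ZBUFFv07_isError(hint)) {
+*          size_t const readSize = readInput(inBuff, ZBUFFv07_recommendedDInSize());
+*          size_t pos = 0;
+*          if (readSize == 0) break;
+*          while (pos < readSize) {
+*              size_t dstCapacity = ZBUFFv07_recommendedDOutSize();
+*              size_t srcLeft = readSize - pos;
+*              hint = ZBUFFv07_decompressContinue(zbd, outBuff, &dstCapacity, inBuff + pos, &srcLeft);
+*              if (ZBUFFv07_isError(hint)) break;
+*              writeOutput(outBuff, dstCapacity);
+*              pos += srcLeft;
+*              if (hint == 0) break;
+*          }
+*      }
+*      ZBUFFv07_freeDCtx(zbd);
+*/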
+
+
+/* *************************************
+* Tool functions
+***************************************/
+ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
+ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+*   These sizes are just hints; using them tends to improve latency. */
+ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
+ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define ZSTDv07_MAGICNUMBER 0xFD2FB527 /* v0.7 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTDv07_H_235446 */
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.c b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
new file mode 100644
index 000000000..38fbb9076
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
@@ -0,0 +1,2099 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ====== Compiler specifics ====== */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+/* ====== Constants ====== */
+#define ZSTDMT_OVERLAPLOG_DEFAULT 0
+
+
+/* ====== Dependencies ====== */
+#include <string.h> /* memcpy, memset */
+#include <limits.h> /* INT_MAX, UINT_MAX */
+#include "mem.h" /* MEM_STATIC */
+#include "pool.h" /* threadpool */
+#include "threading.h" /* mutex */
+#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstd_ldm.h"
+#include "zstdmt_compress.h"
+
+/* Guards code to support resizing the SeqPool.
+ * We will want to resize the SeqPool to save memory in the future.
+ * Until then, the code is compiled out since it is unused.
+ */
+#define ZSTD_RESIZE_SEQPOOL 0
+
+/* ====== Debug ====== */
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
+ && !defined(_MSC_VER) \
+ && !defined(__MINGW32__)
+
+# include <stdio.h>
+# include <unistd.h>
+# include <sys/times.h>
+
+# define DEBUG_PRINTHEX(l,p,n) { \
+ unsigned debug_u; \
+ for (debug_u=0; debug_u<(n); debug_u++) \
+ RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+ RAWLOG(l, " \n"); \
+}
+
+static unsigned long long GetCurrentClockTimeMicroseconds(void)
+{
+ static clock_t _ticksPerSecond = 0;
+ if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
+
+ { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+ return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
+} }
+
+#define MUTEX_WAIT_TIME_DLEVEL 6
+#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \
+ if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \
+ unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
+ ZSTD_pthread_mutex_lock(mutex); \
+ { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
+ unsigned long long const elapsedTime = (afterTime-beforeTime); \
+ if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
+ DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
+ elapsedTime, #mutex); \
+ } } \
+ } else { \
+ ZSTD_pthread_mutex_lock(mutex); \
+ } \
+}
+
+#else
+
+# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
+# define DEBUG_PRINTHEX(l,p,n) {}
+
+#endif
+
+
+/* ===== Buffer Pool ===== */
+/* a single Buffer Pool can be used by multiple threads in parallel */
+
+typedef struct buffer_s {
+ void* start;
+ size_t capacity;
+} buffer_t;
+
+static const buffer_t g_nullBuffer = { NULL, 0 };
+
+typedef struct ZSTDMT_bufferPool_s {
+ ZSTD_pthread_mutex_t poolMutex;
+ size_t bufferSize;
+ unsigned totalBuffers;
+ unsigned nbBuffers;
+ ZSTD_customMem cMem;
+ buffer_t bTable[1]; /* variable size */
+} ZSTDMT_bufferPool;
+
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+ unsigned const maxNbBuffers = 2*nbWorkers + 3;
+ ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
+ sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
+ if (bufPool==NULL) return NULL;
+ if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
+ ZSTD_free(bufPool, cMem);
+ return NULL;
+ }
+ bufPool->bufferSize = 64 KB;
+ bufPool->totalBuffers = maxNbBuffers;
+ bufPool->nbBuffers = 0;
+ bufPool->cMem = cMem;
+ return bufPool;
+}
+
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+ unsigned u;
+ DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
+ if (!bufPool) return; /* compatibility with free on NULL */
+ for (u=0; u<bufPool->totalBuffers; u++) {
+ DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
+ ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
+ }
+ ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
+ ZSTD_free(bufPool, bufPool->cMem);
+}
+
+/* only works at initialization, not during compression */
+static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
+{
+ size_t const poolSize = sizeof(*bufPool)
+ + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
+ unsigned u;
+ size_t totalBufferSize = 0;
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ for (u=0; u<bufPool->totalBuffers; u++)
+ totalBufferSize += bufPool->bTable[u].capacity;
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+
+ return poolSize + totalBufferSize;
+}
+
+/* ZSTDMT_setBufferSize() :
+ * all future buffers provided by this buffer pool will have _at least_ this size
+ * note : it's better for all buffers to have the same size,
+ * as they then become freely interchangeable, reducing malloc/free usage and memory fragmentation */
+static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
+{
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
+ bufPool->bufferSize = bSize;
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+}
+
+
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+{
+ unsigned const maxNbBuffers = 2*nbWorkers + 3;
+ if (srcBufPool==NULL) return NULL;
+ if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
+ return srcBufPool;
+ /* need a larger buffer pool */
+ { ZSTD_customMem const cMem = srcBufPool->cMem;
+ size_t const bSize = srcBufPool->bufferSize; /* forward parameters */
+ ZSTDMT_bufferPool* newBufPool;
+ ZSTDMT_freeBufferPool(srcBufPool);
+ newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ if (newBufPool==NULL) return newBufPool;
+ ZSTDMT_setBufferSize(newBufPool, bSize);
+ return newBufPool;
+ }
+}
+
+/** ZSTDMT_getBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer, with start pointer and size
+ * note: allocation may fail, in this case, start==NULL and size==0 */
+static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
+{
+ size_t const bSize = bufPool->bufferSize;
+ DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ if (bufPool->nbBuffers) { /* try to use an existing buffer */
+ buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
+ size_t const availBufferSize = buf.capacity;
+ bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
+ if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
+ /* large enough, but not too much */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
+ bufPool->nbBuffers, (U32)buf.capacity);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ return buf;
+ }
+ /* size conditions not respected : scratch this buffer, create new one */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
+ ZSTD_free(buf.start, bufPool->cMem);
+ }
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ /* create new buffer */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
+ { buffer_t buffer;
+ void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+ buffer.start = start; /* note : start can be NULL if malloc fails ! */
+ buffer.capacity = (start==NULL) ? 0 : bSize;
+ if (start==NULL) {
+ DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
+ } else {
+ DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+ }
+ return buffer;
+ }
+}
+
+#if ZSTD_RESIZE_SEQPOOL
+/** ZSTDMT_resizeBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer that is at least the buffer pool buffer size.
+ * If a reallocation happens, the data in the input buffer is copied.
+ */
+static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
+{
+ size_t const bSize = bufPool->bufferSize;
+ if (buffer.capacity < bSize) {
+ void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+ buffer_t newBuffer;
+ newBuffer.start = start;
+ newBuffer.capacity = start == NULL ? 0 : bSize;
+ if (start != NULL) {
+ assert(newBuffer.capacity >= buffer.capacity);
+ memcpy(newBuffer.start, buffer.start, buffer.capacity);
+ DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
+ return newBuffer;
+ }
+ DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
+ }
+ return buffer;
+}
+#endif
+
+/* store buffer for later re-use, up to pool capacity */
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
+{
+ DEBUGLOG(5, "ZSTDMT_releaseBuffer");
+ if (buf.start == NULL) return; /* compatible with release on NULL */
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ if (bufPool->nbBuffers < bufPool->totalBuffers) {
+ bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
+ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
+ (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ return;
+ }
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ /* Reached bufferPool capacity (should not happen) */
+ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
+ ZSTD_free(buf.start, bufPool->cMem);
+}
+
+
+/* ===== Seq Pool Wrapper ====== */
+
+static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
+
+typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
+
+static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
+{
+ return ZSTDMT_sizeof_bufferPool(seqPool);
+}
+
+static rawSeqStore_t bufferToSeq(buffer_t buffer)
+{
+ rawSeqStore_t seq = {NULL, 0, 0, 0};
+ seq.seq = (rawSeq*)buffer.start;
+ seq.capacity = buffer.capacity / sizeof(rawSeq);
+ return seq;
+}
+
+static buffer_t seqToBuffer(rawSeqStore_t seq)
+{
+ buffer_t buffer;
+ buffer.start = seq.seq;
+ buffer.capacity = seq.capacity * sizeof(rawSeq);
+ return buffer;
+}
+
+static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
+{
+ if (seqPool->bufferSize == 0) {
+ return kNullRawSeqStore;
+ }
+ return bufferToSeq(ZSTDMT_getBuffer(seqPool));
+}
+
+#if ZSTD_RESIZE_SEQPOOL
+static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+ return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
+}
+#endif
+
+static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+ ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
+}
+
+static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
+{
+ ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
+}
+
+static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+ ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ if (seqPool == NULL) return NULL;
+ ZSTDMT_setNbSeq(seqPool, 0);
+ return seqPool;
+}
+
+static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
+{
+ ZSTDMT_freeBufferPool(seqPool);
+}
+
+static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
+{
+ return ZSTDMT_expandBufferPool(pool, nbWorkers);
+}
+
+
+/* ===== CCtx Pool ===== */
+/* a single CCtx Pool can be used by multiple threads in parallel */
+
+typedef struct {
+ ZSTD_pthread_mutex_t poolMutex;
+ int totalCCtx;
+ int availCCtx;
+ ZSTD_customMem cMem;
+ ZSTD_CCtx* cctx[1]; /* variable size */
+} ZSTDMT_CCtxPool;
+
+/* note : all CCtxs borrowed from the pool should be released back to the pool _before_ freeing the pool */
+static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
+{
+ int cid;
+ for (cid=0; cid<pool->totalCCtx; cid++)
+ ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */
+ ZSTD_pthread_mutex_destroy(&pool->poolMutex);
+ ZSTD_free(pool, pool->cMem);
+}
+
+/* ZSTDMT_createCCtxPool() :
+ * assumes nbWorkers >= 1, checked by caller ZSTDMT_createCCtx() */
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
+ ZSTD_customMem cMem)
+{
+ ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
+ sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
+ assert(nbWorkers > 0);
+ if (!cctxPool) return NULL;
+ if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
+ ZSTD_free(cctxPool, cMem);
+ return NULL;
+ }
+ cctxPool->cMem = cMem;
+ cctxPool->totalCCtx = nbWorkers;
+ cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
+ cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
+ if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
+ DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
+ return cctxPool;
+}
+
+static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
+ int nbWorkers)
+{
+ if (srcPool==NULL) return NULL;
+ if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */
+ /* need a larger cctx pool */
+ { ZSTD_customMem const cMem = srcPool->cMem;
+ ZSTDMT_freeCCtxPool(srcPool);
+ return ZSTDMT_createCCtxPool(nbWorkers, cMem);
+ }
+}
+
+/* only works during initialization phase, not during compression */
+static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
+{
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+ { unsigned const nbWorkers = cctxPool->totalCCtx;
+ size_t const poolSize = sizeof(*cctxPool)
+ + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
+ unsigned u;
+ size_t totalCCtxSize = 0;
+ for (u=0; u<nbWorkers; u++) {
+ totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
+ }
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ assert(nbWorkers > 0);
+ return poolSize + totalCCtxSize;
+ }
+}
+
+static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
+{
+ DEBUGLOG(5, "ZSTDMT_getCCtx");
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+ if (cctxPool->availCCtx) {
+ cctxPool->availCCtx--;
+ { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ return cctx;
+ } }
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ DEBUGLOG(5, "create one more CCtx");
+ return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
+}
+
+static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return; /* compatibility with release on NULL */
+ ZSTD_pthread_mutex_lock(&pool->poolMutex);
+ if (pool->availCCtx < pool->totalCCtx)
+ pool->cctx[pool->availCCtx++] = cctx;
+ else {
+ /* pool overflow : should not happen, since totalCCtx==nbWorkers */
+ DEBUGLOG(4, "CCtx pool overflow : free cctx");
+ ZSTD_freeCCtx(cctx);
+ }
+ ZSTD_pthread_mutex_unlock(&pool->poolMutex);
+}
+
+/* ==== Serial State ==== */
+
+typedef struct {
+ void const* start;
+ size_t size;
+} range_t;
+
+typedef struct {
+ /* All variables in the struct are protected by mutex. */
+ ZSTD_pthread_mutex_t mutex;
+ ZSTD_pthread_cond_t cond;
+ ZSTD_CCtx_params params;
+ ldmState_t ldmState;
+ XXH64_state_t xxhState;
+ unsigned nextJobID;
+ /* Protects ldmWindow.
+ * Must be acquired after the main mutex when acquiring both.
+ */
+ ZSTD_pthread_mutex_t ldmWindowMutex;
+ ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
+ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
+} serialState_t;
+
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
+{
+ /* Adjust parameters */
+ if (params.ldmParams.enableLdm) {
+ DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
+ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+ assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+ assert(params.ldmParams.hashRateLog < 32);
+ serialState->ldmState.hashPower =
+ ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
+ } else {
+ memset(&params.ldmParams, 0, sizeof(params.ldmParams));
+ }
+ serialState->nextJobID = 0;
+ if (params.fParams.checksumFlag)
+ XXH64_reset(&serialState->xxhState, 0);
+ if (params.ldmParams.enableLdm) {
+ ZSTD_customMem cMem = params.customMem;
+ unsigned const hashLog = params.ldmParams.hashLog;
+ size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
+ unsigned const bucketLog =
+ params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
+ size_t const bucketSize = (size_t)1 << bucketLog;
+ unsigned const prevBucketLog =
+ serialState->params.ldmParams.hashLog -
+ serialState->params.ldmParams.bucketSizeLog;
+ /* Size the seq pool tables */
+ ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
+ /* Reset the window */
+ ZSTD_window_clear(&serialState->ldmState.window);
+ serialState->ldmWindow = serialState->ldmState.window;
+ /* Resize tables and output space if necessary. */
+ if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
+ ZSTD_free(serialState->ldmState.hashTable, cMem);
+ serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
+ }
+ if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
+ ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+ serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
+ }
+ if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
+ return 1;
+ /* Zero the tables */
+ memset(serialState->ldmState.hashTable, 0, hashSize);
+ memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+ }
+ serialState->params = params;
+ serialState->params.jobSize = (U32)jobSize;
+ return 0;
+}
+
+static int ZSTDMT_serialState_init(serialState_t* serialState)
+{
+ int initError = 0;
+ memset(serialState, 0, sizeof(*serialState));
+ initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
+ initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
+ return initError;
+}
+
+static void ZSTDMT_serialState_free(serialState_t* serialState)
+{
+ ZSTD_customMem cMem = serialState->params.customMem;
+ ZSTD_pthread_mutex_destroy(&serialState->mutex);
+ ZSTD_pthread_cond_destroy(&serialState->cond);
+ ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
+ ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
+ ZSTD_free(serialState->ldmState.hashTable, cMem);
+ ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+}
+
+static void ZSTDMT_serialState_update(serialState_t* serialState,
+ ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
+ range_t src, unsigned jobID)
+{
+ /* Wait for our turn */
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+ while (serialState->nextJobID < jobID) {
+ DEBUGLOG(5, "wait for serialState->cond");
+ ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
+ }
+ /* A future job may error and skip our job */
+ if (serialState->nextJobID == jobID) {
+ /* It is now our turn, do any processing necessary */
+ if (serialState->params.ldmParams.enableLdm) {
+ size_t error;
+ assert(seqStore.seq != NULL && seqStore.pos == 0 &&
+ seqStore.size == 0 && seqStore.capacity > 0);
+ assert(src.size <= serialState->params.jobSize);
+ ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+ error = ZSTD_ldm_generateSequences(
+ &serialState->ldmState, &seqStore,
+ &serialState->params.ldmParams, src.start, src.size);
+ /* We provide a large enough buffer to never fail. */
+ assert(!ZSTD_isError(error)); (void)error;
+ /* Update ldmWindow to match the ldmState.window and signal the main
+ * thread if it is waiting for a buffer.
+ */
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+ serialState->ldmWindow = serialState->ldmState.window;
+ ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+ ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+ }
+ if (serialState->params.fParams.checksumFlag && src.size > 0)
+ XXH64_update(&serialState->xxhState, src.start, src.size);
+ }
+    /* Now it is the next job's turn */
+ serialState->nextJobID++;
+ ZSTD_pthread_cond_broadcast(&serialState->cond);
+ ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+ if (seqStore.size > 0) {
+ size_t const err = ZSTD_referenceExternalSequences(
+ jobCCtx, seqStore.seq, seqStore.size);
+ assert(serialState->params.ldmParams.enableLdm);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
+ unsigned jobID, size_t cSize)
+{
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+ if (serialState->nextJobID <= jobID) {
+ assert(ZSTD_isError(cSize)); (void)cSize;
+ DEBUGLOG(5, "Skipping past job %u because of error", jobID);
+ serialState->nextJobID = jobID + 1;
+ ZSTD_pthread_cond_broadcast(&serialState->cond);
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+ ZSTD_window_clear(&serialState->ldmWindow);
+ ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+ ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+ }
+ ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+}
+
+
+/* ------------------------------------------ */
+/* ===== Worker thread ===== */
+/* ------------------------------------------ */
+
+static const range_t kNullRange = { NULL, 0 };
+
+typedef struct {
+ size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
+ size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
+ ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
+ ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
+ ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
+ serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */
+ buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
+ range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
+ range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */
+ unsigned jobID; /* set by mtctx, then read by worker => no barrier */
+ unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
+ unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
+ ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
+ const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
+ unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
+ size_t dstFlushed; /* used only by mtctx */
+ unsigned frameChecksumNeeded; /* used only by mtctx */
+} ZSTDMT_jobDescription;
+
+#define JOB_ERROR(e) { \
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \
+ job->cSize = e; \
+ ZSTD_pthread_mutex_unlock(&job->job_mutex); \
+ goto _endJob; \
+}
+
+/* ZSTDMT_compressionJob() is a POOL_function type */
+static void ZSTDMT_compressionJob(void* jobDescription)
+{
+ ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+ ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
+ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
+ rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
+ buffer_t dstBuff = job->dstBuff;
+ size_t lastCBlockSize = 0;
+
+ /* resources */
+ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
+ if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
+ dstBuff = ZSTDMT_getBuffer(job->bufPool);
+ if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
+ job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */
+ }
+ if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+ JOB_ERROR(ERROR(memory_allocation));
+
+ /* Don't compute the checksum for chunks, since we compute it externally,
+ * but write it in the header.
+ */
+ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
+ /* Don't run LDM for the chunks, since we handle it externally */
+ jobParams.ldmParams.enableLdm = 0;
+
+
+ /* init */
+ if (job->cdict) {
+ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
+ assert(job->firstJob); /* only allowed for first job */
+ if (ZSTD_isError(initError)) JOB_ERROR(initError);
+ } else { /* srcStart points at reloaded section */
+ U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
+ { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
+ if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
+ }
+ { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
+ job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+ ZSTD_dtlm_fast,
+ NULL, /*cdict*/
+ jobParams, pledgedSrcSize);
+ if (ZSTD_isError(initError)) JOB_ERROR(initError);
+ } }
+
+ /* Perform serial step as early as possible, but after CCtx initialization */
+ ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
+
+ if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
+ size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
+ if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
+ DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
+ ZSTD_invalidateRepCodes(cctx);
+ }
+
+ /* compress */
+ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
+ int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
+ const BYTE* ip = (const BYTE*) job->src.start;
+ BYTE* const ostart = (BYTE*)dstBuff.start;
+ BYTE* op = ostart;
+ BYTE* oend = op + dstBuff.capacity;
+ int chunkNb;
+ if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */
+ DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
+ assert(job->cSize == 0);
+ for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
+ size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
+ if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
+ ip += chunkSize;
+ op += cSize; assert(op < oend);
+ /* stats */
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+ job->cSize += cSize;
+ job->consumed = chunkSize * chunkNb;
+ DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
+ (U32)cSize, (U32)job->cSize);
+ ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */
+ ZSTD_pthread_mutex_unlock(&job->job_mutex);
+ }
+ /* last block */
+ assert(chunkSize > 0);
+ assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+ if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
+ size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
+ size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
+ size_t const cSize = (job->lastJob) ?
+ ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) :
+ ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
+ if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
+ lastCBlockSize = cSize;
+ } }
+
+_endJob:
+ ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
+ if (job->prefix.size > 0)
+ DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
+ DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
+ /* release resources */
+ ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
+ ZSTDMT_releaseCCtx(job->cctxPool, cctx);
+ /* report */
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+ if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
+ job->cSize += lastCBlockSize;
+ job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */
+ ZSTD_pthread_cond_signal(&job->job_cond);
+ ZSTD_pthread_mutex_unlock(&job->job_mutex);
+}
+
+
+/* ------------------------------------------ */
+/* ===== Multi-threaded compression ===== */
+/* ------------------------------------------ */
+
+typedef struct {
+ range_t prefix; /* read-only non-owned prefix buffer */
+ buffer_t buffer;
+ size_t filled;
+} inBuff_t;
+
+typedef struct {
+ BYTE* buffer; /* The round input buffer. All jobs get references
+ * to pieces of the buffer. ZSTDMT_tryGetInputRange()
+ * handles handing out job input buffers, and makes
+ * sure it doesn't overlap with any pieces still in use.
+ */
+ size_t capacity; /* The capacity of buffer. */
+ size_t pos; /* The position of the current inBuff in the round
+                             * buffer. Updated past the end of the inBuff once
+ * the inBuff is sent to the worker thread.
+ * pos <= capacity.
+ */
+} roundBuff_t;
+
+static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+
+#define RSYNC_LENGTH 32
+
+typedef struct {
+ U64 hash;
+ U64 hitMask;
+ U64 primePower;
+} rsyncState_t;
+
+struct ZSTDMT_CCtx_s {
+ POOL_ctx* factory;
+ ZSTDMT_jobDescription* jobs;
+ ZSTDMT_bufferPool* bufPool;
+ ZSTDMT_CCtxPool* cctxPool;
+ ZSTDMT_seqPool* seqPool;
+ ZSTD_CCtx_params params;
+ size_t targetSectionSize;
+ size_t targetPrefixSize;
+ int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
+ inBuff_t inBuff;
+ roundBuff_t roundBuff;
+ serialState_t serial;
+ rsyncState_t rsync;
+ unsigned singleBlockingThread;
+ unsigned jobIDMask;
+ unsigned doneJobID;
+ unsigned nextJobID;
+ unsigned frameEnded;
+ unsigned allJobsCompleted;
+ unsigned long long frameContentSize;
+ unsigned long long consumed;
+ unsigned long long produced;
+ ZSTD_customMem cMem;
+ ZSTD_CDict* cdictLocal;
+ const ZSTD_CDict* cdict;
+};
+
+static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
+{
+ U32 jobNb;
+ if (jobTable == NULL) return;
+ for (jobNb=0; jobNb<nbJobs; jobNb++) {
+ ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
+ ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
+ }
+ ZSTD_free(jobTable, cMem);
+}
+
+/* ZSTDMT_createJobsTable()
+ * allocates and initializes a job table.
+ * Updates *nbJobsPtr to the next power of 2 value, used as the table size */
+static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
+{
+ U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
+ U32 const nbJobs = 1 << nbJobsLog2;
+ U32 jobNb;
+ ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
+ ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
+ int initError = 0;
+ if (jobTable==NULL) return NULL;
+ *nbJobsPtr = nbJobs;
+ for (jobNb=0; jobNb<nbJobs; jobNb++) {
+ initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
+ }
+ if (initError != 0) {
+ ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
+ return NULL;
+ }
+ return jobTable;
+}
+
+static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
+ U32 nbJobs = nbWorkers + 2;
+ if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */
+ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+ mtctx->jobIDMask = 0;
+ mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
+ if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+ assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */
+ mtctx->jobIDMask = nbJobs - 1;
+ }
+ return 0;
+}
+
+
+/* ZSTDMT_CCtxParam_setNbWorkers():
+ * Internal use only */
+size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
+{
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
+}
+
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+ ZSTDMT_CCtx* mtctx;
+ U32 nbJobs = nbWorkers + 2;
+ int initError;
+ DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
+
+ if (nbWorkers < 1) return NULL;
+ nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
+ if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
+ /* invalid custom allocator */
+ return NULL;
+
+ mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
+ if (!mtctx) return NULL;
+ ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+ mtctx->cMem = cMem;
+ mtctx->allJobsCompleted = 1;
+ mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+ mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
+ assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
+ mtctx->jobIDMask = nbJobs - 1;
+ mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
+ mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
+ initError = ZSTDMT_serialState_init(&mtctx->serial);
+ mtctx->roundBuff = kNullRoundBuff;
+ if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
+ ZSTDMT_freeCCtx(mtctx);
+ return NULL;
+ }
+ DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
+ return mtctx;
+}
+
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+#ifdef ZSTD_MULTITHREAD
+ return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+#else
+ (void)nbWorkers;
+ (void)cMem;
+ return NULL;
+#endif
+}
+
+ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
+{
+ return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
+}
+
+
+/* ZSTDMT_releaseAllJobResources() :
+ * note : ensure all workers are killed first ! */
+static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
+{
+ unsigned jobID;
+ DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
+ for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+ DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
+ ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+ mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[jobID].cSize = 0;
+ }
+ memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ mtctx->allJobsCompleted = 1;
+}
+
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
+{
+ DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
+ while (mtctx->doneJobID < mtctx->nextJobID) {
+ unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+ while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+ DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */
+ ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+ mtctx->doneJobID++;
+ }
+}
+
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
+{
+ if (mtctx==NULL) return 0; /* compatible with free on NULL */
+ POOL_free(mtctx->factory); /* stop and free worker threads */
+ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
+ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+ ZSTDMT_freeBufferPool(mtctx->bufPool);
+ ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+ ZSTDMT_freeSeqPool(mtctx->seqPool);
+ ZSTDMT_serialState_free(&mtctx->serial);
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ if (mtctx->roundBuff.buffer)
+ ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
+ ZSTD_free(mtctx, mtctx->cMem);
+ return 0;
+}
+
+size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
+{
+ if (mtctx == NULL) return 0; /* supports sizeof NULL */
+ return sizeof(*mtctx)
+ + POOL_sizeof(mtctx->factory)
+ + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
+ + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+ + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+ + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
+ + ZSTD_sizeof_CDict(mtctx->cdictLocal)
+ + mtctx->roundBuff.capacity;
+}
+
+/* Internal only */
+size_t
+ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
+ ZSTDMT_parameter parameter,
+ int value)
+{
+ DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
+ switch(parameter)
+ {
+ case ZSTDMT_p_jobSize :
+ DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
+ case ZSTDMT_p_overlapLog :
+ DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
+ case ZSTDMT_p_rsyncable :
+ DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
+ default :
+ return ERROR(parameter_unsupported);
+ }
+}
+
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
+{
+ DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
+ return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+}
+
+size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
+{
+ switch (parameter) {
+ case ZSTDMT_p_jobSize:
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
+ case ZSTDMT_p_overlapLog:
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
+ case ZSTDMT_p_rsyncable:
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
+ default:
+ return ERROR(parameter_unsupported);
+ }
+}
+
+/* Sets parameters relevant to the compression job,
+ * initializing others to default values. */
+static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
+{
+ ZSTD_CCtx_params jobParams = params;
+ /* Clear parameters related to multithreading */
+ jobParams.forceWindow = 0;
+ jobParams.nbWorkers = 0;
+ jobParams.jobSize = 0;
+ jobParams.overlapLog = 0;
+ jobParams.rsyncable = 0;
+ memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
+ memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
+ return jobParams;
+}
+
+
+/* ZSTDMT_resize() :
+ * @return : error code if fails, 0 on success */
+static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
+{
+ if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
+ FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+ mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+ if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
+ mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
+ if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
+ mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
+ if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
+ ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+ return 0;
+}
+
+
+/*! ZSTDMT_updateCParams_whileCompressing() :
+ * Updates a selected set of compression parameters, while remaining compatible with the currently active frame.
+ * New parameters will be applied to the next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
+{
+ U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */
+ int const compressionLevel = cctxParams->compressionLevel;
+ DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
+ compressionLevel);
+ mtctx->params.compressionLevel = compressionLevel;
+ { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
+ cParams.windowLog = saved_wlog;
+ mtctx->params.cParams = cParams;
+ }
+}
+
+/* ZSTDMT_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It is able to count progression inside worker threads.
+ * Note : mutex will be acquired during statistics collection inside workers. */
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
+{
+ ZSTD_frameProgression fps;
+ DEBUGLOG(5, "ZSTDMT_getFrameProgression");
+ fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
+ fps.consumed = mtctx->consumed;
+ fps.produced = fps.flushed = mtctx->produced;
+ fps.currentJobID = mtctx->nextJobID;
+ fps.nbActiveWorkers = 0;
+ { unsigned jobNb;
+ unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
+ DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
+ mtctx->doneJobID, lastJobNb, mtctx->jobReady)
+ for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
+ unsigned const wJobID = jobNb & mtctx->jobIDMask;
+ ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
+ ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+ { size_t const cResult = jobPtr->cSize;
+ size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+ size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+ assert(flushed <= produced);
+ fps.ingested += jobPtr->src.size;
+ fps.consumed += jobPtr->consumed;
+ fps.produced += produced;
+ fps.flushed += flushed;
+ fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ }
+ }
+ return fps;
+}
+
+
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
+{
+ size_t toFlush;
+ unsigned const jobID = mtctx->doneJobID;
+ assert(jobID <= mtctx->nextJobID);
+ if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */
+
+ /* look into oldest non-fully-flushed job */
+ { unsigned const wJobID = jobID & mtctx->jobIDMask;
+ ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
+ ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+ { size_t const cResult = jobPtr->cSize;
+ size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+ size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+ assert(flushed <= produced);
+ toFlush = produced - flushed;
+ if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) {
+ /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */
+ assert(jobPtr->consumed < jobPtr->src.size);
+ }
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ }
+
+ return toFlush;
+}
+
+
+/* ------------------------------------------ */
+/* ===== Multi-threaded compression ===== */
+/* ------------------------------------------ */
+
+static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
+{
+ if (params.ldmParams.enableLdm)
+ /* In Long Range Mode, the windowLog is typically oversized.
+         * In that case, it's preferable to determine the jobSize
+ * based on chainLog instead. */
+ return MAX(21, params.cParams.chainLog + 4);
+ return MAX(20, params.cParams.windowLog + 2);
+}
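+/* Illustrative numbers for ZSTDMT_computeTargetJobLog() : without LDM, the target job size
+ * is MAX(1 MB, 4 * windowSize) ; e.g. windowLog==23 (8 MB window) gives a 32 MB (1<<25)
+ * target job size. These figures are examples, not requirements. */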
+
+static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
+{
+ switch(strat)
+ {
+ case ZSTD_btultra2:
+ return 9;
+ case ZSTD_btultra:
+ case ZSTD_btopt:
+ return 8;
+ case ZSTD_btlazy2:
+ case ZSTD_lazy2:
+ return 7;
+ case ZSTD_lazy:
+ case ZSTD_greedy:
+ case ZSTD_dfast:
+ case ZSTD_fast:
+ default:;
+ }
+ return 6;
+}
+
+static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
+{
+ assert(0 <= ovlog && ovlog <= 9);
+ if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
+ return ovlog;
+}
+
+static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
+{
+ int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
+ int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
+ assert(0 <= overlapRLog && overlapRLog <= 8);
+ if (params.ldmParams.enableLdm) {
+ /* In Long Range Mode, the windowLog is typically oversized.
+         * In that case, it's preferable to determine the jobSize
+         * based on chainLog instead.
+         * ovLog then becomes a fraction of the jobSize, rather than of the windowSize */
+ ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
+ - overlapRLog;
+ }
+ assert(0 <= ovLog && ovLog <= 30);
+ DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
+ DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
+ return (ovLog==0) ? 0 : (size_t)1 << ovLog;
+}
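+/* Illustrative numbers for ZSTDMT_computeOverlapSize(), ignoring LDM :
+ * the overlap size is windowSize >> (9 - overlapLog).
+ * e.g. windowLog==23 (8 MB window) with the default overlapLog==6 for ZSTD_lazy gives
+ * overlapRLog==3 and ovLog==20, so each job reloads a 1 MB (windowSize/8) prefix ;
+ * overlapLog==9 reloads the full window, and overlapLog==1 (overlapRLog==8) disables overlap. */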
+
+static unsigned
+ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
+{
+ assert(nbWorkers>0);
+ { size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
+ size_t const jobMaxSize = jobSizeTarget << 2;
+ size_t const passSizeMax = jobMaxSize * nbWorkers;
+ unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
+ unsigned const nbJobsLarge = multiplier * nbWorkers;
+ unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
+ unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
+ return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
+} }
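+/* Illustrative numbers for ZSTDMT_computeNbJobs() : with 4 workers and a 32 MB job-size
+ * target (windowLog==23), a 100 MB source yields MIN(100/32 + 1, 4) == 4 jobs, while a
+ * 2 GB source exceeds passSizeMax (4 * 128 MB == 512 MB), giving multiplier==5, i.e. 20 jobs. */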
+
+/* ZSTDMT_compress_advanced_internal() :
+ * This is a blocking function : it will only give back control to the caller after finishing its compression job.
+ */
+static size_t ZSTDMT_compress_advanced_internal(
+ ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params)
+{
+ ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
+ size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
+ unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
+ size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
+ size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
+ const char* const srcStart = (const char*)src;
+ size_t remainingSrcSize = srcSize;
+ unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */
+ size_t frameStartPos = 0, dstBufferPos = 0;
+ assert(jobParams.nbWorkers == 0);
+ assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
+
+ params.jobSize = (U32)avgJobSize;
+ DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
+ nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
+
+ if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */
+ ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
+ DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
+ if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
+ }
+
+ assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
+ ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
+ if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
+ return ERROR(memory_allocation);
+
+ FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
+
+ { unsigned u;
+ for (u=0; u<nbJobs; u++) {
+ size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
+ size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
+ buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
+ buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
+ size_t dictSize = u ? overlapSize : 0;
+
+ mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
+ mtctx->jobs[u].prefix.size = dictSize;
+ mtctx->jobs[u].src.start = srcStart + frameStartPos;
+ mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0); /* avoid job.src.size == 0 */
+ mtctx->jobs[u].consumed = 0;
+ mtctx->jobs[u].cSize = 0;
+ mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
+ mtctx->jobs[u].fullFrameSize = srcSize;
+ mtctx->jobs[u].params = jobParams;
+ /* do not calculate checksum within sections, but write it in header for first section */
+ mtctx->jobs[u].dstBuff = dstBuffer;
+ mtctx->jobs[u].cctxPool = mtctx->cctxPool;
+ mtctx->jobs[u].bufPool = mtctx->bufPool;
+ mtctx->jobs[u].seqPool = mtctx->seqPool;
+ mtctx->jobs[u].serial = &mtctx->serial;
+ mtctx->jobs[u].jobID = u;
+ mtctx->jobs[u].firstJob = (u==0);
+ mtctx->jobs[u].lastJob = (u==nbJobs-1);
+
+ DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize);
+ DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
+ POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
+
+ frameStartPos += jobSize;
+ dstBufferPos += dstBufferCapacity;
+ remainingSrcSize -= jobSize;
+ } }
+
+ /* collect result */
+ { size_t error = 0, dstPos = 0;
+ unsigned jobID;
+ for (jobID=0; jobID<nbJobs; jobID++) {
+ DEBUGLOG(5, "waiting for job %u ", jobID);
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+ while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+ DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
+ ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+ DEBUGLOG(5, "ready to write job %u ", jobID);
+
+ { size_t const cSize = mtctx->jobs[jobID].cSize;
+ if (ZSTD_isError(cSize)) error = cSize;
+ if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
+ if (jobID) { /* note : job 0 is written directly at dst, which is correct position */
+ if (!error)
+ memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize); /* may overlap when job compressed within dst */
+ if (jobID >= compressWithinDst) { /* job compressed into its own buffer, which must be released */
+ DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
+ ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+ } }
+ mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[jobID].cSize = 0;
+ dstPos += cSize ;
+ }
+ } /* for (jobID=0; jobID<nbJobs; jobID++) */
+
+ DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
+ if (params.fParams.checksumFlag) {
+ U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+ if (dstPos + 4 > dstCapacity) {
+ error = ERROR(dstSize_tooSmall);
+ } else {
+ DEBUGLOG(4, "writing checksum : %08X \n", checksum);
+ MEM_writeLE32((char*)dst + dstPos, checksum);
+ dstPos += 4;
+ } }
+
+ if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
+ return error ? error : dstPos;
+ }
+}
+
+size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_parameters params,
+ int overlapLog)
+{
+ ZSTD_CCtx_params cctxParams = mtctx->params;
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
+ cctxParams.overlapLog = overlapLog;
+ return ZSTDMT_compress_advanced_internal(mtctx,
+ dst, dstCapacity,
+ src, srcSize,
+ cdict, cctxParams);
+}
+
+
+size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+ int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
+ params.fParams.contentSizeFlag = 1;
+ return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
+}
+
+
+/* ====================================== */
+/* ======= Streaming API ======= */
+/* ====================================== */
+
+size_t ZSTDMT_initCStream_internal(
+ ZSTDMT_CCtx* mtctx,
+ const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
+ (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
+
+    /* params are supposed to be partially validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+
+ /* init */
+ if (params.nbWorkers != mtctx->params.nbWorkers)
+ FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
+
+ if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
+ if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+
+ mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */
+ if (mtctx->singleBlockingThread) {
+ ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
+ DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
+ assert(singleThreadParams.nbWorkers == 0);
+ return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
+ dict, dictSize, cdict,
+ singleThreadParams, pledgedSrcSize);
+ }
+
+ DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
+
+ if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
+ ZSTDMT_waitForAllJobsCompleted(mtctx);
+ ZSTDMT_releaseAllJobResources(mtctx);
+ mtctx->allJobsCompleted = 1;
+ }
+
+ mtctx->params = params;
+ mtctx->frameContentSize = pledgedSrcSize;
+ if (dict) {
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
+ params.cParams, mtctx->cMem);
+ mtctx->cdict = mtctx->cdictLocal;
+ if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
+ } else {
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ mtctx->cdictLocal = NULL;
+ mtctx->cdict = cdict;
+ }
+
+ mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
+ DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
+ mtctx->targetSectionSize = params.jobSize;
+ if (mtctx->targetSectionSize == 0) {
+ mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+ }
+ if (params.rsyncable) {
+        /* Aim for the targetSectionSize as the average job size. */
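+        /* Illustrative numbers : an 8 MB targetSectionSize gives jobSizeMB==8 and rsyncBits==23,
+         * so the rolling hash over RSYNC_LENGTH bytes triggers a job split on average once
+         * every 1<<23 bytes, i.e. roughly one job per 8 MB of input. */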
+ U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
+ U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
+ assert(jobSizeMB >= 1);
+ DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
+ mtctx->rsync.hash = 0;
+ mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
+ mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
+ }
+ if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
+ DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
+ DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
+ ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
+ {
+ /* If ldm is enabled we need windowSize space. */
+ size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        /* Two buffers of slack, plus extra space for the overlap.
+         * This is the minimum slack that LDM works with. One extra because
+         * flush might waste up to targetSectionSize-1 bytes. Another extra
+         * for the overlap (if > 0), then one to fill, which doesn't overlap
+         * with the LDM window.
+ */
+ size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
+ size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
+ /* Compute the total size, and always have enough slack */
+ size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
+ size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
+ size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
+ if (mtctx->roundBuff.capacity < capacity) {
+ if (mtctx->roundBuff.buffer)
+ ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
+ mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
+ if (mtctx->roundBuff.buffer == NULL) {
+ mtctx->roundBuff.capacity = 0;
+ return ERROR(memory_allocation);
+ }
+ mtctx->roundBuff.capacity = capacity;
+ }
+ }
+ DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
+ mtctx->roundBuff.pos = 0;
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ mtctx->inBuff.prefix = kNullRange;
+ mtctx->doneJobID = 0;
+ mtctx->nextJobID = 0;
+ mtctx->frameEnded = 0;
+ mtctx->allJobsCompleted = 0;
+ mtctx->consumed = 0;
+ mtctx->produced = 0;
+ if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
+ return ERROR(memory_allocation);
+ return 0;
+}
+
+size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
+ DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
+ cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams = mtctx->params;
+ if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */
+ cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
+ cctxParams.fParams = fParams;
+ return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
+ cctxParams, pledgedSrcSize);
+}
+
+
+/* ZSTDMT_resetCStream() :
+ * pledgedSrcSize can be zero == unknown (for the time being)
+ * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
+ * as `0` might mean "empty" in the future */
+size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
+{
+ if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
+ pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+ ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
+ DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
+ cctxParams.cParams = params.cParams;
+ cctxParams.fParams = params.fParams;
+ return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+
+/* ZSTDMT_writeLastEmptyBlock()
+ * Write a single empty block with an end-of-frame to finish a frame.
+ * Job must be created from streaming variant.
+ * This function is always successful if expected conditions are fulfilled.
+ */
+static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
+{
+ assert(job->lastJob == 1);
+ assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
+ assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
+ assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
+ job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
+ if (job->dstBuff.start == NULL) {
+ job->cSize = ERROR(memory_allocation);
+ return;
+ }
+ assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
+ job->src = kNullRange;
+ job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
+ assert(!ZSTD_isError(job->cSize));
+ assert(job->consumed == 0);
+}
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
+{
+ unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
+ int const endFrame = (endOp == ZSTD_e_end);
+
+ if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
+ assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
+ return 0;
+ }
+
+ if (!mtctx->jobReady) {
+ BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
+ mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
+ mtctx->jobs[jobID].src.start = src;
+ mtctx->jobs[jobID].src.size = srcSize;
+ assert(mtctx->inBuff.filled >= srcSize);
+ mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
+ mtctx->jobs[jobID].consumed = 0;
+ mtctx->jobs[jobID].cSize = 0;
+ mtctx->jobs[jobID].params = mtctx->params;
+ mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
+ mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
+ mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
+ mtctx->jobs[jobID].bufPool = mtctx->bufPool;
+ mtctx->jobs[jobID].seqPool = mtctx->seqPool;
+ mtctx->jobs[jobID].serial = &mtctx->serial;
+ mtctx->jobs[jobID].jobID = mtctx->nextJobID;
+ mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
+ mtctx->jobs[jobID].lastJob = endFrame;
+ mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
+ mtctx->jobs[jobID].dstFlushed = 0;
+
+ /* Update the round buffer pos and clear the input buffer to be reset */
+ mtctx->roundBuff.pos += srcSize;
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ /* Set the prefix */
+ if (!endFrame) {
+ size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
+ mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
+ mtctx->inBuff.prefix.size = newPrefixSize;
+ } else { /* endFrame==1 => no need for another input buffer */
+ mtctx->inBuff.prefix = kNullRange;
+ mtctx->frameEnded = endFrame;
+ if (mtctx->nextJobID == 0) {
+ /* single job exception : checksum is already calculated directly within worker thread */
+ mtctx->params.fParams.checksumFlag = 0;
+ } }
+
+ if ( (srcSize == 0)
+ && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
+ assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */
+ ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
+ mtctx->nextJobID++;
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))",
+ mtctx->nextJobID,
+ (U32)mtctx->jobs[jobID].src.size,
+ mtctx->jobs[jobID].lastJob,
+ mtctx->nextJobID,
+ jobID);
+ if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
+ mtctx->nextJobID++;
+ mtctx->jobReady = 0;
+ } else {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
+ mtctx->jobReady = 1;
+ }
+ return 0;
+}
+
+
+/*! ZSTDMT_flushProduced() :
+ * flush whatever data has been produced but not yet flushed in current job.
+ * move to next job if current one is fully flushed.
+ * `output` : `pos` will be updated with amount of data flushed .
+ * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
+ * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
+static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
+{
+ unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
+ DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
+ blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
+ assert(output->size >= output->pos);
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+ if ( blockToFlush
+ && (mtctx->doneJobID < mtctx->nextJobID) ) {
+ assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
+ while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */
+ if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
+ DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
+ break;
+ }
+ DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+ ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */
+ } }
+
+ /* try to flush something */
+ { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */
+ size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */
+ size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ if (ZSTD_isError(cSize)) {
+ DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
+ mtctx->doneJobID, ZSTD_getErrorName(cSize));
+ ZSTDMT_waitForAllJobsCompleted(mtctx);
+ ZSTDMT_releaseAllJobResources(mtctx);
+ return cSize;
+ }
+ /* add frame checksum if necessary (can only happen once) */
+ assert(srcConsumed <= srcSize);
+ if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */
+ && mtctx->jobs[wJobID].frameChecksumNeeded ) {
+ U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+ DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
+ MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
+ cSize += 4;
+ mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */
+ mtctx->jobs[wJobID].frameChecksumNeeded = 0;
+ }
+
+ if (cSize > 0) { /* compression is ongoing or completed */
+ size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
+ DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
+ (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
+ assert(mtctx->doneJobID < mtctx->nextJobID);
+ assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
+ assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
+ memcpy((char*)output->dst + output->pos,
+ (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
+ toFlush);
+ output->pos += toFlush;
+ mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
+
+ if ( (srcConsumed == srcSize) /* job is completed */
+ && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */
+ DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+ ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+ DEBUGLOG(5, "dstBuffer released");
+ mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */
+ mtctx->consumed += srcSize;
+ mtctx->produced += cSize;
+ mtctx->doneJobID++;
+ } }
+
+ /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
+ if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
+ if (srcSize > srcConsumed) return 1; /* current job not completely compressed */
+ }
+ if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */
+ if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */
+ if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */
+ mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */
+ if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
+ return 0; /* internal buffers fully flushed */
+}
+
+/**
+ * Returns the range of data used by the earliest job that is not yet complete.
+ * If the data of the first job is broken up into two segments, we cover both
+ * sections.
+ */
+static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
+{
+ unsigned const firstJobID = mtctx->doneJobID;
+ unsigned const lastJobID = mtctx->nextJobID;
+ unsigned jobID;
+
+ for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
+ unsigned const wJobID = jobID & mtctx->jobIDMask;
+ size_t consumed;
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+ consumed = mtctx->jobs[wJobID].consumed;
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+
+ if (consumed < mtctx->jobs[wJobID].src.size) {
+ range_t range = mtctx->jobs[wJobID].prefix;
+ if (range.size == 0) {
+ /* Empty prefix */
+ range = mtctx->jobs[wJobID].src;
+ }
+ /* Job source in multiple segments not supported yet */
+ assert(range.start <= mtctx->jobs[wJobID].src.start);
+ return range;
+ }
+ }
+ return kNullRange;
+}
+
+/**
+ * Returns non-zero iff buffer and range overlap.
+ */
+static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
+{
+ BYTE const* const bufferStart = (BYTE const*)buffer.start;
+ BYTE const* const bufferEnd = bufferStart + buffer.capacity;
+ BYTE const* const rangeStart = (BYTE const*)range.start;
+ BYTE const* const rangeEnd = rangeStart + range.size;
+
+ if (rangeStart == NULL || bufferStart == NULL)
+ return 0;
+ /* Empty ranges cannot overlap */
+ if (bufferStart == bufferEnd || rangeStart == rangeEnd)
+ return 0;
+
+ return bufferStart < rangeEnd && rangeStart < bufferEnd;
+}
+
+static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
+{
+ range_t extDict;
+ range_t prefix;
+
+ DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
+ extDict.start = window.dictBase + window.lowLimit;
+ extDict.size = window.dictLimit - window.lowLimit;
+
+ prefix.start = window.base + window.dictLimit;
+ prefix.size = window.nextSrc - (window.base + window.dictLimit);
+ DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
+ (size_t)extDict.start,
+ (size_t)extDict.start + extDict.size);
+ DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
+ (size_t)prefix.start,
+ (size_t)prefix.start + prefix.size);
+
+ return ZSTDMT_isOverlapped(buffer, extDict)
+ || ZSTDMT_isOverlapped(buffer, prefix);
+}
+
+static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
+{
+ if (mtctx->params.ldmParams.enableLdm) {
+ ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+ DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
+ DEBUGLOG(5, "source [0x%zx, 0x%zx)",
+ (size_t)buffer.start,
+ (size_t)buffer.start + buffer.capacity);
+ ZSTD_PTHREAD_MUTEX_LOCK(mutex);
+ while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
+ DEBUGLOG(5, "Waiting for LDM to finish...");
+ ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
+ }
+ DEBUGLOG(6, "Done waiting for LDM to finish");
+ ZSTD_pthread_mutex_unlock(mutex);
+ }
+}
+
+/**
+ * Attempts to set the inBuff to the next section to fill.
+ * If any part of the new section is still in use we give up.
+ * Returns non-zero if the buffer is filled.
+ */
+static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
+{
+ range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
+ size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
+ size_t const target = mtctx->targetSectionSize;
+ buffer_t buffer;
+
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
+ assert(mtctx->inBuff.buffer.start == NULL);
+ assert(mtctx->roundBuff.capacity >= target);
+
+ if (spaceLeft < target) {
+ /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
+ * Simply copy the prefix to the beginning in that case.
+ */
+ BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
+ size_t const prefixSize = mtctx->inBuff.prefix.size;
+
+ buffer.start = start;
+ buffer.capacity = prefixSize;
+ if (ZSTDMT_isOverlapped(buffer, inUse)) {
+ DEBUGLOG(5, "Waiting for buffer...");
+ return 0;
+ }
+ ZSTDMT_waitForLdmComplete(mtctx, buffer);
+ memmove(start, mtctx->inBuff.prefix.start, prefixSize);
+ mtctx->inBuff.prefix.start = start;
+ mtctx->roundBuff.pos = prefixSize;
+ }
+ buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
+ buffer.capacity = target;
+
+ if (ZSTDMT_isOverlapped(buffer, inUse)) {
+ DEBUGLOG(5, "Waiting for buffer...");
+ return 0;
+ }
+ assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
+
+ ZSTDMT_waitForLdmComplete(mtctx, buffer);
+
+ DEBUGLOG(5, "Using prefix range [%zx, %zx)",
+ (size_t)mtctx->inBuff.prefix.start,
+ (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
+ DEBUGLOG(5, "Using source range [%zx, %zx)",
+ (size_t)buffer.start,
+ (size_t)buffer.start + buffer.capacity);
+
+
+ mtctx->inBuff.buffer = buffer;
+ mtctx->inBuff.filled = 0;
+ assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
+ return 1;
+}
+
+typedef struct {
+ size_t toLoad; /* The number of bytes to load from the input. */
+ int flush; /* Boolean declaring if we must flush because we found a synchronization point. */
+} syncPoint_t;
+
+/**
+ * Searches through the input for a synchronization point. If one is found, we
+ * will instruct the caller to flush, and return the number of bytes to load.
+ * Otherwise, we will load as many bytes as possible and instruct the caller
+ * to continue as normal.
+ */
+static syncPoint_t
+findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
+{
+ BYTE const* const istart = (BYTE const*)input.src + input.pos;
+ U64 const primePower = mtctx->rsync.primePower;
+ U64 const hitMask = mtctx->rsync.hitMask;
+
+ syncPoint_t syncPoint;
+ U64 hash;
+ BYTE const* prev;
+ size_t pos;
+
+ syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+ syncPoint.flush = 0;
+ if (!mtctx->params.rsyncable)
+ /* Rsync is disabled. */
+ return syncPoint;
+ if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
+ /* Not enough to compute the hash.
+ * We will miss any synchronization points in this RSYNC_LENGTH byte
+         * window. However, since it depends only on the internal buffers, if the
+ * state is already synchronized, we will remain synchronized.
+ * Additionally, the probability that we miss a synchronization point is
+ * low: RSYNC_LENGTH / targetSectionSize.
+ */
+ return syncPoint;
+ /* Initialize the loop variables. */
+ if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
+ /* We have enough bytes buffered to initialize the hash.
+ * Start scanning at the beginning of the input.
+ */
+ pos = 0;
+ prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
+ hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+ } else {
+ /* We don't have enough bytes buffered to initialize the hash, but
+ * we know we have at least RSYNC_LENGTH bytes total.
+ * Start scanning after the first RSYNC_LENGTH bytes less the bytes
+ * already buffered.
+ */
+ pos = RSYNC_LENGTH - mtctx->inBuff.filled;
+ prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
+ hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
+ hash = ZSTD_rollingHash_append(hash, istart, pos);
+ }
+ /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
+ * through the input. If we hit a synchronization point, then cut the
+ * job off, and tell the compressor to flush the job. Otherwise, load
+ * all the bytes and continue as normal.
+ * If we go too long without a synchronization point (targetSectionSize)
+     * then a block will be emitted anyway, but this is okay, since if we
+ * are already synchronized we will remain synchronized.
+ */
+ for (; pos < syncPoint.toLoad; ++pos) {
+ BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
+ /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
+ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
+ if ((hash & hitMask) == hitMask) {
+ syncPoint.toLoad = pos + 1;
+ syncPoint.flush = 1;
+ break;
+ }
+ }
+ return syncPoint;
+}
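
findSynchronizationPoint above is the consumer of that setup: it rolls a hash across the input and cuts the job wherever the low rsyncBits of the hash are all ones. The same content-defined-chunking idea in isolation, as a toy sketch that does not use zstd's internal ZSTD_rollingHash_* helpers (the 32-byte window mirrors the role RSYNC_LENGTH plays above, but the window length, multiplier, and find_cut helper are all illustrative assumptions):

    #include <stddef.h>
    #include <stdint.h>

    #define WINDOW   32   /* rolling window length; RSYNC_LENGTH plays this role above */
    #define HIT_BITS 20   /* expected cut distance ~ 2^20 bytes */

    static const uint64_t PRIME = 0x9E3779B185EBCA87ULL;  /* arbitrary odd multiplier */

    static uint64_t prime_pow(unsigned n)
    {
        uint64_t p = 1;
        while (n--) p *= PRIME;
        return p;
    }

    /* Returns the position just past the first synchronization point, or len if none is found. */
    static size_t find_cut(const uint8_t* buf, size_t len)
    {
        uint64_t const hitMask = ((uint64_t)1 << HIT_BITS) - 1;
        uint64_t const power   = prime_pow(WINDOW - 1);    /* weight of the byte leaving the window */
        uint64_t hash = 0;
        size_t pos;
        if (len < WINDOW) return len;                      /* not enough data to fill the window */
        for (pos = 0; pos < WINDOW; ++pos)                 /* hash of the first WINDOW bytes */
            hash = hash * PRIME + buf[pos];
        for (; pos < len; ++pos) {
            if ((hash & hitMask) == hitMask) return pos;   /* content-defined cut point */
            hash -= (uint64_t)buf[pos - WINDOW] * power;   /* drop the oldest byte ... */
            hash  = hash * PRIME + buf[pos];               /* ... and roll in the newest one */
        }
        return len;
    }
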
+
+size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
+{
+ size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
+ if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
+ return hintInSize;
+}
+
+/** ZSTDMT_compressStream_generic() :
+ * internal use only - exposed to be invoked from zstd_compress.c
+ * assumption : output and input are valid (pos <= size)
+ * @return : minimum amount of data remaining to flush, 0 if none */
+size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ unsigned forwardInputProgress = 0;
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
+ (U32)endOp, (U32)(input->size - input->pos));
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+
+ if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */
+ return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
+ }
+
+ if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
+ /* current frame being ended. Only flush/end are allowed */
+ return ERROR(stage_wrong);
+ }
+
+ /* single-pass shortcut (note : synchronous-mode) */
+ if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */
+ && (mtctx->nextJobID == 0) /* just started */
+ && (mtctx->inBuff.filled == 0) /* nothing buffered */
+ && (!mtctx->jobReady) /* no job already created */
+ && (endOp == ZSTD_e_end) /* end order */
+ && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */
+ size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
+ (char*)output->dst + output->pos, output->size - output->pos,
+ (const char*)input->src + input->pos, input->size - input->pos,
+ mtctx->cdict, mtctx->params);
+ if (ZSTD_isError(cSize)) return cSize;
+ input->pos = input->size;
+ output->pos += cSize;
+ mtctx->allJobsCompleted = 1;
+ mtctx->frameEnded = 1;
+ return 0;
+ }
+
+ /* fill input buffer */
+ if ( (!mtctx->jobReady)
+ && (input->size > input->pos) ) { /* support NULL input */
+ if (mtctx->inBuff.buffer.start == NULL) {
+ assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
+ if (!ZSTDMT_tryGetInputRange(mtctx)) {
+ /* It is only possible for this operation to fail if there are
+ * still compression jobs ongoing.
+ */
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
+ assert(mtctx->doneJobID != mtctx->nextJobID);
+ } else
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
+ }
+ if (mtctx->inBuff.buffer.start != NULL) {
+ syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
+ if (syncPoint.flush && endOp == ZSTD_e_continue) {
+ endOp = ZSTD_e_flush;
+ }
+ assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
+ (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
+ memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
+ input->pos += syncPoint.toLoad;
+ mtctx->inBuff.filled += syncPoint.toLoad;
+ forwardInputProgress = syncPoint.toLoad>0;
+ }
+ if ((input->pos < input->size) && (endOp == ZSTD_e_end))
+ endOp = ZSTD_e_flush; /* can't end now : not all input consumed */
+ }
+
+ if ( (mtctx->jobReady)
+ || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */
+ || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */
+ || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
+ size_t const jobSize = mtctx->inBuff.filled;
+ assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
+ FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+ }
+
+ /* check for potential compressed data ready to be flushed */
+ { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
+ if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */
+ DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
+ return remainingToFlush;
+ }
+}
+
+
+size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
+
+ /* recommended next input size : fill current input buffer */
+ return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
+}
+
+
+static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
+{
+ size_t const srcSize = mtctx->inBuff.filled;
+ DEBUGLOG(5, "ZSTDMT_flushStream_internal");
+
+ if ( mtctx->jobReady /* one job ready for a worker to pick up */
+ || (srcSize > 0) /* still some data within input buffer */
+ || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
+ DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
+ (U32)srcSize, (U32)endFrame);
+ FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
+ }
+
+ /* check if there is any data available to flush */
+ return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
+}
+
+
+size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
+{
+ DEBUGLOG(5, "ZSTDMT_flushStream");
+ if (mtctx->singleBlockingThread)
+ return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
+ return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
+}
+
+size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
+{
+ DEBUGLOG(4, "ZSTDMT_endStream");
+ if (mtctx->singleBlockingThread)
+ return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
+ return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.h b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
new file mode 100644
index 000000000..12e6bcb3a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ #ifndef ZSTDMT_COMPRESS_H
+ #define ZSTDMT_COMPRESS_H
+
+ #if defined (__cplusplus)
+ extern "C" {
+ #endif
+
+
+/* Note : This is an internal API.
+ * These APIs used to be exposed with ZSTDLIB_API,
+ * because they used to be the only way to invoke MT compression.
+ * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
+ * instead.
+ *
+ * If you depend on these APIs and can't switch, then define
+ * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
+ * However, we may completely remove these functions in a future
+ * release, so please switch soon.
+ *
+ * This API requires ZSTD_MULTITHREAD to be defined during compilation,
+ * otherwise ZSTDMT_createCCtx*() will fail.
+ */
+
+#ifdef ZSTD_LEGACY_MULTITHREADED_API
+# define ZSTDMT_API ZSTDLIB_API
+#else
+# define ZSTDMT_API
+#endif
+
+/* === Dependencies === */
+#include <stddef.h> /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
+#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
+
+
+/* === Constants === */
+#ifndef ZSTDMT_NBWORKERS_MAX
+# define ZSTDMT_NBWORKERS_MAX 200
+#endif
+#ifndef ZSTDMT_JOBSIZE_MIN
+# define ZSTDMT_JOBSIZE_MIN (1 MB)
+#endif
+#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB))
+
+
+/* === Memory management === */
+typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+ ZSTD_customMem cMem);
+ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+
+ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+
+
+/* === Simple one-pass compression function === */
+
+ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+
+
+/* === Streaming functions === */
+
+ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+
+ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+
+
+/* === Advanced functions and parameters === */
+
+ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_parameters params,
+ int overlapLog);
+
+ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+ const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
+
+ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fparams,
+ unsigned long long pledgedSrcSize); /* note : zero means empty */
+
+/* ZSTDMT_parameter :
+ * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
+typedef enum {
+ ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+ ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+ ZSTDMT_p_rsyncable /* Enables rsyncable mode. */
+} ZSTDMT_parameter;
+
+/* ZSTDMT_setMTCtxParameter() :
+ * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
+ * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
+ * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
+
+/* ZSTDMT_getMTCtxParameter() :
+ * Query the ZSTDMT_CCtx for a parameter value.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
+
+
+/*! ZSTDMT_compressStream_generic() :
+ * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
+ * depending on flush directive.
+ * @return : minimum amount of data still to be flushed
+ * 0 if fully flushed
+ * or an error code
+ * note : needs to be initialized using any ZSTD_initCStream*() variant */
+ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
+
+
+/* ========================================================
+ * === Private interface, for use by ZSTD_compress.c ===
+ * === Not exposed in libzstd. Never invoke directly ===
+ * ======================================================== */
+
+ /*! ZSTDMT_toFlushNow()
+ * Tell how many bytes are ready to be flushed immediately.
+ * Probe the oldest active job (not yet entirely flushed) and check its output buffer.
+ * If it returns 0, either there is no active job,
+ * or the oldest job is still active but everything it has produced has been flushed so far,
+ * so flushing speed is limited by the speed of the oldest job. */
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
+
+/*! ZSTDMT_CCtxParam_setMTCtxParameter()
+ * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);
+
+/*! ZSTDMT_CCtxParam_setNbWorkers()
+ * Set nbWorkers, and clamp it.
+ * Also reset jobSize and overlapLog */
+size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
+
+/*! ZSTDMT_updateCParams_whileCompressing() :
+ * Updates only a selected set of compression parameters, to remain compatible with current frame.
+ * New parameters will be applied to next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
+
+/*! ZSTDMT_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for current frame.
+ * able to count progression inside worker threads.
+ */
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
+
+
+/*! ZSTDMT_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+ const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTDMT_COMPRESS_H */
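
For orientation, a minimal usage sketch of the legacy streaming API declared above. It assumes the library is built with ZSTD_MULTITHREAD, that this file is compiled inside the library tree so "zstdmt_compress.h" is reachable, that dstCapacity is at least ZSTD_compressBound(srcSize) so the final flush can complete, and that the worker count, compression level, and rsyncable toggle shown are illustrative choices; as the header itself notes, new code should prefer ZSTD_compress2()/ZSTD_compressStream2():

    #define ZSTD_STATIC_LINKING_ONLY
    #include "zstd.h"
    #include "zstdmt_compress.h"

    /* Compress src into dst with 4 worker threads; returns compressed size, or 0 on any error. */
    static size_t mt_compress_once(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   /* returns NULL without ZSTD_MULTITHREAD */
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t ret;
        if (mtctx == NULL) return 0;
        /* sticky parameters must be set before any ZSTDMT_init*() call */
        ret = ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_rsyncable, 1);
        if (ZSTD_isError(ret)) goto error;
        ret = ZSTDMT_initCStream(mtctx, 3 /* compression level */);
        if (ZSTD_isError(ret)) goto error;
        while (in.pos < in.size) {                          /* feed input, section by section */
            ret = ZSTDMT_compressStream(mtctx, &out, &in);
            if (ZSTD_isError(ret)) goto error;
        }
        do {                                                /* end the frame; 0 means fully flushed */
            ret = ZSTDMT_endStream(mtctx, &out);
            if (ZSTD_isError(ret)) goto error;
        } while (ret != 0);
        ZSTDMT_freeCCtx(mtctx);
        return out.pos;
    error:
        ZSTDMT_freeCCtx(mtctx);
        return 0;
    }
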
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 000000000..b883f1fdc
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1 @@
+*.exe
diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore
new file mode 100644
index 000000000..b883f1fdc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/.gitignore
@@ -0,0 +1 @@
+*.exe
diff --git a/vendor/github.com/Microsoft/hcsshim/.gometalinter.json b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json
new file mode 100644
index 000000000..00e9a6e2e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json
@@ -0,0 +1,17 @@
+{
+ "Vendor": true,
+ "Deadline": "2m",
+ "Sort": [
+ "linter",
+ "severity",
+ "path",
+ "line"
+ ],
+ "Skip": [
+ "internal\\schema2"
+ ],
+ "EnableGC": true,
+ "Enable": [
+ "gofmt"
+ ]
+} \ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
new file mode 100644
index 000000000..a8ec5a593
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/appveyor.yml
@@ -0,0 +1,29 @@
+version: 0.1.{build}
+
+image: Visual Studio 2017
+
+clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim
+
+environment:
+ GOPATH: c:\gopath
+ PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH%
+
+stack: go 1.11
+
+build_script:
+ - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
+ - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL
+ - gometalinter.exe --config .gometalinter.json ./...
+ - go build ./cmd/wclayer
+ - go build ./cmd/runhcs
+ - go build ./cmd/tar2ext4
+ - go test -v ./... -tags admin
+ - go test -c ./test/functional/ -tags functional
+ - go test -c ./test/runhcs/ -tags integration
+
+artifacts:
+ - path: 'wclayer.exe'
+ - path: 'runhcs.exe'
+ - path: 'tar2ext4.exe'
+ - path: 'functional.test.exe'
+ - path: 'runhcs.test.exe' \ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
new file mode 100644
index 000000000..ce6edbcf3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
@@ -0,0 +1,12 @@
+# Requirements so far:
+# dockerd running
+# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
+# - image alpine (linux) docker pull --platform=linux alpine
+
+
+# TODO: Add this as a parameter for debugging, e.g. "functional-tests -debug=$true"
+#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
+
+#pushd uvm
+go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
+#popd \ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go
index 9f926c6be..5d3d0dfef 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go
@@ -1,6 +1,8 @@
package guestrequest
-import "github.com/Microsoft/hcsshim/internal/schema2"
+import (
+ "github.com/Microsoft/hcsshim/internal/schema2"
+)
// Arguably, many of these (at least CombinedLayers) should have been generated
// by swagger.
@@ -47,6 +49,19 @@ type LCOWMappedVPMemDevice struct {
MountPath string `json:"MountPath,omitempty"` // /tmp/pN
}
+type LCOWNetworkAdapter struct {
+ NamespaceID string `json:",omitempty"`
+ ID string `json:",omitempty"`
+ MacAddress string `json:",omitempty"`
+ IPAddress string `json:",omitempty"`
+ PrefixLength uint8 `json:",omitempty"`
+ GatewayAddress string `json:",omitempty"`
+ DNSSuffix string `json:",omitempty"`
+ DNSServerList string `json:",omitempty"`
+ EnableLowMetric bool `json:",omitempty"`
+ EncapOverhead uint16 `json:",omitempty"`
+}
+
type ResourceType string
const (
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
index 5119c2555..f9a922a4b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
@@ -16,11 +16,20 @@ var (
notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
// Notifications for HCS_SYSTEM handles
- hcsNotificationSystemExited hcsNotification = 0x00000001
- hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
- hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
- hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
- hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
+ hcsNotificationSystemExited hcsNotification = 0x00000001
+ hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
+ hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
+ hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
+ hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
+ hcsNotificationSystemCrashReport hcsNotification = 0x00000006
+ hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007
+ hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008
+ hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
+ hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A
+ hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B
+ hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C
+ hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D
+ hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E
// Notifications for HCS_PROCESS handles
hcsNotificationProcessExited hcsNotification = 0x00010000
@@ -50,16 +59,23 @@ func newChannels() notificationChannels {
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1)
+ channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1)
+
return channels
}
+
func closeChannels(channels notificationChannels) {
- close(channels[hcsNotificationSystemExited])
- close(channels[hcsNotificationSystemCreateCompleted])
- close(channels[hcsNotificationSystemStartCompleted])
- close(channels[hcsNotificationSystemPauseCompleted])
- close(channels[hcsNotificationSystemResumeCompleted])
- close(channels[hcsNotificationProcessExited])
- close(channels[hcsNotificationServiceDisconnect])
+ for _, c := range channels {
+ close(c)
+ }
}
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
index 6368b5717..079b56535 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
@@ -73,6 +73,9 @@ var (
// ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
+ // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
+ ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)
+
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go
index 90d164e35..6d03b17a2 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go
@@ -7,9 +7,14 @@ func logOperationBegin(ctx logrus.Fields, msg string) {
}
func logOperationEnd(ctx logrus.Fields, msg string, err error) {
+ // Copy the log and fields first.
+ log := logrus.WithFields(ctx)
if err == nil {
- logrus.WithFields(ctx).Debug(msg)
+ log.Debug(msg)
} else {
- logrus.WithFields(ctx).WithError(err).Error(msg)
+ // Edit only the copied field data to avoid race conditions on the
+ // write.
+ log.Data[logrus.ErrorKey] = err
+ log.Error(msg)
}
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 42598cf2f..41e20bbf9 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -31,9 +31,8 @@ func newProcess(process hcsProcess, processID int, computeSystem *System) *Proce
processID: processID,
system: computeSystem,
logctx: logrus.Fields{
- logfields.HCSOperation: "",
- logfields.ContainerID: computeSystem.ID(),
- logfields.ProcessID: processID,
+ logfields.ContainerID: computeSystem.ID(),
+ logfields.ProcessID: processID,
},
}
}
@@ -88,13 +87,12 @@ func (process *Process) SystemID() string {
}
func (process *Process) logOperationBegin(operation string) {
- process.logctx[logfields.HCSOperation] = operation
logOperationBegin(
process.logctx,
- "hcsshim::Process - Begin Operation")
+ operation+" - Begin Operation")
}
-func (process *Process) logOperationEnd(err error) {
+func (process *Process) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
@@ -104,9 +102,8 @@ func (process *Process) logOperationEnd(err error) {
logOperationEnd(
process.logctx,
- "hcsshim::Process - End Operation - "+result,
+ operation+" - End Operation - "+result,
err)
- process.logctx[logfields.HCSOperation] = ""
}
// Signal signals the process with `options`.
@@ -116,7 +113,7 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err e
operation := "hcsshim::Process::Signal"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -130,10 +127,9 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err e
optionsStr := string(optionsb)
var resultp *uint16
- completed := false
- go syscallWatcher(process.logctx, &completed)
- err = hcsSignalProcess(process.handle, optionsStr, &resultp)
- completed = true
+ syscallWatcher(process.logctx, func() {
+ err = hcsSignalProcess(process.handle, optionsStr, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
@@ -149,17 +145,16 @@ func (process *Process) Kill() (err error) {
operation := "hcsshim::Process::Kill"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var resultp *uint16
- completed := false
- go syscallWatcher(process.logctx, &completed)
- err = hcsTerminateProcess(process.handle, &resultp)
- completed = true
+ syscallWatcher(process.logctx, func() {
+ err = hcsTerminateProcess(process.handle, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
@@ -172,7 +167,7 @@ func (process *Process) Kill() (err error) {
func (process *Process) Wait() (err error) {
operation := "hcsshim::Process::Wait"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
@@ -187,7 +182,7 @@ func (process *Process) Wait() (err error) {
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcssshim::Process::WaitTimeout"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
@@ -204,7 +199,7 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
operation := "hcsshim::Process::ResizeConsole"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -241,7 +236,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
operation := "hcsshim::Process::Properties"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -251,10 +246,9 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
resultp *uint16
propertiesp *uint16
)
- completed := false
- go syscallWatcher(process.logctx, &completed)
- err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
- completed = true
+ syscallWatcher(process.logctx, func() {
+ err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return nil, makeProcessError(process, operation, err, events)
@@ -278,7 +272,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
func (process *Process) ExitCode() (_ int, err error) {
operation := "hcsshim::Process::ExitCode"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
properties, err := process.Properties()
if err != nil {
@@ -305,7 +299,7 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo
operation := "hcsshim::Process::Stdio"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -349,7 +343,7 @@ func (process *Process) CloseStdin() (err error) {
operation := "hcsshim::Process::CloseStdin"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -387,7 +381,7 @@ func (process *Process) Close() (err error) {
operation := "hcsshim::Process::Close"
process.logOperationBegin(operation)
- defer process.logOperationEnd(err)
+ defer func() { process.logOperationEnd(operation, err) }()
// Don't double free this
if process.handle == 0 {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 2629380fd..20b242524 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -49,20 +49,18 @@ func newSystem(id string) *System {
return &System{
id: id,
logctx: logrus.Fields{
- logfields.HCSOperation: "",
- logfields.ContainerID: id,
+ logfields.ContainerID: id,
},
}
}
func (computeSystem *System) logOperationBegin(operation string) {
- computeSystem.logctx[logfields.HCSOperation] = operation
logOperationBegin(
computeSystem.logctx,
- "hcsshim::ComputeSystem - Begin Operation")
+ operation+" - Begin Operation")
}
-func (computeSystem *System) logOperationEnd(err error) {
+func (computeSystem *System) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
@@ -72,9 +70,8 @@ func (computeSystem *System) logOperationEnd(err error) {
logOperationEnd(
computeSystem.logctx,
- "hcsshim::ComputeSystem - End Operation - "+result,
+ operation+" - End Operation - "+result,
err)
- computeSystem.logctx[logfields.HCSOperation] = ""
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
@@ -83,7 +80,7 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
@@ -97,13 +94,13 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
Debug("HCS ComputeSystem Document")
var (
- resultp *uint16
- identity syscall.Handle
+ resultp *uint16
+ identity syscall.Handle
+ createError error
)
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- createError := hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
+ })
if createError == nil || IsPending(createError) {
if err = computeSystem.registerCallback(); err != nil {
@@ -133,7 +130,13 @@ func OpenComputeSystem(id string) (_ *System, err error) {
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() {
+ if IsNotExist(err) {
+ computeSystem.logOperationEnd(operation, nil)
+ } else {
+ computeSystem.logOperationEnd(operation, err)
+ }
+ }()
var (
handle hcsSystem
@@ -157,12 +160,10 @@ func OpenComputeSystem(id string) (_ *System, err error) {
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
operation := "hcsshim::GetComputeSystems"
- fields := logrus.Fields{
- logfields.HCSOperation: operation,
- }
+ fields := logrus.Fields{}
logOperationBegin(
fields,
- "hcsshim::ComputeSystem - Begin Operation")
+ operation+" - Begin Operation")
defer func() {
var result string
@@ -174,7 +175,7 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
logOperationEnd(
fields,
- "hcsshim::ComputeSystem - End Operation - "+result,
+ operation+" - End Operation - "+result,
err)
}()
@@ -193,10 +194,10 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
resultp *uint16
computeSystemsp *uint16
)
- completed := false
- go syscallWatcher(fields, &completed)
- err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
- completed = true
+
+ syscallWatcher(fields, func() {
+ err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return nil, &HcsError{Op: operation, Err: err, Events: events}
@@ -221,7 +222,7 @@ func (computeSystem *System) Start() (err error) {
operation := "hcsshim::ComputeSystem::Start"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@@ -254,10 +255,9 @@ func (computeSystem *System) Start() (err error) {
}
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
+ })
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, "Start", "", err, events)
@@ -279,17 +279,22 @@ func (computeSystem *System) Shutdown() (err error) {
operation := "hcsshim::ComputeSystem::Shutdown"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() {
+ if IsAlreadyStopped(err) {
+ computeSystem.logOperationEnd(operation, nil)
+ } else {
+ computeSystem.logOperationEnd(operation, err)
+ }
+ }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Shutdown", "", err, events)
@@ -306,19 +311,24 @@ func (computeSystem *System) Terminate() (err error) {
operation := "hcsshim::ComputeSystem::Terminate"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() {
+ if IsPending(err) {
+ computeSystem.logOperationEnd(operation, nil)
+ } else {
+ computeSystem.logOperationEnd(operation, err)
+ }
+ }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
+ })
events := processHcsResult(resultp)
- if err != nil {
+ if err != nil && err != ErrVmcomputeAlreadyStopped {
return makeSystemError(computeSystem, "Terminate", "", err, events)
}
@@ -329,7 +339,7 @@ func (computeSystem *System) Terminate() (err error) {
func (computeSystem *System) Wait() (err error) {
operation := "hcsshim::ComputeSystem::Wait"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
@@ -339,12 +349,27 @@ func (computeSystem *System) Wait() (err error) {
return nil
}
+// WaitExpectedError synchronously waits for the compute system to shut down or
+// terminate, and ignores the passed error if it occurs.
+func (computeSystem *System) WaitExpectedError(expected error) (err error) {
+ operation := "hcsshim::ComputeSystem::WaitExpectedError"
+ computeSystem.logOperationBegin(operation)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
+
+ err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
+ if err != nil && getInnerError(err) != expected {
+ return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
+ }
+
+ return nil
+}
+
// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcsshim::ComputeSystem::WaitTimeout"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
@@ -360,7 +385,7 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem
operation := "hcsshim::ComputeSystem::Properties"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
queryj, err := json.Marshal(schema1.PropertyQuery{types})
if err != nil {
@@ -372,10 +397,9 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem
Debug("HCS ComputeSystem Properties Query")
var resultp, propertiesp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, events)
@@ -400,17 +424,16 @@ func (computeSystem *System) Pause() (err error) {
operation := "hcsshim::ComputeSystem::Pause"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
+ })
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, "Pause", "", err, events)
@@ -426,17 +449,16 @@ func (computeSystem *System) Resume() (err error) {
operation := "hcsshim::ComputeSystem::Resume"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
+ })
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, "Resume", "", err, events)
@@ -452,7 +474,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
operation := "hcsshim::ComputeSystem::CreateProcess"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processInfo hcsProcessInformation
@@ -475,10 +497,9 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
WithField(logfields.JSON, configuration).
Debug("HCS ComputeSystem Process Document")
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
@@ -513,7 +534,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
operation := "hcsshim::ComputeSystem::OpenProcess"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processHandle hcsProcess
@@ -524,10 +545,9 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
}
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
@@ -548,7 +568,7 @@ func (computeSystem *System) Close() (err error) {
operation := "hcsshim::ComputeSystem::Close"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
// Don't double free this
if computeSystem.handle == 0 {
@@ -559,10 +579,9 @@ func (computeSystem *System) Close() (err error) {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsCloseComputeSystem(computeSystem.handle)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsCloseComputeSystem(computeSystem.handle)
+ })
if err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
@@ -636,7 +655,7 @@ func (computeSystem *System) Modify(config interface{}) (err error) {
operation := "hcsshim::ComputeSystem::Modify"
computeSystem.logOperationBegin(operation)
- defer computeSystem.logOperationEnd(err)
+ defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
@@ -654,10 +673,9 @@ func (computeSystem *System) Modify(config interface{}) (err error) {
Debug("HCS ComputeSystem Modify Document")
var resultp *uint16
- completed := false
- go syscallWatcher(computeSystem.logctx, &completed)
- err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
- completed = true
+ syscallWatcher(computeSystem.logctx, func() {
+ err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
+ })
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Modify", requestString, err, events)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go
index e09dd1334..f85ed3187 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go
@@ -1,7 +1,7 @@
package hcs
import (
- "time"
+ "context"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/timeout"
@@ -17,17 +17,25 @@ import (
//
// Usage is:
//
-// completed := false
-// go syscallWatcher(context, &completed)
-// <syscall>
-// completed = true
+// syscallWatcher(logContext, func() {
+// err = <syscall>(args...)
+// })
//
-func syscallWatcher(context logrus.Fields, syscallCompleted *bool) {
- time.Sleep(timeout.SyscallWatcher)
- if *syscallCompleted {
- return
+
+func syscallWatcher(logContext logrus.Fields, syscallLambda func()) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
+ defer cancel()
+ go watchFunc(ctx, logContext)
+ syscallLambda()
+}
+
+func watchFunc(ctx context.Context, logContext logrus.Fields) {
+ select {
+ case <-ctx.Done():
+ if ctx.Err() != context.Canceled {
+ logrus.WithFields(logContext).
+ WithField(logfields.Timeout, timeout.SyscallWatcher).
+ Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
+ }
}
- logrus.WithFields(context).
- WithField(logfields.Timeout, timeout.SyscallWatcher).
- Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
}
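
The rewritten watcher replaces the shared completed boolean with a context whose cancellation marks completion, so the warning only fires on a real deadline. A self-contained sketch of the same shape, with made-up names (watchCall is not the hcsshim API), assuming only the standard library:

package main

import (
	"context"
	"fmt"
	"time"
)

// watchCall is a simplified stand-in for syscallWatcher: it warns if fn has
// not returned within timeout, and stays silent if fn finishes first.
func watchCall(timeout time.Duration, fn func()) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel() // cancelling marks the call as completed

	go func() {
		<-ctx.Done()
		if ctx.Err() == context.DeadlineExceeded {
			fmt.Println("warning: call did not complete within", timeout)
		}
	}()

	fn() // the watched call runs synchronously on the caller's goroutine
}

func main() {
	watchCall(50*time.Millisecond, func() { time.Sleep(10 * time.Millisecond) })  // silent
	watchCall(50*time.Millisecond, func() { time.Sleep(100 * time.Millisecond) }) // warns
}
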
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go
index 877c69a14..fcd5cdc87 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
- "github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -81,7 +80,10 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result
}
r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -106,7 +108,10 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall
}
r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -126,7 +131,10 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16
}
r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -137,7 +145,10 @@ func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -157,7 +168,10 @@ func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **u
}
r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -177,7 +191,10 @@ func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result
}
r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -197,7 +214,10 @@ func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result
}
r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -217,7 +237,10 @@ func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **u
}
r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -237,7 +260,10 @@ func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **
}
r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -257,7 +283,10 @@ func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint
}
r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -277,7 +306,10 @@ func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, res
}
r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -288,7 +320,10 @@ func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr,
}
r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -299,7 +334,10 @@ func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -319,7 +357,10 @@ func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, proce
}
r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -330,7 +371,10 @@ func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, re
}
r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -341,7 +385,10 @@ func hcsCloseProcess(process hcsProcess) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -352,7 +399,10 @@ func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -372,7 +422,10 @@ func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr
}
r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -383,7 +436,10 @@ func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInforma
}
r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -394,7 +450,10 @@ func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, res
}
r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -414,7 +473,10 @@ func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (h
}
r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -434,7 +496,10 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result
}
r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -445,7 +510,10 @@ func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context ui
}
r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -456,7 +524,10 @@ func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
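
The repeated inline replacement of interop.Win32FromHresult in the stubs above applies the standard HRESULT rule: when the facility bits (mask 0x1fff0000) equal FACILITY_WIN32 (0x00070000), the low 16 bits hold the original Win32 error code, which is then surfaced as a syscall.Errno. A small sketch of the same check; win32FromHresult is a made-up helper name:

package main

import "fmt"

// win32FromHresult mirrors the check inlined into the generated stubs: if the
// HRESULT's facility bits equal FACILITY_WIN32 (0x0007), its low 16 bits are
// the original Win32 error code; otherwise the value is passed through.
// (The real code assigns the result to syscall.Errno; this sketch is
// platform-neutral and just returns the number.)
func win32FromHresult(r uintptr) uintptr {
	if r&0x1fff0000 == 0x00070000 {
		return r & 0xffff
	}
	return r
}

func main() {
	const hrAccessDenied uintptr = 0x80070005 // HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED)
	fmt.Printf("0x%08x -> %d\n", hrAccessDenied, win32FromHresult(hrAccessDenied)) // prints 0x80070005 -> 5
}
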
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
index c8d362c66..921c2c855 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
@@ -36,10 +36,6 @@ func New(err error, title, rest string) error {
return &HcsError{title, rest, err}
}
-func Errorf(err error, title, format string, a ...interface{}) error {
- return New(err, title, fmt.Sprintf(format, a...))
-}
-
func Win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return Win32FromError(herr.Err)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
index ce636458c..59ec7004c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
@@ -23,7 +23,9 @@ type HNSEndpoint struct {
DisableICC bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
IsRemoteEndpoint bool `json:",omitempty"`
+ EnableLowMetric bool `json:",omitempty"`
Namespace *Namespace `json:",omitempty"`
+ EncapOverhead uint16 `json:",omitempty"`
}
//SystemType represents the type of the system on which actions are done
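
Both new fields carry the same json:",omitempty" tag as the rest of HNSEndpoint, so they only appear in the HNS request document when set. A hedged sketch with a trimmed stand-in struct (not the real hns type) showing the effect:

package main

import (
	"encoding/json"
	"fmt"
)

// endpoint is a trimmed stand-in for hns.HNSEndpoint, used only to show how
// the omitempty-tagged fields behave; it is not the real type.
type endpoint struct {
	Name            string `json:",omitempty"`
	EnableLowMetric bool   `json:",omitempty"`
	EncapOverhead   uint16 `json:",omitempty"`
}

func main() {
	a, _ := json.Marshal(endpoint{Name: "ep1"})
	b, _ := json.Marshal(endpoint{Name: "ep1", EnableLowMetric: true, EncapOverhead: 50})
	fmt.Println(string(a)) // {"Name":"ep1"}; zero values are omitted
	fmt.Println(string(b)) // {"Name":"ep1","EnableLowMetric":true,"EncapOverhead":50}
}
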
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
index 3a3232a06..204633a48 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
- "github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -68,7 +67,10 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
}
r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
index f10c88d08..2f6ec029e 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
@@ -5,9 +5,9 @@ import (
"unsafe"
)
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go interop.go
+//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go
-//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
+//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree
func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:])
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
index 2f5bf8f55..12b0c71c5 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
@@ -1,4 +1,4 @@
-// Code generated by 'go generate'; DO NOT EDIT.
+// Code generated mksyscall_windows.exe DO NOT EDIT
package interop
@@ -37,9 +37,9 @@ func errnoErr(e syscall.Errno) error {
}
var (
- modole32 = windows.NewLazySystemDLL("ole32.dll")
+ modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll")
- procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+ procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree")
)
func coTaskMemFree(buffer unsafe.Pointer) {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
index a1527d706..cf2c166d9 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
@@ -26,11 +26,6 @@ const (
Uint32 = "uint32"
Uint64 = "uint64"
- // HCS
-
- HCSOperation = "hcs-op"
- HCSOperationResult = "hcs-op-result"
-
// runhcs
VMShimOperation = "vmshim-op"
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
index 0c0b1159f..f31edfaf8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
@@ -87,7 +87,7 @@ func OpenRoot(path string) (*os.File, error) {
func ntRelativePath(path string) ([]uint16, error) {
path = filepath.Clean(path)
- if strings.Contains(":", path) {
+ if strings.Contains(path, ":") {
// Since alternate data streams must follow the file they
// are attached to, finding one here (out of order) is invalid.
return nil, errors.New("path contains invalid character `:`")
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go
index b2bc58b83..eb171817a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go
@@ -20,6 +20,13 @@ type Plan9Share struct {
Port int32 `json:"Port,omitempty"`
+	// Flags are marked private. Until they are exported correctly, the known values are:
+ //
+ // ReadOnly 0x00000001
+ // LinuxMetadata 0x00000004
+ // CaseSensitive 0x00000008
+ Flags int32 `json:"Flags,omitempty"`
+
ReadOnly bool `json:"ReadOnly,omitempty"`
UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"`
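
The comment above lists the bit values the private Flags field accepts. A short sketch of composing them; the constant names are invented for illustration and are not exported by the package:

package main

import "fmt"

// Hypothetical names for the bit values listed in the schema comment above;
// the package keeps the field private and does not export constants for them.
const (
	plan9ShareReadOnly      int32 = 0x00000001
	plan9ShareLinuxMetadata int32 = 0x00000004
	plan9ShareCaseSensitive int32 = 0x00000008
)

func main() {
	// A read-only, case-sensitive share combines the bits with OR.
	flags := plan9ShareReadOnly | plan9ShareCaseSensitive
	fmt.Printf("Flags: 0x%08x\n", flags) // Flags: 0x00000009
}
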
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go
index 11f39eea7..2d22b1bcb 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go
@@ -11,6 +11,9 @@ package hcsschema
type VirtualMachine struct {
+ // StopOnReset is private in the schema. If regenerated need to put back.
+ StopOnReset bool `json:"StopOnReset,omitempty"`
+
Chipset *Chipset `json:"Chipset,omitempty"`
ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
index 3a0d4bc58..dcb919268 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
@@ -9,17 +9,24 @@ import (
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
-func ActivateLayer(path string) error {
- title := "hcsshim::ActivateLayer "
- logrus.Debugf(title+"path %s", path)
+func ActivateLayer(path string) (err error) {
+ title := "hcsshim::ActivateLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := activateLayer(&stdDriverInfo, path)
+ err = activateLayer(&stdDriverInfo, path)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+" - succeeded path=%s", path)
return nil
}
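
The same rewrite is applied to every wclayer entry point in the hunks that follow: build a logrus.Fields map once, log it at Debug on entry, and let one deferred closure log either the error or a "- succeeded" line based on the named return value. A condensed, self-contained sketch of the pattern with a placeholder operation (doLayerOp is not part of hcsshim):

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// doLayerOp shows the entry/exit logging shape used across wclayer: the
// deferred closure inspects the named return value err after the body has run.
func doLayerOp(path string) (err error) {
	title := "example::DoLayerOp" // placeholder operation name
	fields := logrus.Fields{"path": path}
	logrus.WithFields(fields).Debug(title)
	defer func() {
		if err != nil {
			fields[logrus.ErrorKey] = err
			logrus.WithFields(fields).Error(err)
		} else {
			logrus.WithFields(fields).Debug(title + " - succeeded")
		}
	}()

	// Stand-in for the real syscall wrapper.
	if path == "" {
		return errors.New("path must not be empty")
	}
	return nil
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	_ = doLayerOp(`C:\layers\base`)
	_ = doLayerOp("")
}
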
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
index d15817730..be2bc3fd6 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
@@ -7,17 +7,25 @@ import (
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
-func CreateLayer(path, parent string) error {
- title := "hcsshim::CreateLayer "
- logrus.Debugf(title+"ID %s parent %s", path, parent)
+func CreateLayer(path, parent string) (err error) {
+ title := "hcsshim::CreateLayer"
+ fields := logrus.Fields{
+ "parent": parent,
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := createLayer(&stdDriverInfo, path, parent)
+ err = createLayer(&stdDriverInfo, path, parent)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s parent=%s", path, parent)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"- succeeded path=%s parent=%s", path, parent)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
index bf2fece19..7e3351289 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
@@ -9,9 +9,20 @@ import (
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
-func CreateScratchLayer(path string, parentLayerPaths []string) error {
- title := "hcsshim::CreateScratchLayer "
- logrus.Debugf(title+"path %s", path)
+func CreateScratchLayer(path string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::CreateScratchLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@@ -21,11 +32,7 @@ func CreateScratchLayer(path string, parentLayerPaths []string) error {
err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"- succeeded path=%s", path)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
index b998f8a19..2dd5d5715 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
@@ -6,17 +6,24 @@ import (
)
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
-func DeactivateLayer(path string) error {
- title := "hcsshim::DeactivateLayer "
- logrus.Debugf(title+"path %s", path)
+func DeactivateLayer(path string) (err error) {
+ title := "hcsshim::DeactivateLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := deactivateLayer(&stdDriverInfo, path)
+ err = deactivateLayer(&stdDriverInfo, path)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+"- failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s", path)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
index dc14cecc4..4da690c20 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
@@ -7,17 +7,24 @@ import (
// DestroyLayer will remove the on-disk files representing the layer with the given
// path, including that layer's containing folder, if any.
-func DestroyLayer(path string) error {
- title := "hcsshim::DestroyLayer "
- logrus.Debugf(title+"path %s", path)
+func DestroyLayer(path string) (err error) {
+ title := "hcsshim::DestroyLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := destroyLayer(&stdDriverInfo, path)
+ err = destroyLayer(&stdDriverInfo, path)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s", path)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
index 7832bb452..651676fb2 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
@@ -6,17 +6,25 @@ import (
)
// ExpandScratchSize expands the size of a layer to at least size bytes.
-func ExpandScratchSize(path string, size uint64) error {
- title := "hcsshim::ExpandScratchSize "
- logrus.Debugf(title+"path=%s size=%d", path, size)
+func ExpandScratchSize(path string, size uint64) (err error) {
+ title := "hcsshim::ExpandScratchSize"
+ fields := logrus.Fields{
+ "path": path,
+ "size": size,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := expandSandboxSize(&stdDriverInfo, path, size)
+ err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s size=%d", path, size)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"- succeeded path=%s size=%d", path, size)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
index d43e941ee..0425b3395 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
@@ -14,9 +14,21 @@ import (
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
-func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) error {
- title := "hcsshim::ExportLayer "
- logrus.Debugf(title+"path %s folder %s", path, exportFolderPath)
+func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::ExportLayer"
+ fields := logrus.Fields{
+ "path": path,
+ "exportFolderPath": exportFolderPath,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@@ -26,12 +38,8 @@ func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s folder=%s", path, exportFolderPath)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s folder=%s", path, exportFolderPath)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
index 8c37549a0..d60b6ed53 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
@@ -11,20 +11,29 @@ import (
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
-func GetLayerMountPath(path string) (string, error) {
- title := "hcsshim::GetLayerMountPath "
- logrus.Debugf(title+"path %s", path)
+func GetLayerMountPath(path string) (_ string, err error) {
+ title := "hcsshim::GetLayerMountPath"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
var mountPathLength uintptr
mountPathLength = 0
// Call the procedure itself.
- logrus.Debugf("Calling proc (1)")
- err := getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
+ logrus.WithFields(fields).Debug("Calling proc (1)")
+ err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
- err = hcserror.Errorf(err, title, "(first call) path=%s", path)
- logrus.Error(err)
- return "", err
+ return "", hcserror.New(err, title+" - failed", "(first call)")
}
// Allocate a mount path of the returned length.
@@ -35,15 +44,13 @@ func GetLayerMountPath(path string) (string, error) {
mountPathp[0] = 0
// Call the procedure again
- logrus.Debugf("Calling proc (2)")
+ logrus.WithFields(fields).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
- err = hcserror.Errorf(err, title, "(second call) path=%s", path)
- logrus.Error(err)
- return "", err
+ return "", hcserror.New(err, title+" - failed", "(second call)")
}
mountPath := syscall.UTF16ToString(mountPathp[0:])
- logrus.Debugf(title+"succeeded path=%s mountPath=%s", path, mountPath)
+ fields["mountPath"] = mountPath
return mountPath, nil
}
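
GetLayerMountPath above follows the common Win32 two-call protocol: call once with a nil buffer to learn the required length in UTF-16 units, allocate, then call again to fill the buffer. A platform-neutral sketch of the same shape; fakeQuery stands in for the real getLayerMountPath syscall wrapper:

package main

import (
	"fmt"
	"unicode/utf16"
)

// fakeQuery stands in for getLayerMountPath: with a nil buffer it only reports
// the required length (in UTF-16 units, including the terminating NUL); with a
// buffer it writes the string. Purely illustrative.
func fakeQuery(length *uintptr, buf []uint16) error {
	payload := utf16.Encode([]rune(`\\?\Volume{00000000-0000-0000-0000-000000000000}\`))
	if buf == nil {
		*length = uintptr(len(payload) + 1)
		return nil
	}
	copy(buf, payload)
	return nil
}

func main() {
	// Call 1: learn the required buffer length.
	var length uintptr
	if err := fakeQuery(&length, nil); err != nil {
		panic(err)
	}

	// Call 2: allocate that many UTF-16 units and fetch the path.
	buf := make([]uint16, length)
	if err := fakeQuery(&length, buf); err != nil {
		panic(err)
	}

	// Decode up to the terminating NUL, as syscall.UTF16ToString does on Windows.
	end := 0
	for end < len(buf) && buf[end] != 0 {
		end++
	}
	fmt.Println(string(utf16.Decode(buf[:end])))
}
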
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
index 10899c68a..dbd83ef2b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
@@ -10,17 +10,20 @@ import (
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) {
- title := "hcsshim::GetSharedBaseImages "
+ title := "hcsshim::GetSharedBaseImages"
+ logrus.Debug(title)
+ defer func() {
+ if err != nil {
+ logrus.WithError(err).Error(err)
+ } else {
+ logrus.WithField("imageData", imageData).Debug(title + " - succeeded")
+ }
+ }()
- logrus.Debugf("Calling proc")
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
- err = hcserror.New(err, title, "")
- logrus.Error(err)
- return
+ return "", hcserror.New(err, title+" - failed", "")
}
- imageData = interop.ConvertAndFreeCoTaskMemString(buffer)
- logrus.Debugf(title+" - succeeded output=%s", imageData)
- return
+ return interop.ConvertAndFreeCoTaskMemString(buffer), nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
index d86e67827..05735df6c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
@@ -1,24 +1,30 @@
package wclayer
import (
- "fmt"
-
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// GrantVmAccess adds access to a file for a given VM
-func GrantVmAccess(vmid string, filepath string) error {
- title := fmt.Sprintf("hcsshim::GrantVmAccess id:%s path:%s ", vmid, filepath)
- logrus.Debugf(title)
+func GrantVmAccess(vmid string, filepath string) (err error) {
+ title := "hcsshim::GrantVmAccess"
+ fields := logrus.Fields{
+ "vm-id": vmid,
+ "path": filepath,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := grantVmAccess(vmid, filepath)
+ err = grantVmAccess(vmid, filepath)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", filepath)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title + " - succeeded")
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
index 486d55470..76a804f2a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
@@ -15,9 +15,21 @@ import (
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
-func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) error {
- title := "hcsshim::ImportLayer "
- logrus.Debugf(title+"path %s folder %s", path, importFolderPath)
+func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::ImportLayer"
+ fields := logrus.Fields{
+ "path": path,
+ "importFolderPath": importFolderPath,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@@ -27,12 +39,8 @@ func ImportLayer(path string, importFolderPath string, parentLayerPaths []string
err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s folder=%s", path, importFolderPath)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s folder=%s", path, importFolderPath)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
index 71287ff8a..258167a57 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
@@ -7,19 +7,27 @@ import (
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
-func LayerExists(path string) (bool, error) {
- title := "hcsshim::LayerExists "
- logrus.Debugf(title+"path %s", path)
+func LayerExists(path string) (_ bool, err error) {
+ title := "hcsshim::LayerExists"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
// Call the procedure itself.
var exists uint32
- err := layerExists(&stdDriverInfo, path, &exists)
+ err = layerExists(&stdDriverInfo, path, &exists)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return false, err
+ return false, hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s exists=%d", path, exists)
+ fields["layer-exists"] = exists != 0
return exists != 0, nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
index a1b8b9882..6d0ae8a07 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
@@ -75,13 +75,13 @@ func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR,
for i := 0; i < len(parentLayerPaths); i++ {
g, err := LayerID(parentLayerPaths[i])
if err != nil {
- logrus.Debugf("Failed to convert name to guid %s", err)
+ logrus.WithError(err).Debug("Failed to convert name to guid")
return nil, err
}
p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
if err != nil {
- logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
+ logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer")
return nil, err
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
index 741994ba4..45a63cf65 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -10,15 +10,25 @@ import (
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id guid.GUID, err error) {
- title := "hcsshim::NameToGuid "
+ title := "hcsshim::NameToGuid"
+ fields := logrus.Fields{
+ "name": name,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
err = nameToGuid(name, &id)
if err != nil {
- err = hcserror.Errorf(err, title, "name=%s", name)
- logrus.Error(err)
+ err = hcserror.New(err, title+" - failed", "")
return
}
-
- logrus.Debugf(title+"name:%s guid:%s", name, id.String())
+ fields["guid"] = id.String()
return
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
index bd4005dc4..2b65b0186 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -14,9 +14,20 @@ var prepareLayerLock sync.Mutex
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
-func PrepareLayer(path string, parentLayerPaths []string) error {
- title := "hcsshim::PrepareLayer "
- logrus.Debugf(title+"path %s", path)
+func PrepareLayer(path string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::PrepareLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@@ -30,11 +41,7 @@ func PrepareLayer(path string, parentLayerPaths []string) error {
defer prepareLayerLock.Unlock()
err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s", path)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
index 5f1b4f4f4..bccd45969 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
@@ -7,17 +7,24 @@ import (
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
-func UnprepareLayer(path string) error {
- title := "hcsshim::UnprepareLayer "
- logrus.Debugf(title+"path %s", path)
+func UnprepareLayer(path string) (err error) {
+ title := "hcsshim::UnprepareLayer"
+ fields := logrus.Fields{
+ "path": path,
+ }
+ logrus.WithFields(fields).Debug(title)
+ defer func() {
+ if err != nil {
+ fields[logrus.ErrorKey] = err
+ logrus.WithFields(fields).Error(err)
+ } else {
+ logrus.WithFields(fields).Debug(title + " - succeeded")
+ }
+ }()
- err := unprepareLayer(&stdDriverInfo, path)
+ err = unprepareLayer(&stdDriverInfo, path)
if err != nil {
- err = hcserror.Errorf(err, title, "path=%s", path)
- logrus.Error(err)
- return err
+ return hcserror.New(err, title+" - failed", "")
}
-
- logrus.Debugf(title+"succeeded path=%s", path)
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
index 2105703dc..d853ab259 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
- "github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -75,7 +74,10 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -104,7 +106,10 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC
}
r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -129,7 +134,10 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -153,7 +161,10 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto
}
r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -173,7 +184,10 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
}
r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -193,7 +207,10 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -213,7 +230,10 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -242,7 +262,10 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
}
r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -262,7 +285,10 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u
}
r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -273,7 +299,10 @@ func getBaseImages(buffer **uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -302,7 +331,10 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
}
r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -322,7 +354,10 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
}
r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -342,7 +377,10 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) {
}
r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -366,7 +404,10 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT
}
r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -386,7 +427,10 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -406,7 +450,10 @@ func _processBaseImage(path *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -426,7 +473,10 @@ func _processUtilityImage(path *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
@@ -451,7 +501,10 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
new file mode 100644
index 000000000..7647734de
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
@@ -0,0 +1,943 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+mksyscall_windows generates windows system call bodies
+
+It parses all files specified on command line containing function
+prototypes (like syscall_windows.go) and prints system call bodies
+to standard output.
+
+The prototypes are marked by lines beginning with "//sys" and read
+like func declarations if //sys is replaced by func, but:
+
+* The parameter lists must give a name for each argument. This
+ includes return parameters.
+
+* The parameter lists must give a type for each argument:
+ the (x, y, z int) shorthand is not allowed.
+
+* If the return parameter is an error number, it must be named err.
+
+* If the Go func name needs to be different from its winapi dll name,
+  the winapi name can be specified at the end, after the "=" sign, like
+ //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
+
+* Each function that returns err needs to supply a condition that the
+  winapi return value will be tested against to detect failure. On
+  failure, err is set to the Windows "last-error"; otherwise it is nil.
+  The condition can be provided at the end of the //sys declaration, like
+ //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
+ and is [failretval==0] by default.
+
+Usage:
+ mksyscall_windows [flags] [path ...]
+
+The flags are:
+ -output
+ Specify output file name (outputs to console if blank).
+ -trace
+ Generate print statement after every syscall.
+*/
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "text/template"
+)
+
+var (
+ filename = flag.String("output", "", "output file name (standard output if omitted)")
+ printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
+ systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
+ winio = flag.Bool("winio", false, "import go-winio")
+)
+
+func trim(s string) string {
+ return strings.Trim(s, " \t")
+}
+
+var packageName string
+
+func packagename() string {
+ return packageName
+}
+
+func syscalldot() string {
+ if packageName == "syscall" {
+ return ""
+ }
+ return "syscall."
+}
+
+// Param is a function parameter.
+type Param struct {
+ Name string
+ Type string
+ fn *Fn
+ tmpVarIdx int
+}
+
+// tmpVar returns temp variable name that will be used to represent p during syscall.
+func (p *Param) tmpVar() string {
+ if p.tmpVarIdx < 0 {
+ p.tmpVarIdx = p.fn.curTmpVarIdx
+ p.fn.curTmpVarIdx++
+ }
+ return fmt.Sprintf("_p%d", p.tmpVarIdx)
+}
+
+// BoolTmpVarCode returns source code for bool temp variable.
+func (p *Param) BoolTmpVarCode() string {
+ const code = `var %s uint32
+ if %s {
+ %s = 1
+ } else {
+ %s = 0
+ }`
+ tmp := p.tmpVar()
+ return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
+}
+
+// SliceTmpVarCode returns source code for slice temp variable.
+func (p *Param) SliceTmpVarCode() string {
+ const code = `var %s *%s
+ if len(%s) > 0 {
+ %s = &%s[0]
+ }`
+ tmp := p.tmpVar()
+ return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
+}
+
+// StringTmpVarCode returns source code for string temp variable.
+func (p *Param) StringTmpVarCode() string {
+ errvar := p.fn.Rets.ErrorVarName()
+ if errvar == "" {
+ errvar = "_"
+ }
+ tmp := p.tmpVar()
+ const code = `var %s %s
+ %s, %s = %s(%s)`
+ s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
+ if errvar == "-" {
+ return s
+ }
+ const morecode = `
+ if %s != nil {
+ return
+ }`
+ return s + fmt.Sprintf(morecode, errvar)
+}
+
+// TmpVarCode returns source code for temp variable.
+func (p *Param) TmpVarCode() string {
+ switch {
+ case p.Type == "bool":
+ return p.BoolTmpVarCode()
+ case strings.HasPrefix(p.Type, "[]"):
+ return p.SliceTmpVarCode()
+ default:
+ return ""
+ }
+}
+
+// TmpVarHelperCode returns source code for helper's temp variable.
+func (p *Param) TmpVarHelperCode() string {
+ if p.Type != "string" {
+ return ""
+ }
+ return p.StringTmpVarCode()
+}
+
+// SyscallArgList returns source code fragments representing p parameter
+// in syscall. Slices are translated into 2 syscall parameters: pointer to
+// the first element and length.
+func (p *Param) SyscallArgList() []string {
+ t := p.HelperType()
+ var s string
+ switch {
+ case t[0] == '*':
+ s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
+ case t == "bool":
+ s = p.tmpVar()
+ case strings.HasPrefix(t, "[]"):
+ return []string{
+ fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
+ fmt.Sprintf("uintptr(len(%s))", p.Name),
+ }
+ default:
+ s = p.Name
+ }
+ return []string{fmt.Sprintf("uintptr(%s)", s)}
+}
+
+// IsError determines if p parameter is used to return error.
+func (p *Param) IsError() bool {
+ return p.Name == "err" && p.Type == "error"
+}
+
+// HelperType returns type of parameter p used in helper function.
+func (p *Param) HelperType() string {
+ if p.Type == "string" {
+ return p.fn.StrconvType()
+ }
+ return p.Type
+}
+
+// join concatenates parameters ps into a single string with the sep separator.
+// Each parameter is converted into a string by applying fn to it before joining.
+func join(ps []*Param, fn func(*Param) string, sep string) string {
+ if len(ps) == 0 {
+ return ""
+ }
+ a := make([]string, 0)
+ for _, p := range ps {
+ a = append(a, fn(p))
+ }
+ return strings.Join(a, sep)
+}
+
+// Rets describes function return parameters.
+type Rets struct {
+ Name string
+ Type string
+ ReturnsError bool
+ FailCond string
+}
+
+// ErrorVarName returns error variable name for r.
+func (r *Rets) ErrorVarName() string {
+ if r.ReturnsError {
+ return "err"
+ }
+ if r.Type == "error" {
+ return r.Name
+ }
+ return ""
+}
+
+// ToParams converts r into slice of *Param.
+func (r *Rets) ToParams() []*Param {
+ ps := make([]*Param, 0)
+ if len(r.Name) > 0 {
+ ps = append(ps, &Param{Name: r.Name, Type: r.Type})
+ }
+ if r.ReturnsError {
+ ps = append(ps, &Param{Name: "err", Type: "error"})
+ }
+ return ps
+}
+
+// List returns source code of syscall return parameters.
+func (r *Rets) List() string {
+ s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
+ if len(s) > 0 {
+ s = "(" + s + ")"
+ }
+ return s
+}
+
+// PrintList returns source code of the trace printing part corresponding
+// to syscall return values.
+func (r *Rets) PrintList() string {
+ return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
+}
+
+// SetReturnValuesCode returns source code that accepts syscall return values.
+func (r *Rets) SetReturnValuesCode() string {
+ if r.Name == "" && !r.ReturnsError {
+ return ""
+ }
+ retvar := "r0"
+ if r.Name == "" {
+ retvar = "r1"
+ }
+ errvar := "_"
+ if r.ReturnsError {
+ errvar = "e1"
+ }
+ return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
+}
+
+func (r *Rets) useLongHandleErrorCode(retvar string) string {
+ const code = `if %s {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = %sEINVAL
+ }
+ }`
+ cond := retvar + " == 0"
+ if r.FailCond != "" {
+ cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
+ }
+ return fmt.Sprintf(code, cond, syscalldot())
+}
+
+// SetErrorCode returns source code that sets return parameters.
+func (r *Rets) SetErrorCode() string {
+ const code = `if r0 != 0 {
+ %s = %sErrno(r0)
+ }`
+ const hrCode = `if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ %s = %sErrno(r0)
+ }`
+ if r.Name == "" && !r.ReturnsError {
+ return ""
+ }
+ if r.Name == "" {
+ return r.useLongHandleErrorCode("r1")
+ }
+ if r.Type == "error" {
+ if r.Name == "hr" {
+ return fmt.Sprintf(hrCode, r.Name, syscalldot())
+ } else {
+ return fmt.Sprintf(code, r.Name, syscalldot())
+ }
+ }
+ s := ""
+ switch {
+ case r.Type[0] == '*':
+ s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
+ case r.Type == "bool":
+ s = fmt.Sprintf("%s = r0 != 0", r.Name)
+ default:
+ s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
+ }
+ if !r.ReturnsError {
+ return s
+ }
+ return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
+}
+
+// Fn describes syscall function.
+type Fn struct {
+ Name string
+ Params []*Param
+ Rets *Rets
+ PrintTrace bool
+ confirmproc bool
+ dllname string
+ dllfuncname string
+ src string
+ // TODO: get rid of this field and just use parameter index instead
+ curTmpVarIdx int // ensure tmp variables have unique names
+}
+
+// extractParams parses s to extract function parameters.
+func extractParams(s string, f *Fn) ([]*Param, error) {
+ s = trim(s)
+ if s == "" {
+ return nil, nil
+ }
+ a := strings.Split(s, ",")
+ ps := make([]*Param, len(a))
+ for i := range ps {
+ s2 := trim(a[i])
+ b := strings.Split(s2, " ")
+ if len(b) != 2 {
+ b = strings.Split(s2, "\t")
+ if len(b) != 2 {
+ return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
+ }
+ }
+ ps[i] = &Param{
+ Name: trim(b[0]),
+ Type: trim(b[1]),
+ fn: f,
+ tmpVarIdx: -1,
+ }
+ }
+ return ps, nil
+}
+
+// extractSection extracts text out of string s starting after start
+// and ending just before end. The found return value indicates success,
+// and prefix, body and suffix contain the corresponding parts of string s.
+func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
+ s = trim(s)
+ if strings.HasPrefix(s, string(start)) {
+ // no prefix
+ body = s[1:]
+ } else {
+ a := strings.SplitN(s, string(start), 2)
+ if len(a) != 2 {
+ return "", "", s, false
+ }
+ prefix = a[0]
+ body = a[1]
+ }
+ a := strings.SplitN(body, string(end), 2)
+ if len(a) != 2 {
+ return "", "", "", false
+ }
+ return prefix, a[0], a[1], true
+}
+
+// newFn parses string s and returns the created function Fn.
+func newFn(s string) (*Fn, error) {
+ s = trim(s)
+ f := &Fn{
+ Rets: &Rets{},
+ src: s,
+ PrintTrace: *printTraceFlag,
+ }
+ // function name and args
+ prefix, body, s, found := extractSection(s, '(', ')')
+ if !found || prefix == "" {
+ return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
+ }
+ f.Name = prefix
+ var err error
+ f.Params, err = extractParams(body, f)
+ if err != nil {
+ return nil, err
+ }
+ // return values
+ _, body, s, found = extractSection(s, '(', ')')
+ if found {
+ r, err := extractParams(body, f)
+ if err != nil {
+ return nil, err
+ }
+ switch len(r) {
+ case 0:
+ case 1:
+ if r[0].IsError() {
+ f.Rets.ReturnsError = true
+ } else {
+ f.Rets.Name = r[0].Name
+ f.Rets.Type = r[0].Type
+ }
+ case 2:
+ if !r[1].IsError() {
+ return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
+ }
+ f.Rets.ReturnsError = true
+ f.Rets.Name = r[0].Name
+ f.Rets.Type = r[0].Type
+ default:
+ return nil, errors.New("Too many return values in \"" + f.src + "\"")
+ }
+ }
+ // fail condition
+ _, body, s, found = extractSection(s, '[', ']')
+ if found {
+ f.Rets.FailCond = body
+ }
+ // dll and dll function names
+ s = trim(s)
+ if s == "" {
+ return f, nil
+ }
+ if !strings.HasPrefix(s, "=") {
+ return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
+ }
+ s = trim(s[1:])
+ a := strings.Split(s, ".")
+ switch len(a) {
+ case 1:
+ f.dllfuncname = a[0]
+ case 2:
+ f.dllname = a[0]
+ f.dllfuncname = a[1]
+ default:
+ return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
+ }
+ if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
+ f.confirmproc = true
+ f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
+ }
+ return f, nil
+}
+
+// DLLName returns DLL name for function f.
+func (f *Fn) DLLName() string {
+ if f.dllname == "" {
+ return "kernel32"
+ }
+ return f.dllname
+}
+
+// DLLFuncName returns the DLL function name for function f.
+func (f *Fn) DLLFuncName() string {
+ if f.dllfuncname == "" {
+ return f.Name
+ }
+ return f.dllfuncname
+}
+
+func (f *Fn) ConfirmProc() bool {
+ return f.confirmproc
+}
+
+// ParamList returns source code for function f parameters.
+func (f *Fn) ParamList() string {
+ return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
+}
+
+// HelperParamList returns source code for helper function f parameters.
+func (f *Fn) HelperParamList() string {
+ return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
+}
+
+// ParamPrintList returns source code of the trace printing part corresponding
+// to syscall input parameters.
+func (f *Fn) ParamPrintList() string {
+ return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
+}
+
+// ParamCount returns the number of syscall parameters for function f.
+func (f *Fn) ParamCount() int {
+ n := 0
+ for _, p := range f.Params {
+ n += len(p.SyscallArgList())
+ }
+ return n
+}
+
+// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
+// to use. It returns the parameter count for the corresponding SyscallX function.
+func (f *Fn) SyscallParamCount() int {
+ n := f.ParamCount()
+ switch {
+ case n <= 3:
+ return 3
+ case n <= 6:
+ return 6
+ case n <= 9:
+ return 9
+ case n <= 12:
+ return 12
+ case n <= 15:
+ return 15
+ default:
+ panic("too many arguments to system call")
+ }
+}
+
+// Syscall determines which SyscallX function to use for function f.
+func (f *Fn) Syscall() string {
+ c := f.SyscallParamCount()
+ if c == 3 {
+ return syscalldot() + "Syscall"
+ }
+ return syscalldot() + "Syscall" + strconv.Itoa(c)
+}
+
+// SyscallParamList returns source code for SyscallX parameters for function f.
+func (f *Fn) SyscallParamList() string {
+ a := make([]string, 0)
+ for _, p := range f.Params {
+ a = append(a, p.SyscallArgList()...)
+ }
+ for len(a) < f.SyscallParamCount() {
+ a = append(a, "0")
+ }
+ return strings.Join(a, ", ")
+}
+
+// HelperCallParamList returns source code of call into function f helper.
+func (f *Fn) HelperCallParamList() string {
+ a := make([]string, 0, len(f.Params))
+ for _, p := range f.Params {
+ s := p.Name
+ if p.Type == "string" {
+ s = p.tmpVar()
+ }
+ a = append(a, s)
+ }
+ return strings.Join(a, ", ")
+}
+
+// IsUTF16 is true if f is a W (utf16) function. It is false
+// for all A (ascii) functions.
+func (_ *Fn) IsUTF16() bool {
+ return true
+}
+
+// StrconvFunc returns name of Go string to OS string function for f.
+func (f *Fn) StrconvFunc() string {
+ if f.IsUTF16() {
+ return syscalldot() + "UTF16PtrFromString"
+ }
+ return syscalldot() + "BytePtrFromString"
+}
+
+// StrconvType returns Go type name used for OS string for f.
+func (f *Fn) StrconvType() string {
+ if f.IsUTF16() {
+ return "*uint16"
+ }
+ return "*byte"
+}
+
+// HasStringParam is true if f has at least one string parameter.
+// Otherwise it is false.
+func (f *Fn) HasStringParam() bool {
+ for _, p := range f.Params {
+ if p.Type == "string" {
+ return true
+ }
+ }
+ return false
+}
+
+var uniqDllFuncName = make(map[string]bool)
+
+// IsNotDuplicate is true if f is not a duplicated function
+func (f *Fn) IsNotDuplicate() bool {
+ funcName := f.DLLFuncName()
+ if uniqDllFuncName[funcName] == false {
+ uniqDllFuncName[funcName] = true
+ return true
+ }
+ return false
+}
+
+// HelperName returns name of function f helper.
+func (f *Fn) HelperName() string {
+ if !f.HasStringParam() {
+ return f.Name
+ }
+ return "_" + f.Name
+}
+
+// Source files and functions.
+type Source struct {
+ Funcs []*Fn
+ Files []string
+ StdLibImports []string
+ ExternalImports []string
+}
+
+func (src *Source) Import(pkg string) {
+ src.StdLibImports = append(src.StdLibImports, pkg)
+ sort.Strings(src.StdLibImports)
+}
+
+func (src *Source) ExternalImport(pkg string) {
+ src.ExternalImports = append(src.ExternalImports, pkg)
+ sort.Strings(src.ExternalImports)
+}
+
+// ParseFiles parses files listed in fs and extracts all syscall
+// functions listed in sys comments. It returns source files
+// and functions collection *Source if successful.
+func ParseFiles(fs []string) (*Source, error) {
+ src := &Source{
+ Funcs: make([]*Fn, 0),
+ Files: make([]string, 0),
+ StdLibImports: []string{
+ "unsafe",
+ },
+ ExternalImports: make([]string, 0),
+ }
+ for _, file := range fs {
+ if err := src.ParseFile(file); err != nil {
+ return nil, err
+ }
+ }
+ return src, nil
+}
+
+// DLLs returns the dll names for a source set src.
+func (src *Source) DLLs() []string {
+ uniq := make(map[string]bool)
+ r := make([]string, 0)
+ for _, f := range src.Funcs {
+ name := f.DLLName()
+ if _, found := uniq[name]; !found {
+ uniq[name] = true
+ r = append(r, name)
+ }
+ }
+ return r
+}
+
+// ParseFile adds additional file path to a source set src.
+func (src *Source) ParseFile(path string) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ t := trim(s.Text())
+ if len(t) < 7 {
+ continue
+ }
+ if !strings.HasPrefix(t, "//sys") {
+ continue
+ }
+ t = t[5:]
+ if !(t[0] == ' ' || t[0] == '\t') {
+ continue
+ }
+ f, err := newFn(t[1:])
+ if err != nil {
+ return err
+ }
+ src.Funcs = append(src.Funcs, f)
+ }
+ if err := s.Err(); err != nil {
+ return err
+ }
+ src.Files = append(src.Files, path)
+
+ // get package name
+ fset := token.NewFileSet()
+ _, err = file.Seek(0, 0)
+ if err != nil {
+ return err
+ }
+ pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
+ if err != nil {
+ return err
+ }
+ packageName = pkg.Name.Name
+
+ return nil
+}
+
+// IsStdRepo returns true if src is part of standard library.
+func (src *Source) IsStdRepo() (bool, error) {
+ if len(src.Files) == 0 {
+ return false, errors.New("no input files provided")
+ }
+ abspath, err := filepath.Abs(src.Files[0])
+ if err != nil {
+ return false, err
+ }
+ goroot := runtime.GOROOT()
+ if runtime.GOOS == "windows" {
+ abspath = strings.ToLower(abspath)
+ goroot = strings.ToLower(goroot)
+ }
+ sep := string(os.PathSeparator)
+ if !strings.HasSuffix(goroot, sep) {
+ goroot += sep
+ }
+ return strings.HasPrefix(abspath, goroot), nil
+}
+
+// Generate output source file from a source set src.
+func (src *Source) Generate(w io.Writer) error {
+ const (
+ pkgStd = iota // any package in std library
+ pkgXSysWindows // x/sys/windows package
+ pkgOther
+ )
+ isStdRepo, err := src.IsStdRepo()
+ if err != nil {
+ return err
+ }
+ var pkgtype int
+ switch {
+ case isStdRepo:
+ pkgtype = pkgStd
+ case packageName == "windows":
+ // TODO: this needs better logic than just using package name
+ pkgtype = pkgXSysWindows
+ default:
+ pkgtype = pkgOther
+ }
+ if *systemDLL {
+ switch pkgtype {
+ case pkgStd:
+ src.Import("internal/syscall/windows/sysdll")
+ case pkgXSysWindows:
+ default:
+ src.ExternalImport("golang.org/x/sys/windows")
+ }
+ }
+ if *winio {
+ src.ExternalImport("github.com/Microsoft/go-winio")
+ }
+ if packageName != "syscall" {
+ src.Import("syscall")
+ }
+ funcMap := template.FuncMap{
+ "packagename": packagename,
+ "syscalldot": syscalldot,
+ "newlazydll": func(dll string) string {
+ arg := "\"" + dll + ".dll\""
+ if !*systemDLL {
+ return syscalldot() + "NewLazyDLL(" + arg + ")"
+ }
+ if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
+ arg = strings.Replace(arg, "_", "-", -1)
+ }
+ switch pkgtype {
+ case pkgStd:
+ return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
+ case pkgXSysWindows:
+ return "NewLazySystemDLL(" + arg + ")"
+ default:
+ return "windows.NewLazySystemDLL(" + arg + ")"
+ }
+ },
+ }
+ t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
+ err = t.Execute(w, src)
+ if err != nil {
+ return errors.New("Failed to execute template: " + err.Error())
+ }
+ return nil
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
+ flag.PrintDefaults()
+ os.Exit(1)
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if len(flag.Args()) <= 0 {
+ fmt.Fprintf(os.Stderr, "no files to parse provided\n")
+ usage()
+ }
+
+ src, err := ParseFiles(flag.Args())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ if err := src.Generate(&buf); err != nil {
+ log.Fatal(err)
+ }
+
+ data, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ if *filename == "" {
+ _, err = os.Stdout.Write(data)
+ } else {
+ err = ioutil.WriteFile(*filename, data, 0644)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// TODO: use println instead to print in the following template
+const srcTemplate = `
+
+{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
+
+package {{packagename}}
+
+import (
+{{range .StdLibImports}}"{{.}}"
+{{end}}
+
+{{range .ExternalImports}}"{{.}}"
+{{end}}
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e {{syscalldot}}Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+{{template "dlls" .}}
+{{template "funcnames" .}})
+{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
+{{end}}
+
+{{/* help functions */}}
+
+{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
+{{end}}{{end}}
+
+{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
+{{end}}{{end}}
+
+{{define "helperbody"}}
+func {{.Name}}({{.ParamList}}) {{template "results" .}}{
+{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
+}
+{{end}}
+
+{{define "funcbody"}}
+func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
+{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
+{{template "seterror" .}}{{template "printtrace" .}} return
+}
+{{end}}
+
+{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
+{{end}}{{end}}{{end}}
+
+{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
+{{end}}{{end}}{{end}}
+
+{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
+
+{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
+
+{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
+ return
+}
+{{end}}{{end}}
+
+
+{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
+{{end}}{{end}}
+
+{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
+{{end}}{{end}}
+
+`
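The hrCode branch of SetErrorCode above is what emits the inlined HRESULT handling seen in the zsyscall_windows.go hunks earlier in this diff. As a rough, Windows-only sketch (the prototype, DLL and proc names below are illustrative and not taken from the patch), a //sys line with an error result named hr is rendered by the funcbody template into something of this shape:

```go
package hcsshim

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

// Hypothetical prototype fed to the generator:
//sys processBaseImage(path *uint16) (hr error) = vmcompute.ProcessBaseImage

// Approximate output of the "dlls", "funcnames" and "funcbody" templates:
var (
	modvmcompute         = windows.NewLazySystemDLL("vmcompute.dll")
	procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage")
)

func processBaseImage(path *uint16) (hr error) {
	r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
	if int32(r0) < 0 {
		// HRESULTs wrapping a Win32 error keep the code in the low 16 bits.
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
```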
diff --git a/vendor/github.com/Microsoft/hcsshim/vendor.conf b/vendor/github.com/Microsoft/hcsshim/vendor.conf
new file mode 100644
index 000000000..6e0ed1566
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/vendor.conf
@@ -0,0 +1,21 @@
+github.com/blang/semver v3.1.0
+github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
+github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
+github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
+github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
+github.com/konsorten/go-windows-terminal-sequences v1.0.1
+github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c
+github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2
+github.com/Microsoft/opengcs v0.3.9
+github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
+github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88
+github.com/pkg/errors v0.8.1
+github.com/sirupsen/logrus v1.3.0
+github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
+github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
+github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
+github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
+github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
+golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
+golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f
+golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb \ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
index d2dff9cb0..8bed84857 100644
--- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
- "github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -46,7 +45,10 @@ var (
func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
if int32(r0) < 0 {
- hr = interop.Win32FromHresult(r0)
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
}
return
}
diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore
new file mode 100644
index 000000000..6c7104aef
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+.*.sw?
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/vendor/github.com/beorn7/perks/LICENSE
index 8edd8175a..339177be6 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014 Amit Kumar Gupta
+Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 000000000..1602287d7
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 000000000..d7d14f8eb
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
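For orientation, the Stream added above is normally driven through NewTargeted (or NewLowBiased/NewHighBiased), Insert and Query. A minimal usage sketch using only the API visible in stream.go; the target quantiles, epsilons and input values are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile, each with its own
	// absolute error bound.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println("count:", q.Count())   // 1000
	fmt.Println("p50:", q.Query(0.50)) // roughly 500
	fmt.Println("p99:", q.Query(0.99)) // roughly 990
}
```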
diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml
new file mode 100644
index 000000000..102fb9a69
--- /dev/null
+++ b/vendor/github.com/blang/semver/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+matrix:
+ include:
+ - go: 1.4.3
+ - go: 1.5.4
+ - go: 1.6.3
+ - go: 1.7
+ - go: tip
+ allow_failures:
+ - go: tip
+install:
+- go get golang.org/x/tools/cmd/cover
+- go get github.com/mattn/goveralls
+script:
+- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
+ -repotoken $COVERALLS_TOKEN
+- echo "Build examples" ; cd examples && go build
+- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
+env:
+ global:
+ secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
index 4399639e2..08b2e4a3d 100644
--- a/vendor/github.com/blang/semver/README.md
+++ b/vendor/github.com/blang/semver/README.md
@@ -1,4 +1,4 @@
-semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
@@ -41,6 +41,7 @@ Features
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
@@ -59,6 +60,8 @@ A condition is composed of an operator and a version. The supported operators ar
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+Note that spaces between the operator and the version will be gracefully tolerated.
+
A `Range` can link multiple `Ranges` separated by space:
Ranges can be linked by logical AND:
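The wildcard support added to the feature list above is exercised through the package's range API. A small sketch, assuming the ParseRange/MustParse functions of the vendored github.com/blang/semver package; the concrete range string and versions are only examples:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ">=1.x" uses the wildcard syntax mentioned in the README;
	// the space between conditions is a logical AND.
	r, err := semver.ParseRange(">=1.x <3.0.0")
	if err != nil {
		panic(err)
	}
	// A Range is a predicate over Version values.
	fmt.Println(r(semver.MustParse("2.5.1"))) // true
	fmt.Println(r(semver.MustParse("3.0.0"))) // false
}
```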
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
new file mode 100644
index 000000000..1cf8ebdd9
--- /dev/null
+++ b/vendor/github.com/blang/semver/package.json
@@ -0,0 +1,17 @@
+{
+ "author": "blang",
+ "bugs": {
+ "URL": "https://github.com/blang/semver/issues",
+ "url": "https://github.com/blang/semver/issues"
+ },
+ "gx": {
+ "dvcsimport": "github.com/blang/semver"
+ },
+ "gxVersion": "0.10.0",
+ "language": "go",
+ "license": "MIT",
+ "name": "semver",
+ "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
+ "version": "3.5.1"
+}
+
diff --git a/vendor/github.com/checkpoint-restore/go-criu/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/.gitignore
new file mode 100644
index 000000000..f1c90e3d5
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/.gitignore
@@ -0,0 +1,5 @@
+test/test
+test/piggie
+test/phaul
+image
+rpc/rpc.proto
diff --git a/vendor/github.com/checkpoint-restore/go-criu/.travis.yml b/vendor/github.com/checkpoint-restore/go-criu/.travis.yml
new file mode 100644
index 000000000..741dbf0a1
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+sudo: required
+os:
+ - linux
+go:
+ - "1.8"
+ - "1.9"
+ - "1.10"
+env:
+ # Run the tests with CRIU master and criu-dev
+ - CRIU_BRANCH="master"
+ - CRIU_BRANCH="criu-dev"
+install:
+ - sudo apt-get update
+ - sudo apt-get install -y libprotobuf-dev libprotobuf-c0-dev protobuf-c-compiler protobuf-compiler python-protobuf libnl-3-dev libnet-dev libcap-dev
+ - go get github.com/checkpoint-restore/go-criu
+ - git clone --single-branch -b ${CRIU_BRANCH} https://github.com/checkpoint-restore/criu.git
+ - cd criu; make
+ - sudo install -D -m 755 criu/criu /usr/sbin/
+ - cd ..
+script:
+ # This builds the code without running the tests.
+ - make build phaul test/test test/phaul test/piggie
+ # Run actual test as root as it uses CRIU.
+ - sudo make test phaul-test
diff --git a/vendor/github.com/checkpoint-restore/go-criu/Makefile b/vendor/github.com/checkpoint-restore/go-criu/Makefile
new file mode 100644
index 000000000..ee44ee448
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/Makefile
@@ -0,0 +1,60 @@
+GO ?= go
+CC ?= gcc
+ifeq ($(GOPATH),)
+export GOPATH := $(shell $(GO) env GOPATH)
+endif
+FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
+GOBIN := $(shell $(GO) env GOBIN)
+ifeq ($(GOBIN),)
+ GOBIN := $(FIRST_GOPATH)/bin
+endif
+
+all: build test phaul phaul-test
+
+lint:
+ @golint . test phaul
+build:
+ @$(GO) build -v
+
+test/piggie: test/piggie.c
+ @$(CC) $^ -o $@
+
+test/test: test/main.go
+ @$(GO) build -v -o test/test test/main.go
+
+test: test/test test/piggie
+ mkdir -p image
+ test/piggie
+ test/test dump `pidof piggie` image
+ test/test restore image
+ pkill -9 piggie || :
+
+phaul:
+ @cd phaul; go build -v
+
+test/phaul: test/phaul-main.go
+ @$(GO) build -v -o test/phaul test/phaul-main.go
+
+phaul-test: test/phaul test/piggie
+ rm -rf image
+ test/piggie
+ test/phaul `pidof piggie`
+ pkill -9 piggie || :
+
+clean:
+ @rm -f test/test test/piggie test/phaul
+ @rm -rf image
+ @rm -f rpc/rpc.proto
+
+install.tools:
+ if [ ! -x "$(GOBIN)/golint" ]; then \
+ $(GO) get -u golang.org/x/lint/golint; \
+ fi
+
+rpc/rpc.proto:
+ curl -s https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@
+
+rpc/rpc.pb.go: rpc/rpc.proto
+ protoc --go_out=. $^
+
+.PHONY: build test clean lint phaul
diff --git a/vendor/github.com/checkpoint-restore/go-criu/README.md b/vendor/github.com/checkpoint-restore/go-criu/README.md
index 610756dbd..539627324 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/README.md
+++ b/vendor/github.com/checkpoint-restore/go-criu/README.md
@@ -21,6 +21,14 @@ or to just check if at least a certain CRIU version is installed:
result, err := c.IsCriuAtLeast(31100)
```
+## Releases
+
+go-criu will carry the same version number as CRIU. This means that each
+go-criu release pulls in the necessary changes from CRIU before it is cut.
+
+The first go-criu release was 3.11 based on CRIU 3.11.
+
## How to contribute
While bug fixes can first be identified via an "issue", that is not required.
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/main.go b/vendor/github.com/checkpoint-restore/go-criu/test/main.go
deleted file mode 100644
index 418ebb843..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/test/main.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package main
-
-import (
- "fmt"
- "github.com/checkpoint-restore/go-criu"
- "github.com/checkpoint-restore/go-criu/rpc"
- "github.com/golang/protobuf/proto"
- "os"
- "strconv"
-)
-
-// TestNfy struct
-type TestNfy struct {
- criu.NoNotify
-}
-
-// PreDump test function
-func (c TestNfy) PreDump() error {
- fmt.Printf("TEST PRE DUMP\n")
- return nil
-}
-
-func doDump(c *criu.Criu, pidS string, imgDir string, pre bool, prevImg string) error {
- fmt.Printf("Dumping\n")
- pid, _ := strconv.Atoi(pidS)
- img, err := os.Open(imgDir)
- if err != nil {
- return fmt.Errorf("can't open image dir (%s)", err)
- }
- defer img.Close()
-
- opts := rpc.CriuOpts{
- Pid: proto.Int32(int32(pid)),
- ImagesDirFd: proto.Int32(int32(img.Fd())),
- LogLevel: proto.Int32(4),
- LogFile: proto.String("dump.log"),
- }
-
- if prevImg != "" {
- opts.ParentImg = proto.String(prevImg)
- opts.TrackMem = proto.Bool(true)
- }
-
- if pre {
- err = c.PreDump(opts, TestNfy{})
- } else {
- err = c.Dump(opts, TestNfy{})
- }
- if err != nil {
- return fmt.Errorf("dump fail (%s)", err)
- }
-
- return nil
-}
-
-// Usage: test $act $pid $images_dir
-func main() {
- c := criu.MakeCriu()
- // Read out CRIU version
- version, err := c.GetCriuVersion()
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
- fmt.Println("CRIU version", version)
- // Check if version at least 3.2
- result, err := c.IsCriuAtLeast(30200)
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
- if !result {
- fmt.Println("CRIU too old")
- os.Exit(1)
- }
- act := os.Args[1]
- switch act {
- case "dump":
- err := doDump(c, os.Args[2], os.Args[3], false, "")
- if err != nil {
- fmt.Print(err)
- os.Exit(1)
- }
- case "dump2":
- err := c.Prepare()
- if err != nil {
- fmt.Print(err)
- os.Exit(1)
- }
-
- err = doDump(c, os.Args[2], os.Args[3]+"/pre", true, "")
- if err != nil {
- fmt.Printf("pre-dump failed")
- fmt.Print(err)
- os.Exit(1)
- }
- err = doDump(c, os.Args[2], os.Args[3], false, "./pre")
- if err != nil {
- fmt.Printf("dump failed")
- fmt.Print(err)
- os.Exit(1)
- }
-
- c.Cleanup()
- case "restore":
- fmt.Printf("Restoring\n")
- img, err := os.Open(os.Args[2])
- if err != nil {
- fmt.Printf("can't open image dir")
- os.Exit(1)
- }
- defer img.Close()
-
- opts := rpc.CriuOpts{
- ImagesDirFd: proto.Int32(int32(img.Fd())),
- LogLevel: proto.Int32(4),
- LogFile: proto.String("restore.log"),
- }
-
- err = c.Restore(opts, nil)
- if err != nil {
- fmt.Printf("Error:")
- fmt.Print(err)
- fmt.Printf("\n")
- os.Exit(1)
- }
- default:
- fmt.Printf("unknown action\n")
- os.Exit(1)
- }
-
- fmt.Printf("Success\n")
-}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go b/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go
deleted file mode 100644
index f1bec2c55..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
- "syscall"
-
- "github.com/checkpoint-restore/go-criu"
- "github.com/checkpoint-restore/go-criu/phaul"
- "github.com/checkpoint-restore/go-criu/rpc"
- "github.com/golang/protobuf/proto"
-)
-
-type testLocal struct {
- criu.NoNotify
- r *testRemote
-}
-
-type testRemote struct {
- srv *phaul.Server
-}
-
-/* Dir where test will put dump images */
-const imagesDir = "image"
-
-func prepareImages() error {
- err := os.Mkdir(imagesDir, 0700)
- if err != nil {
- return err
- }
-
- /* Work dir for PhaulClient */
- err = os.Mkdir(imagesDir+"/local", 0700)
- if err != nil {
- return err
- }
-
- /* Work dir for PhaulServer */
- err = os.Mkdir(imagesDir+"/remote", 0700)
- if err != nil {
- return err
- }
-
- /* Work dir for DumpCopyRestore */
- err = os.Mkdir(imagesDir+"/test", 0700)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func mergeImages(dumpDir, lastPreDumpDir string) error {
- idir, err := os.Open(dumpDir)
- if err != nil {
- return err
- }
-
- defer idir.Close()
-
- imgs, err := idir.Readdirnames(0)
- if err != nil {
- return err
- }
-
- for _, fname := range imgs {
- if !strings.HasSuffix(fname, ".img") {
- continue
- }
-
- fmt.Printf("\t%s -> %s/\n", fname, lastPreDumpDir)
- err = syscall.Link(dumpDir+"/"+fname, lastPreDumpDir+"/"+fname)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *testRemote) doRestore() error {
- lastSrvImagesDir := r.srv.LastImagesDir()
- /*
- * In imagesDir we have images from dump, in the
- * lastSrvImagesDir -- where server-side images
- * (from page server, with pages and pagemaps) are.
- * Need to put former into latter and restore from
- * them.
- */
- err := mergeImages(imagesDir+"/test", lastSrvImagesDir)
- if err != nil {
- return err
- }
-
- imgDir, err := os.Open(lastSrvImagesDir)
- if err != nil {
- return err
- }
- defer imgDir.Close()
-
- opts := rpc.CriuOpts{
- LogLevel: proto.Int32(4),
- LogFile: proto.String("restore.log"),
- ImagesDirFd: proto.Int32(int32(imgDir.Fd())),
- }
-
- cr := r.srv.GetCriu()
- fmt.Printf("Do restore\n")
- return cr.Restore(opts, nil)
-}
-
-func (l *testLocal) PostDump() error {
- return l.r.doRestore()
-}
-
-func (l *testLocal) DumpCopyRestore(cr *criu.Criu, cfg phaul.Config, lastClnImagesDir string) error {
- fmt.Printf("Final stage\n")
-
- imgDir, err := os.Open(imagesDir + "/test")
- if err != nil {
- return err
- }
- defer imgDir.Close()
-
- psi := rpc.CriuPageServerInfo{
- Fd: proto.Int32(int32(cfg.Memfd)),
- }
-
- opts := rpc.CriuOpts{
- Pid: proto.Int32(int32(cfg.Pid)),
- LogLevel: proto.Int32(4),
- LogFile: proto.String("dump.log"),
- ImagesDirFd: proto.Int32(int32(imgDir.Fd())),
- TrackMem: proto.Bool(true),
- ParentImg: proto.String(lastClnImagesDir),
- Ps: &psi,
- }
-
- fmt.Printf("Do dump\n")
- return cr.Dump(opts, l)
-}
-
-func main() {
- pid, _ := strconv.Atoi(os.Args[1])
- fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
- if err != nil {
- fmt.Printf("Can't make socketpair: %v\n", err)
- os.Exit(1)
- }
-
- err = prepareImages()
- if err != nil {
- fmt.Printf("Can't prepare dirs for images: %v\n", err)
- os.Exit(1)
- return
- }
-
- fmt.Printf("Make server part (socket %d)\n", fds[1])
- srv, err := phaul.MakePhaulServer(phaul.Config{
- Pid: pid,
- Memfd: fds[1],
- Wdir: imagesDir + "/remote"})
- if err != nil {
- fmt.Printf("Unable to run a server: %v", err)
- os.Exit(1)
- return
- }
-
- r := &testRemote{srv}
-
- fmt.Printf("Make client part (socket %d)\n", fds[0])
- cln, err := phaul.MakePhaulClient(&testLocal{r: r}, srv,
- phaul.Config{
- Pid: pid,
- Memfd: fds[0],
- Wdir: imagesDir + "/local"})
- if err != nil {
- fmt.Printf("Unable to run a client: %v\n", err)
- os.Exit(1)
- }
-
- fmt.Printf("Migrate\n")
- err = cln.Migrate()
- if err != nil {
- fmt.Printf("Failed: %v\n", err)
- os.Exit(1)
- }
-
- fmt.Printf("SUCCESS!\n")
-}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c b/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c
deleted file mode 100644
index 1dc0801c0..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c
+++ /dev/null
@@ -1,57 +0,0 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <signal.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <fcntl.h>
-#include <sched.h>
-
-#define STKS (4*4096)
-
-#ifndef CLONE_NEWPID
-#define CLONE_NEWPID 0x20000000
-#endif
-
-static int do_test(void *logf)
-{
- int fd, i = 0;
-
- setsid();
-
- close(0);
- close(1);
- close(2);
-
- fd = open("/dev/null", O_RDONLY);
- if (fd != 0) {
- dup2(fd, 0);
- close(fd);
- }
-
- fd = open(logf, O_WRONLY | O_TRUNC | O_CREAT, 0600);
- dup2(fd, 1);
- dup2(fd, 2);
- if (fd != 1 && fd != 2)
- close(fd);
-
- while (1) {
- sleep(1);
- printf("%d\n", i++);
- fflush(stdout);
- }
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- int pid;
- void *stk;
-
- stk = mmap(NULL, STKS, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN, 0, 0);
- pid = clone(do_test, stk + STKS, SIGCHLD | CLONE_NEWPID, argv[1]);
- printf("Child forked, pid %d\n", pid);
-
- return 0;
-}
diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md
deleted file mode 100644
index 81ad11cc7..000000000
--- a/vendor/github.com/containerd/cgroups/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# cgroups
-
-[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups)
-[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups)
-[![GoDoc](https://godoc.org/github.com/containerd/cgroups?status.svg)](https://godoc.org/github.com/containerd/cgroups)
-[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cgroups)](https://goreportcard.com/report/github.com/containerd/cgroups)
-
-Go package for creating, managing, inspecting, and destroying cgroups.
-The resources format for settings on the cgroup uses the OCI runtime-spec found
-[here](https://github.com/opencontainers/runtime-spec).
-
-## Examples
-
-### Create a new cgroup
-
-This creates a new cgroup using a static path for all subsystems under `/test`.
-
-* /sys/fs/cgroup/cpu/test
-* /sys/fs/cgroup/memory/test
-* etc....
-
-It uses a single hierarchy, specifies cpu shares as a resource constraint, and
-uses the v1 implementation of cgroups.
-
-
-```go
-shares := uint64(100)
-control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{
- CPU: &specs.CPU{
- Shares: &shares,
- },
-})
-defer control.Delete()
-```
-
-### Create with systemd slice support
-
-
-```go
-control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{
- CPU: &specs.CPU{
- Shares: &shares,
- },
-})
-
-```
-
-### Load an existing cgroup
-
-```go
-control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
-```
-
-### Add a process to the cgroup
-
-```go
-if err := control.Add(cgroups.Process{Pid:1234}); err != nil {
-}
-```
-
-### Update the cgroup
-
-To update the resources applied in the cgroup:
-
-```go
-shares = uint64(200)
-if err := control.Update(&specs.LinuxResources{
- CPU: &specs.CPU{
- Shares: &shares,
- },
-}); err != nil {
-}
-```
-
-### Freeze and Thaw the cgroup
-
-```go
-if err := control.Freeze(); err != nil {
-}
-if err := control.Thaw(); err != nil {
-}
-```
-
-### List all processes in the cgroup or recursively
-
-```go
-processes, err := control.Processes(cgroups.Devices, recursive)
-```
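-
-Each returned `Process` carries the subsystem it was found in, its pid, and the
-full cgroup path (see the `Process` type). A minimal sketch, assuming `control`
-is the cgroup loaded above:
-
-```go
-// Print every pid found in the devices subsystem (non-recursive).
-processes, err := control.Processes(cgroups.Devices, false)
-if err != nil {
-}
-for _, p := range processes {
- fmt.Printf("pid %d found under %s\n", p.Pid, p.Path)
-}
-```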
-
-### Get Stats on the cgroup
-
-```go
-stats, err := control.Stat()
-```
-
-By adding `cgroups.IgnoreNotExist`, all non-existent files will be ignored, e.g. swap memory stats when swap is not enabled.
-```go
-stats, err := control.Stat(cgroups.IgnoreNotExist)
-```
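-
-`Stat` takes any number of `ErrorHandler` functions (`func(error) error`); a handler
-that returns nil swallows the error for that subsystem. A small illustrative sketch,
-not from the upstream docs, of a handler that ignores every per-subsystem error:
-
-```go
-// ignoreAll drops every per-subsystem error so Stat still returns stats.
-ignoreAll := func(err error) error {
- return nil
-}
-stats, err := control.Stat(ignoreAll)
-```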
-
-### Move process across cgroups
-
-This allows you to take processes from one cgroup and move them to another.
-
-```go
-err := control.MoveTo(destination)
-```
-
-### Create subcgroup
-
-```go
-subCgroup, err := control.New("child", resources)
-```
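-
-The sub-cgroup returned by `New` implements the same `Cgroup` interface, so it can be
-managed exactly like its parent. A brief sketch, assuming `resources` is the same
-`*specs.LinuxResources` used above:
-
-```go
-// Create a nested cgroup and add a process to it.
-subCgroup, err := control.New("child", resources)
-if err != nil {
-}
-if err := subCgroup.Add(cgroups.Process{Pid: 1234}); err != nil {
-}
-```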
-
-## Project details
-
-Cgroups is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
-As a containerd sub-project, you will find the:
-
- * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
-
-information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go
deleted file mode 100644
index 875fb5546..000000000
--- a/vendor/github.com/containerd/cgroups/blkio.go
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewBlkio(root string) *blkioController {
- return &blkioController{
- root: filepath.Join(root, string(Blkio)),
- }
-}
-
-type blkioController struct {
- root string
-}
-
-func (b *blkioController) Name() Name {
- return Blkio
-}
-
-func (b *blkioController) Path(path string) string {
- return filepath.Join(b.root, path)
-}
-
-func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if resources.BlockIO == nil {
- return nil
- }
- for _, t := range createBlkioSettings(resources.BlockIO) {
- if t.value != nil {
- if err := ioutil.WriteFile(
- filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
- t.format(t.value),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
- return b.Create(path, resources)
-}
-
-func (b *blkioController) Stat(path string, stats *Metrics) error {
- stats.Blkio = &BlkIOStat{}
- settings := []blkioStatSettings{
- {
- name: "throttle.io_serviced",
- entry: &stats.Blkio.IoServicedRecursive,
- },
- {
- name: "throttle.io_service_bytes",
- entry: &stats.Blkio.IoServiceBytesRecursive,
- },
- }
- // Try to read CFQ stats available on all CFQ enabled kernels first
- if _, err := os.Lstat(filepath.Join(b.Path(path), "blkio.io_serviced_recursive")); err == nil {
- settings = append(settings,
- blkioStatSettings{
- name: "sectors_recursive",
- entry: &stats.Blkio.SectorsRecursive,
- },
- blkioStatSettings{
- name: "io_service_bytes_recursive",
- entry: &stats.Blkio.IoServiceBytesRecursive,
- },
- blkioStatSettings{
- name: "io_serviced_recursive",
- entry: &stats.Blkio.IoServicedRecursive,
- },
- blkioStatSettings{
- name: "io_queued_recursive",
- entry: &stats.Blkio.IoQueuedRecursive,
- },
- blkioStatSettings{
- name: "io_service_time_recursive",
- entry: &stats.Blkio.IoServiceTimeRecursive,
- },
- blkioStatSettings{
- name: "io_wait_time_recursive",
- entry: &stats.Blkio.IoWaitTimeRecursive,
- },
- blkioStatSettings{
- name: "io_merged_recursive",
- entry: &stats.Blkio.IoMergedRecursive,
- },
- blkioStatSettings{
- name: "time_recursive",
- entry: &stats.Blkio.IoTimeRecursive,
- },
- )
- }
- f, err := os.Open("/proc/diskstats")
- if err != nil {
- return err
- }
- defer f.Close()
-
- devices, err := getDevices(f)
- if err != nil {
- return err
- }
-
- for _, t := range settings {
- if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*BlkIOEntry) error {
- f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
- if err != nil {
- return err
- }
- defer f.Close()
- sc := bufio.NewScanner(f)
- for sc.Scan() {
- if err := sc.Err(); err != nil {
- return err
- }
- // format: dev type amount
- fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine)
- if len(fields) < 3 {
- if len(fields) == 2 && fields[0] == "Total" {
- // skip total line
- continue
- } else {
- return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
- }
- }
- major, err := strconv.ParseUint(fields[0], 10, 64)
- if err != nil {
- return err
- }
- minor, err := strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return err
- }
- op := ""
- valueField := 2
- if len(fields) == 4 {
- op = fields[2]
- valueField = 3
- }
- v, err := strconv.ParseUint(fields[valueField], 10, 64)
- if err != nil {
- return err
- }
- *entry = append(*entry, &BlkIOEntry{
- Device: devices[deviceKey{major, minor}],
- Major: major,
- Minor: minor,
- Op: op,
- Value: v,
- })
- }
- return nil
-}
-
-func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
- settings := []blkioSettings{}
-
- if blkio.Weight != nil {
- settings = append(settings,
- blkioSettings{
- name: "weight",
- value: blkio.Weight,
- format: uintf,
- })
- }
- if blkio.LeafWeight != nil {
- settings = append(settings,
- blkioSettings{
- name: "leaf_weight",
- value: blkio.LeafWeight,
- format: uintf,
- })
- }
- for _, wd := range blkio.WeightDevice {
- if wd.Weight != nil {
- settings = append(settings,
- blkioSettings{
- name: "weight_device",
- value: wd,
- format: weightdev,
- })
- }
- if wd.LeafWeight != nil {
- settings = append(settings,
- blkioSettings{
- name: "leaf_weight_device",
- value: wd,
- format: weightleafdev,
- })
- }
- }
- for _, t := range []struct {
- name string
- list []specs.LinuxThrottleDevice
- }{
- {
- name: "throttle.read_bps_device",
- list: blkio.ThrottleReadBpsDevice,
- },
- {
- name: "throttle.read_iops_device",
- list: blkio.ThrottleReadIOPSDevice,
- },
- {
- name: "throttle.write_bps_device",
- list: blkio.ThrottleWriteBpsDevice,
- },
- {
- name: "throttle.write_iops_device",
- list: blkio.ThrottleWriteIOPSDevice,
- },
- } {
- for _, td := range t.list {
- settings = append(settings, blkioSettings{
- name: t.name,
- value: td,
- format: throttleddev,
- })
- }
- }
- return settings
-}
-
-type blkioSettings struct {
- name string
- value interface{}
- format func(v interface{}) []byte
-}
-
-type blkioStatSettings struct {
- name string
- entry *[]*BlkIOEntry
-}
-
-func uintf(v interface{}) []byte {
- return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
-}
-
-func weightdev(v interface{}) []byte {
- wd := v.(specs.LinuxWeightDevice)
- return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight))
-}
-
-func weightleafdev(v interface{}) []byte {
- wd := v.(specs.LinuxWeightDevice)
- return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight))
-}
-
-func throttleddev(v interface{}) []byte {
- td := v.(specs.LinuxThrottleDevice)
- return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
-}
-
-func splitBlkIOStatLine(r rune) bool {
- return r == ' ' || r == ':'
-}
-
-type deviceKey struct {
- major, minor uint64
-}
-
-// getDevices makes a best effort attempt to read all the devices into a map
-// keyed by major and minor number. Since devices may be mapped multiple times,
-// we err on the side of taking the first occurrence.
-func getDevices(r io.Reader) (map[deviceKey]string, error) {
-
- var (
- s = bufio.NewScanner(r)
- devices = make(map[deviceKey]string)
- )
- for s.Scan() {
- fields := strings.Fields(s.Text())
- major, err := strconv.Atoi(fields[0])
- if err != nil {
- return nil, err
- }
- minor, err := strconv.Atoi(fields[1])
- if err != nil {
- return nil, err
- }
- key := deviceKey{
- major: uint64(major),
- minor: uint64(minor),
- }
- if _, ok := devices[key]; ok {
- continue
- }
- devices[key] = filepath.Join("/dev", fields[2])
- }
- return devices, s.Err()
-}
-
-func major(devNumber uint64) uint64 {
- return (devNumber >> 8) & 0xfff
-}
-
-func minor(devNumber uint64) uint64 {
- return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
-}
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
deleted file mode 100644
index e3ef07651..000000000
--- a/vendor/github.com/containerd/cgroups/cgroup.go
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
-)
-
-// New returns a new cgroup control via the cgroups interface
-func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources, opts ...InitOpts) (Cgroup, error) {
- config := newInitConfig()
- for _, o := range opts {
- if err := o(config); err != nil {
- return nil, err
- }
- }
- subsystems, err := hierarchy()
- if err != nil {
- return nil, err
- }
- var active []Subsystem
- for _, s := range subsystems {
- // check if subsystem exists
- if err := initializeSubsystem(s, path, resources); err != nil {
- if err == ErrControllerNotActive {
- if config.InitCheck != nil {
- if skerr := config.InitCheck(s, path, err); skerr != nil {
- if skerr != ErrIgnoreSubsystem {
- return nil, skerr
- }
- }
- }
- continue
- }
- return nil, err
- }
- active = append(active, s)
- }
- return &cgroup{
- path: path,
- subsystems: active,
- }, nil
-}
-
-// Load will load an existing cgroup and allow it to be controlled
-func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) {
- config := newInitConfig()
- for _, o := range opts {
- if err := o(config); err != nil {
- return nil, err
- }
- }
- var activeSubsystems []Subsystem
- subsystems, err := hierarchy()
- if err != nil {
- return nil, err
- }
- // check that the subsystems still exist, and keep only those that actually exist
- for _, s := range pathers(subsystems) {
- p, err := path(s.Name())
- if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
- return nil, ErrCgroupDeleted
- }
- if err == ErrControllerNotActive {
- if config.InitCheck != nil {
- if skerr := config.InitCheck(s, path, err); skerr != nil {
- if skerr != ErrIgnoreSubsystem {
- return nil, skerr
- }
- }
- }
- continue
- }
- return nil, err
- }
- if _, err := os.Lstat(s.Path(p)); err != nil {
- if os.IsNotExist(err) {
- continue
- }
- return nil, err
- }
- activeSubsystems = append(activeSubsystems, s)
- }
- // if we do not have any active subsystems then the cgroup is deleted
- if len(activeSubsystems) == 0 {
- return nil, ErrCgroupDeleted
- }
- return &cgroup{
- path: path,
- subsystems: activeSubsystems,
- }, nil
-}
-
-type cgroup struct {
- path Path
-
- subsystems []Subsystem
- mu sync.Mutex
- err error
-}
-
-// New returns a new sub cgroup
-func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return nil, c.err
- }
- path := subPath(c.path, name)
- for _, s := range c.subsystems {
- if err := initializeSubsystem(s, path, resources); err != nil {
- return nil, err
- }
- }
- return &cgroup{
- path: path,
- subsystems: c.subsystems,
- }, nil
-}
-
-// Subsystems returns all the subsystems that are currently being
-// consumed by the group
-func (c *cgroup) Subsystems() []Subsystem {
- return c.subsystems
-}
-
-// Add moves the provided process into the new cgroup
-func (c *cgroup) Add(process Process) error {
- if process.Pid <= 0 {
- return ErrInvalidPid
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- return c.add(process)
-}
-
-func (c *cgroup) add(process Process) error {
- for _, s := range pathers(c.subsystems) {
- p, err := c.path(s.Name())
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(
- filepath.Join(s.Path(p), cgroupProcs),
- []byte(strconv.Itoa(process.Pid)),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- return nil
-}
-
-// AddTask moves the provided tasks (threads) into the new cgroup
-func (c *cgroup) AddTask(process Process) error {
- if process.Pid <= 0 {
- return ErrInvalidPid
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- return c.addTask(process)
-}
-
-func (c *cgroup) addTask(process Process) error {
- for _, s := range pathers(c.subsystems) {
- p, err := c.path(s.Name())
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(
- filepath.Join(s.Path(p), cgroupTasks),
- []byte(strconv.Itoa(process.Pid)),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Delete will remove the control group from each of the subsystems registered
-func (c *cgroup) Delete() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- var errors []string
- for _, s := range c.subsystems {
- if d, ok := s.(deleter); ok {
- sp, err := c.path(s.Name())
- if err != nil {
- return err
- }
- if err := d.Delete(sp); err != nil {
- errors = append(errors, string(s.Name()))
- }
- continue
- }
- if p, ok := s.(pather); ok {
- sp, err := c.path(s.Name())
- if err != nil {
- return err
- }
- path := p.Path(sp)
- if err := remove(path); err != nil {
- errors = append(errors, path)
- }
- }
- }
- if len(errors) > 0 {
- return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
- }
- c.err = ErrCgroupDeleted
- return nil
-}
-
-// Stat returns the current metrics for the cgroup
-func (c *cgroup) Stat(handlers ...ErrorHandler) (*Metrics, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return nil, c.err
- }
- if len(handlers) == 0 {
- handlers = append(handlers, errPassthrough)
- }
- var (
- stats = &Metrics{
- CPU: &CPUStat{
- Throttling: &Throttle{},
- Usage: &CPUUsage{},
- },
- }
- wg = &sync.WaitGroup{}
- errs = make(chan error, len(c.subsystems))
- )
- for _, s := range c.subsystems {
- if ss, ok := s.(stater); ok {
- sp, err := c.path(s.Name())
- if err != nil {
- return nil, err
- }
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := ss.Stat(sp, stats); err != nil {
- for _, eh := range handlers {
- if herr := eh(err); herr != nil {
- errs <- herr
- }
- }
- }
- }()
- }
- }
- wg.Wait()
- close(errs)
- for err := range errs {
- return nil, err
- }
- return stats, nil
-}
-
-// Update updates the cgroup with the new resource values provided
-//
-// Be prepared to handle EBUSY when trying to update a cgroup with
-// live processes and other operations like Stats being performed at the
-// same time
-func (c *cgroup) Update(resources *specs.LinuxResources) error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- for _, s := range c.subsystems {
- if u, ok := s.(updater); ok {
- sp, err := c.path(s.Name())
- if err != nil {
- return err
- }
- if err := u.Update(sp, resources); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Processes returns the processes running inside the cgroup along
-// with the subsystem used, pid, and path
-func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return nil, c.err
- }
- return c.processes(subsystem, recursive)
-}
-
-func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
- s := c.getSubsystem(subsystem)
- sp, err := c.path(subsystem)
- if err != nil {
- return nil, err
- }
- path := s.(pather).Path(sp)
- var processes []Process
- err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !recursive && info.IsDir() {
- if p == path {
- return nil
- }
- return filepath.SkipDir
- }
- dir, name := filepath.Split(p)
- if name != cgroupProcs {
- return nil
- }
- procs, err := readPids(dir, subsystem)
- if err != nil {
- return err
- }
- processes = append(processes, procs...)
- return nil
- })
- return processes, err
-}
-
-// Tasks returns the tasks running inside the cgroup along
-// with the subsystem used, pid, and path
-func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return nil, c.err
- }
- return c.tasks(subsystem, recursive)
-}
-
-func (c *cgroup) tasks(subsystem Name, recursive bool) ([]Task, error) {
- s := c.getSubsystem(subsystem)
- sp, err := c.path(subsystem)
- if err != nil {
- return nil, err
- }
- path := s.(pather).Path(sp)
- var tasks []Task
- err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !recursive && info.IsDir() {
- if p == path {
- return nil
- }
- return filepath.SkipDir
- }
- dir, name := filepath.Split(p)
- if name != cgroupTasks {
- return nil
- }
- procs, err := readTasksPids(dir, subsystem)
- if err != nil {
- return err
- }
- tasks = append(tasks, procs...)
- return nil
- })
- return tasks, err
-}
-
-// Freeze freezes the entire cgroup and all the processes inside it
-func (c *cgroup) Freeze() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- s := c.getSubsystem(Freezer)
- if s == nil {
- return ErrFreezerNotSupported
- }
- sp, err := c.path(Freezer)
- if err != nil {
- return err
- }
- return s.(*freezerController).Freeze(sp)
-}
-
-// Thaw thaws out the cgroup and all the processes inside it
-func (c *cgroup) Thaw() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- s := c.getSubsystem(Freezer)
- if s == nil {
- return ErrFreezerNotSupported
- }
- sp, err := c.path(Freezer)
- if err != nil {
- return err
- }
- return s.(*freezerController).Thaw(sp)
-}
-
-// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
-// when processes inside the cgroup receive an oom event. Returns
-// ErrMemoryNotSupported if the memory cgroup is not supported.
-func (c *cgroup) OOMEventFD() (uintptr, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return 0, c.err
- }
- s := c.getSubsystem(Memory)
- if s == nil {
- return 0, ErrMemoryNotSupported
- }
- sp, err := c.path(Memory)
- if err != nil {
- return 0, err
- }
- return s.(*memoryController).OOMEventFD(sp)
-}
-
-// State returns the state of the cgroup and its processes
-func (c *cgroup) State() State {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.checkExists()
- if c.err != nil && c.err == ErrCgroupDeleted {
- return Deleted
- }
- s := c.getSubsystem(Freezer)
- if s == nil {
- return Thawed
- }
- sp, err := c.path(Freezer)
- if err != nil {
- return Unknown
- }
- state, err := s.(*freezerController).state(sp)
- if err != nil {
- return Unknown
- }
- return state
-}
-
-// MoveTo recursively moves all the processes inside the group to the
-// destination cgroup, one subsystem at a time
-func (c *cgroup) MoveTo(destination Cgroup) error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err != nil {
- return c.err
- }
- for _, s := range c.subsystems {
- processes, err := c.processes(s.Name(), true)
- if err != nil {
- return err
- }
- for _, p := range processes {
- if err := destination.Add(p); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (c *cgroup) getSubsystem(n Name) Subsystem {
- for _, s := range c.subsystems {
- if s.Name() == n {
- return s
- }
- }
- return nil
-}
-
-func (c *cgroup) checkExists() {
- for _, s := range pathers(c.subsystems) {
- p, err := c.path(s.Name())
- if err != nil {
- return
- }
- if _, err := os.Lstat(s.Path(p)); err != nil {
- if os.IsNotExist(err) {
- c.err = ErrCgroupDeleted
- return
- }
- }
- }
-}
diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go
deleted file mode 100644
index 1f62c54f3..000000000
--- a/vendor/github.com/containerd/cgroups/control.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "os"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-const (
- cgroupProcs = "cgroup.procs"
- cgroupTasks = "tasks"
- defaultDirPerm = 0755
-)
-
-// defaultFilePerm is a var so that the test framework can change the filemode
-// of all files created when the tests are running. The difference between the
-// tests and real-world use is that files like "cgroup.procs" will exist when writing
-// to a real cgroup filesystem and do not exist beforehand when running in the tests.
-// This is set to a non-zero value in the test code.
-var defaultFilePerm = os.FileMode(0)
-
-type Process struct {
- // Subsystem is the name of the subsystem that the process is in
- Subsystem Name
- // Pid is the process id of the process
- Pid int
- // Path is the full path of the subsystem and location that the process is in
- Path string
-}
-
-type Task struct {
- // Subsystem is the name of the subsystem that the task is in
- Subsystem Name
- // Pid is the process id of the task
- Pid int
- // Path is the full path of the subsystem and location that the task is in
- Path string
-}
-
-// Cgroup handles interactions with the individual groups to perform
-// actions on them as the main interface to this cgroup package
-type Cgroup interface {
- // New creates a new cgroup under the calling cgroup
- New(string, *specs.LinuxResources) (Cgroup, error)
- // Add adds a process to the cgroup (cgroup.procs)
- Add(Process) error
- // AddTask adds a process to the cgroup (tasks)
- AddTask(Process) error
- // Delete removes the cgroup as a whole
- Delete() error
- // MoveTo moves all the processes under the calling cgroup to the provided one;
- // subsystems are moved one at a time
- MoveTo(Cgroup) error
- // Stat returns the stats for all subsystems in the cgroup
- Stat(...ErrorHandler) (*Metrics, error)
- // Update updates all the subsystems with the provided resource changes
- Update(resources *specs.LinuxResources) error
- // Processes returns all the processes in a selected subsystem for the cgroup
- Processes(Name, bool) ([]Process, error)
- // Tasks returns all the tasks in a selected subsystem for the cgroup
- Tasks(Name, bool) ([]Task, error)
- // Freeze freezes or pauses all processes inside the cgroup
- Freeze() error
- // Thaw thaw or resumes all processes inside the cgroup
- // Thaw thaws or resumes all processes inside the cgroup
- // OOMEventFD returns the memory subsystem's event fd for OOM events
- OOMEventFD() (uintptr, error)
- // State returns the cgroups current state
- State() State
- // Subsystems returns all the subsystems in the cgroup
- Subsystems() []Subsystem
-}
diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go
deleted file mode 100644
index 431cd3e51..000000000
--- a/vendor/github.com/containerd/cgroups/cpu.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bufio"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewCpu(root string) *cpuController {
- return &cpuController{
- root: filepath.Join(root, string(Cpu)),
- }
-}
-
-type cpuController struct {
- root string
-}
-
-func (c *cpuController) Name() Name {
- return Cpu
-}
-
-func (c *cpuController) Path(path string) string {
- return filepath.Join(c.root, path)
-}
-
-func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if cpu := resources.CPU; cpu != nil {
- for _, t := range []struct {
- name string
- ivalue *int64
- uvalue *uint64
- }{
- {
- name: "rt_period_us",
- uvalue: cpu.RealtimePeriod,
- },
- {
- name: "rt_runtime_us",
- ivalue: cpu.RealtimeRuntime,
- },
- {
- name: "shares",
- uvalue: cpu.Shares,
- },
- {
- name: "cfs_period_us",
- uvalue: cpu.Period,
- },
- {
- name: "cfs_quota_us",
- ivalue: cpu.Quota,
- },
- } {
- var value []byte
- if t.uvalue != nil {
- value = []byte(strconv.FormatUint(*t.uvalue, 10))
- } else if t.ivalue != nil {
- value = []byte(strconv.FormatInt(*t.ivalue, 10))
- }
- if value != nil {
- if err := ioutil.WriteFile(
- filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
- value,
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
- return c.Create(path, resources)
-}
-
-func (c *cpuController) Stat(path string, stats *Metrics) error {
- f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
- if err != nil {
- return err
- }
- defer f.Close()
- // get or create the cpu field because cpuacct can also set values on this struct
- sc := bufio.NewScanner(f)
- for sc.Scan() {
- if err := sc.Err(); err != nil {
- return err
- }
- key, v, err := parseKV(sc.Text())
- if err != nil {
- return err
- }
- switch key {
- case "nr_periods":
- stats.CPU.Throttling.Periods = v
- case "nr_throttled":
- stats.CPU.Throttling.ThrottledPeriods = v
- case "throttled_time":
- stats.CPU.Throttling.ThrottledTime = v
- }
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go
deleted file mode 100644
index 42a490a87..000000000
--- a/vendor/github.com/containerd/cgroups/cpuacct.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
- "strconv"
- "strings"
-)
-
-const nanosecondsInSecond = 1000000000
-
-var clockTicks = getClockTicks()
-
-func NewCpuacct(root string) *cpuacctController {
- return &cpuacctController{
- root: filepath.Join(root, string(Cpuacct)),
- }
-}
-
-type cpuacctController struct {
- root string
-}
-
-func (c *cpuacctController) Name() Name {
- return Cpuacct
-}
-
-func (c *cpuacctController) Path(path string) string {
- return filepath.Join(c.root, path)
-}
-
-func (c *cpuacctController) Stat(path string, stats *Metrics) error {
- user, kernel, err := c.getUsage(path)
- if err != nil {
- return err
- }
- total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
- if err != nil {
- return err
- }
- percpu, err := c.percpuUsage(path)
- if err != nil {
- return err
- }
- stats.CPU.Usage.Total = total
- stats.CPU.Usage.User = user
- stats.CPU.Usage.Kernel = kernel
- stats.CPU.Usage.PerCPU = percpu
- return nil
-}
-
-func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
- var usage []uint64
- data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
- if err != nil {
- return nil, err
- }
- for _, v := range strings.Fields(string(data)) {
- u, err := strconv.ParseUint(v, 10, 64)
- if err != nil {
- return nil, err
- }
- usage = append(usage, u)
- }
- return usage, nil
-}
-
-func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
- statPath := filepath.Join(c.Path(path), "cpuacct.stat")
- data, err := ioutil.ReadFile(statPath)
- if err != nil {
- return 0, 0, err
- }
- fields := strings.Fields(string(data))
- if len(fields) != 4 {
- return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
- }
- for _, t := range []struct {
- index int
- name string
- value *uint64
- }{
- {
- index: 0,
- name: "user",
- value: &user,
- },
- {
- index: 2,
- name: "system",
- value: &kernel,
- },
- } {
- if fields[t.index] != t.name {
- return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
- }
- v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
- if err != nil {
- return 0, 0, err
- }
- *t.value = v
- }
- return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
-}
diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go
deleted file mode 100644
index 30208515e..000000000
--- a/vendor/github.com/containerd/cgroups/cpuset.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewCputset(root string) *cpusetController {
- return &cpusetController{
- root: filepath.Join(root, string(Cpuset)),
- }
-}
-
-type cpusetController struct {
- root string
-}
-
-func (c *cpusetController) Name() Name {
- return Cpuset
-}
-
-func (c *cpusetController) Path(path string) string {
- return filepath.Join(c.root, path)
-}
-
-func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
- if err := c.ensureParent(c.Path(path), c.root); err != nil {
- return err
- }
- if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
- return err
- }
- if resources.CPU != nil {
- for _, t := range []struct {
- name string
- value string
- }{
- {
- name: "cpus",
- value: resources.CPU.Cpus,
- },
- {
- name: "mems",
- value: resources.CPU.Mems,
- },
- } {
- if t.value != "" {
- if err := ioutil.WriteFile(
- filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
- []byte(t.value),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error {
- return c.Create(path, resources)
-}
-
-func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
- if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
- return
- }
- if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
- return
- }
- return cpus, mems, nil
-}
-
-// ensureParent makes sure that the parent directory of current is created
-// and populated with the proper cpus and mems files copied from
-// its parent.
-func (c *cpusetController) ensureParent(current, root string) error {
- parent := filepath.Dir(current)
- if _, err := filepath.Rel(root, parent); err != nil {
- return nil
- }
- // Avoid infinite recursion.
- if parent == current {
- return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
- }
- if cleanPath(parent) != root {
- if err := c.ensureParent(parent, root); err != nil {
- return err
- }
- }
- if err := os.MkdirAll(current, defaultDirPerm); err != nil {
- return err
- }
- return c.copyIfNeeded(current, parent)
-}
-
-// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
-// directory to the current directory if the current files are empty
-func (c *cpusetController) copyIfNeeded(current, parent string) error {
- var (
- err error
- currentCpus, currentMems []byte
- parentCpus, parentMems []byte
- )
- if currentCpus, currentMems, err = c.getValues(current); err != nil {
- return err
- }
- if parentCpus, parentMems, err = c.getValues(parent); err != nil {
- return err
- }
- if isEmpty(currentCpus) {
- if err := ioutil.WriteFile(
- filepath.Join(current, "cpuset.cpus"),
- parentCpus,
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- if isEmpty(currentMems) {
- if err := ioutil.WriteFile(
- filepath.Join(current, "cpuset.mems"),
- parentMems,
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- return nil
-}
-
-func isEmpty(b []byte) bool {
- return len(bytes.Trim(b, "\n")) == 0
-}
diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go
deleted file mode 100644
index f6a3b1947..000000000
--- a/vendor/github.com/containerd/cgroups/devices.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-const (
- allowDeviceFile = "devices.allow"
- denyDeviceFile = "devices.deny"
- wildcard = -1
-)
-
-func NewDevices(root string) *devicesController {
- return &devicesController{
- root: filepath.Join(root, string(Devices)),
- }
-}
-
-type devicesController struct {
- root string
-}
-
-func (d *devicesController) Name() Name {
- return Devices
-}
-
-func (d *devicesController) Path(path string) string {
- return filepath.Join(d.root, path)
-}
-
-func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
- return err
- }
- for _, device := range resources.Devices {
- file := denyDeviceFile
- if device.Allow {
- file = allowDeviceFile
- }
- if device.Type == "" {
- device.Type = "a"
- }
- if err := ioutil.WriteFile(
- filepath.Join(d.Path(path), file),
- []byte(deviceString(device)),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
- return d.Create(path, resources)
-}
-
-func deviceString(device specs.LinuxDeviceCgroup) string {
- return fmt.Sprintf("%s %s:%s %s",
- device.Type,
- deviceNumber(device.Major),
- deviceNumber(device.Minor),
- device.Access,
- )
-}
-
-func deviceNumber(number *int64) string {
- if number == nil || *number == wildcard {
- return "*"
- }
- return fmt.Sprint(*number)
-}
diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go
deleted file mode 100644
index f1ad8315c..000000000
--- a/vendor/github.com/containerd/cgroups/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "errors"
- "os"
-)
-
-var (
- ErrInvalidPid = errors.New("cgroups: pid must be greater than 0")
- ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist")
- ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed")
- ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system")
- ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system")
- ErrCgroupDeleted = errors.New("cgroups: cgroup deleted")
- ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination")
-)
-
-// ErrorHandler is a function that handles and acts on errors
-type ErrorHandler func(err error) error
-
-// IgnoreNotExist ignores any errors that are for not existing files
-func IgnoreNotExist(err error) error {
- if os.IsNotExist(err) {
- return nil
- }
- return err
-}
-
-func errPassthrough(err error) error {
- return err
-}
diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go
deleted file mode 100644
index 5e668408a..000000000
--- a/vendor/github.com/containerd/cgroups/freezer.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "io/ioutil"
- "path/filepath"
- "strings"
- "time"
-)
-
-func NewFreezer(root string) *freezerController {
- return &freezerController{
- root: filepath.Join(root, string(Freezer)),
- }
-}
-
-type freezerController struct {
- root string
-}
-
-func (f *freezerController) Name() Name {
- return Freezer
-}
-
-func (f *freezerController) Path(path string) string {
- return filepath.Join(f.root, path)
-}
-
-func (f *freezerController) Freeze(path string) error {
- return f.waitState(path, Frozen)
-}
-
-func (f *freezerController) Thaw(path string) error {
- return f.waitState(path, Thawed)
-}
-
-func (f *freezerController) changeState(path string, state State) error {
- return ioutil.WriteFile(
- filepath.Join(f.root, path, "freezer.state"),
- []byte(strings.ToUpper(string(state))),
- defaultFilePerm,
- )
-}
-
-func (f *freezerController) state(path string) (State, error) {
- current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
- if err != nil {
- return "", err
- }
- return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
-}
-
-func (f *freezerController) waitState(path string, state State) error {
- for {
- if err := f.changeState(path, state); err != nil {
- return err
- }
- current, err := f.state(path)
- if err != nil {
- return err
- }
- if current == state {
- return nil
- }
- time.Sleep(1 * time.Millisecond)
- }
-}
diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go
deleted file mode 100644
index 9221bf3f1..000000000
--- a/vendor/github.com/containerd/cgroups/hierarchy.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-// Hierarchy enables both unified and split hierarchies for cgroups
-type Hierarchy func() ([]Subsystem, error)
diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go
deleted file mode 100644
index 3718706d7..000000000
--- a/vendor/github.com/containerd/cgroups/hugetlb.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewHugetlb(root string) (*hugetlbController, error) {
- sizes, err := hugePageSizes()
- if err != nil {
- return nil, err
- }
-
- return &hugetlbController{
- root: filepath.Join(root, string(Hugetlb)),
- sizes: sizes,
- }, nil
-}
-
-type hugetlbController struct {
- root string
- sizes []string
-}
-
-func (h *hugetlbController) Name() Name {
- return Hugetlb
-}
-
-func (h *hugetlbController) Path(path string) string {
- return filepath.Join(h.root, path)
-}
-
-func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
- return err
- }
- for _, limit := range resources.HugepageLimits {
- if err := ioutil.WriteFile(
- filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
- []byte(strconv.FormatUint(limit.Limit, 10)),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (h *hugetlbController) Stat(path string, stats *Metrics) error {
- for _, size := range h.sizes {
- s, err := h.readSizeStat(path, size)
- if err != nil {
- return err
- }
- stats.Hugetlb = append(stats.Hugetlb, s)
- }
- return nil
-}
-
-func (h *hugetlbController) readSizeStat(path, size string) (*HugetlbStat, error) {
- s := HugetlbStat{
- Pagesize: size,
- }
- for _, t := range []struct {
- name string
- value *uint64
- }{
- {
- name: "usage_in_bytes",
- value: &s.Usage,
- },
- {
- name: "max_usage_in_bytes",
- value: &s.Max,
- },
- {
- name: "failcnt",
- value: &s.Failcnt,
- },
- } {
- v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
- if err != nil {
- return nil, err
- }
- *t.value = v
- }
- return &s, nil
-}
diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go
deleted file mode 100644
index ce15ca2b9..000000000
--- a/vendor/github.com/containerd/cgroups/memory.go
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "syscall"
-
- "golang.org/x/sys/unix"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewMemory(root string) *memoryController {
- return &memoryController{
- root: filepath.Join(root, string(Memory)),
- }
-}
-
-type memoryController struct {
- root string
-}
-
-func (m *memoryController) Name() Name {
- return Memory
-}
-
-func (m *memoryController) Path(path string) string {
- return filepath.Join(m.root, path)
-}
-
-func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if resources.Memory == nil {
- return nil
- }
- if resources.Memory.Kernel != nil {
- // Check if kernel memory is enabled
- // We have to limit the kernel memory here as it won't be accounted at all
- // until a limit is set on the cgroup, and a limit cannot be set once the
- // cgroup has children, or if there are already tasks in the cgroup.
- for _, i := range []int64{1, -1} {
- if err := ioutil.WriteFile(
- filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
- []byte(strconv.FormatInt(i, 10)),
- defaultFilePerm,
- ); err != nil {
- return checkEBUSY(err)
- }
- }
- }
- return m.set(path, getMemorySettings(resources))
-}
-
-func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
- if resources.Memory == nil {
- return nil
- }
- g := func(v *int64) bool {
- return v != nil && *v > 0
- }
- settings := getMemorySettings(resources)
- if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
- // if the updated swap value is larger than the current memory limit, set the swap changes first,
- // then set the memory limit, as swap must always be larger than the memory limit
- current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
- if err != nil {
- return err
- }
- if current < uint64(*resources.Memory.Swap) {
- settings[0], settings[1] = settings[1], settings[0]
- }
- }
- return m.set(path, settings)
-}
-
-func (m *memoryController) Stat(path string, stats *Metrics) error {
- f, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
- if err != nil {
- return err
- }
- defer f.Close()
- stats.Memory = &MemoryStat{
- Usage: &MemoryEntry{},
- Swap: &MemoryEntry{},
- Kernel: &MemoryEntry{},
- KernelTCP: &MemoryEntry{},
- }
- if err := m.parseStats(f, stats.Memory); err != nil {
- return err
- }
- for _, t := range []struct {
- module string
- entry *MemoryEntry
- }{
- {
- module: "",
- entry: stats.Memory.Usage,
- },
- {
- module: "memsw",
- entry: stats.Memory.Swap,
- },
- {
- module: "kmem",
- entry: stats.Memory.Kernel,
- },
- {
- module: "kmem.tcp",
- entry: stats.Memory.KernelTCP,
- },
- } {
- for _, tt := range []struct {
- name string
- value *uint64
- }{
- {
- name: "usage_in_bytes",
- value: &t.entry.Usage,
- },
- {
- name: "max_usage_in_bytes",
- value: &t.entry.Max,
- },
- {
- name: "failcnt",
- value: &t.entry.Failcnt,
- },
- {
- name: "limit_in_bytes",
- value: &t.entry.Limit,
- },
- } {
- parts := []string{"memory"}
- if t.module != "" {
- parts = append(parts, t.module)
- }
- parts = append(parts, tt.name)
- v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
- if err != nil {
- return err
- }
- *tt.value = v
- }
- }
- return nil
-}
-
-func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
- root := m.Path(path)
- f, err := os.Open(filepath.Join(root, "memory.oom_control"))
- if err != nil {
- return 0, err
- }
- defer f.Close()
- fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0)
- if serr != 0 {
- return 0, serr
- }
- if err := writeEventFD(root, f.Fd(), fd); err != nil {
- unix.Close(int(fd))
- return 0, err
- }
- return fd, nil
-}
-
-func writeEventFD(root string, cfd, efd uintptr) error {
- f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
- if err != nil {
- return err
- }
- _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
- f.Close()
- return err
-}
-
-func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error {
- var (
- raw = make(map[string]uint64)
- sc = bufio.NewScanner(r)
- line int
- )
- for sc.Scan() {
- if err := sc.Err(); err != nil {
- return err
- }
- key, v, err := parseKV(sc.Text())
- if err != nil {
- return fmt.Errorf("%d: %v", line, err)
- }
- raw[key] = v
- line++
- }
- stat.Cache = raw["cache"]
- stat.RSS = raw["rss"]
- stat.RSSHuge = raw["rss_huge"]
- stat.MappedFile = raw["mapped_file"]
- stat.Dirty = raw["dirty"]
- stat.Writeback = raw["writeback"]
- stat.PgPgIn = raw["pgpgin"]
- stat.PgPgOut = raw["pgpgout"]
- stat.PgFault = raw["pgfault"]
- stat.PgMajFault = raw["pgmajfault"]
- stat.InactiveAnon = raw["inactive_anon"]
- stat.ActiveAnon = raw["active_anon"]
- stat.InactiveFile = raw["inactive_file"]
- stat.ActiveFile = raw["active_file"]
- stat.Unevictable = raw["unevictable"]
- stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
- stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
- stat.TotalCache = raw["total_cache"]
- stat.TotalRSS = raw["total_rss"]
- stat.TotalRSSHuge = raw["total_rss_huge"]
- stat.TotalMappedFile = raw["total_mapped_file"]
- stat.TotalDirty = raw["total_dirty"]
- stat.TotalWriteback = raw["total_writeback"]
- stat.TotalPgPgIn = raw["total_pgpgin"]
- stat.TotalPgPgOut = raw["total_pgpgout"]
- stat.TotalPgFault = raw["total_pgfault"]
- stat.TotalPgMajFault = raw["total_pgmajfault"]
- stat.TotalInactiveAnon = raw["total_inactive_anon"]
- stat.TotalActiveAnon = raw["total_active_anon"]
- stat.TotalInactiveFile = raw["total_inactive_file"]
- stat.TotalActiveFile = raw["total_active_file"]
- stat.TotalUnevictable = raw["total_unevictable"]
- return nil
-}
-
-func (m *memoryController) set(path string, settings []memorySettings) error {
- for _, t := range settings {
- if t.value != nil {
- if err := ioutil.WriteFile(
- filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
- []byte(strconv.FormatInt(*t.value, 10)),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-type memorySettings struct {
- name string
- value *int64
-}
-
-func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
- mem := resources.Memory
- var swappiness *int64
- if mem.Swappiness != nil {
- v := int64(*mem.Swappiness)
- swappiness = &v
- }
- return []memorySettings{
- {
- name: "limit_in_bytes",
- value: mem.Limit,
- },
- {
- name: "memsw.limit_in_bytes",
- value: mem.Swap,
- },
- {
- name: "kmem.limit_in_bytes",
- value: mem.Kernel,
- },
- {
- name: "kmem.tcp.limit_in_bytes",
- value: mem.KernelTCP,
- },
- {
- name: "oom_control",
- value: getOomControlValue(mem),
- },
- {
- name: "swappiness",
- value: swappiness,
- },
- }
-}
-
-func checkEBUSY(err error) error {
- if pathErr, ok := err.(*os.PathError); ok {
- if errNo, ok := pathErr.Err.(syscall.Errno); ok {
- if errNo == unix.EBUSY {
- return fmt.Errorf(
- "failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children")
- }
- }
- }
- return err
-}
-
-func getOomControlValue(mem *specs.LinuxMemory) *int64 {
- if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
- i := int64(1)
- return &i
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go
deleted file mode 100644
index 6043a8f7d..000000000
--- a/vendor/github.com/containerd/cgroups/metrics.pb.go
+++ /dev/null
@@ -1,4288 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/cgroups/metrics.proto
-// DO NOT EDIT!
-
-/*
- Package cgroups is a generated protocol buffer package.
-
- It is generated from these files:
- github.com/containerd/cgroups/metrics.proto
-
- It has these top-level messages:
- Metrics
- HugetlbStat
- PidsStat
- CPUStat
- CPUUsage
- Throttle
- MemoryStat
- MemoryEntry
- BlkIOStat
- BlkIOEntry
- RdmaStat
- RdmaEntry
-*/
-package cgroups
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-type Metrics struct {
- Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb" json:"hugetlb,omitempty"`
- Pids *PidsStat `protobuf:"bytes,2,opt,name=pids" json:"pids,omitempty"`
- CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu" json:"cpu,omitempty"`
- Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"`
- Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"`
- Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"`
-}
-
-func (m *Metrics) Reset() { *m = Metrics{} }
-func (*Metrics) ProtoMessage() {}
-func (*Metrics) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
-
-type HugetlbStat struct {
- Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"`
- Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
- Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
- Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"`
-}
-
-func (m *HugetlbStat) Reset() { *m = HugetlbStat{} }
-func (*HugetlbStat) ProtoMessage() {}
-func (*HugetlbStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{1} }
-
-type PidsStat struct {
- Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"`
- Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
-}
-
-func (m *PidsStat) Reset() { *m = PidsStat{} }
-func (*PidsStat) ProtoMessage() {}
-func (*PidsStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{2} }
-
-type CPUStat struct {
- Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage" json:"usage,omitempty"`
- Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling" json:"throttling,omitempty"`
-}
-
-func (m *CPUStat) Reset() { *m = CPUStat{} }
-func (*CPUStat) ProtoMessage() {}
-func (*CPUStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{3} }
-
-type CPUUsage struct {
- // values in nanoseconds
- Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
- Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"`
- User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"`
- PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu" json:"per_cpu,omitempty"`
-}
-
-func (m *CPUUsage) Reset() { *m = CPUUsage{} }
-func (*CPUUsage) ProtoMessage() {}
-func (*CPUUsage) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{4} }
-
-type Throttle struct {
- Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"`
- ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"`
- ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"`
-}
-
-func (m *Throttle) Reset() { *m = Throttle{} }
-func (*Throttle) ProtoMessage() {}
-func (*Throttle) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{5} }
-
-type MemoryStat struct {
- Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"`
- RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"`
- RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"`
- MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"`
- Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"`
- Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"`
- PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"`
- PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"`
- PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"`
- PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"`
- InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
- ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
- InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
- ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
- Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
- HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"`
- HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"`
- TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"`
- TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"`
- TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"`
- TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"`
- TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"`
- TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"`
- TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"`
- TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"`
- TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"`
- TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"`
- TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"`
- TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"`
- TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"`
- TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"`
- TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"`
- Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage" json:"usage,omitempty"`
- Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap" json:"swap,omitempty"`
- Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel" json:"kernel,omitempty"`
- KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp" json:"kernel_tcp,omitempty"`
-}
-
-func (m *MemoryStat) Reset() { *m = MemoryStat{} }
-func (*MemoryStat) ProtoMessage() {}
-func (*MemoryStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{6} }
-
-type MemoryEntry struct {
- Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
- Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"`
- Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"`
- Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
-}
-
-func (m *MemoryEntry) Reset() { *m = MemoryEntry{} }
-func (*MemoryEntry) ProtoMessage() {}
-func (*MemoryEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{7} }
-
-type BlkIOStat struct {
- IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"`
- IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"`
- IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"`
- IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"`
- IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"`
- IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"`
- IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"`
- SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"`
-}
-
-func (m *BlkIOStat) Reset() { *m = BlkIOStat{} }
-func (*BlkIOStat) ProtoMessage() {}
-func (*BlkIOStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{8} }
-
-type BlkIOEntry struct {
- Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"`
- Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"`
- Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"`
- Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"`
- Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} }
-func (*BlkIOEntry) ProtoMessage() {}
-func (*BlkIOEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{9} }
-
-type RdmaStat struct {
- Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current" json:"current,omitempty"`
- Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit" json:"limit,omitempty"`
-}
-
-func (m *RdmaStat) Reset() { *m = RdmaStat{} }
-func (*RdmaStat) ProtoMessage() {}
-func (*RdmaStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{10} }
-
-type RdmaEntry struct {
- Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
- HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"`
- HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"`
-}
-
-func (m *RdmaEntry) Reset() { *m = RdmaEntry{} }
-func (*RdmaEntry) ProtoMessage() {}
-func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} }
-
-func init() {
- proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics")
- proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat")
- proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat")
- proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat")
- proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage")
- proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle")
- proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat")
- proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry")
- proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat")
- proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry")
- proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat")
- proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry")
-}
-func (m *Metrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Hugetlb) > 0 {
- for _, msg := range m.Hugetlb {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.Pids != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Pids.Size()))
- n1, err := m.Pids.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- }
- if m.CPU != nil {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.CPU.Size()))
- n2, err := m.CPU.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n2
- }
- if m.Memory != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Memory.Size()))
- n3, err := m.Memory.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n3
- }
- if m.Blkio != nil {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Blkio.Size()))
- n4, err := m.Blkio.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n4
- }
- if m.Rdma != nil {
- dAtA[i] = 0x32
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Rdma.Size()))
- n5, err := m.Rdma.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n5
- }
- return i, nil
-}
-
-func (m *HugetlbStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Usage != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
- }
- if m.Max != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
- }
- if m.Failcnt != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
- }
- if len(m.Pagesize) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize)))
- i += copy(dAtA[i:], m.Pagesize)
- }
- return i, nil
-}
-
-func (m *PidsStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Current != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Current))
- }
- if m.Limit != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
- }
- return i, nil
-}
-
-func (m *CPUStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Usage != nil {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
- n5, err := m.Usage.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n5
- }
- if m.Throttling != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size()))
- n6, err := m.Throttling.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n6
- }
- return i, nil
-}
-
-func (m *CPUUsage) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Total != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Total))
- }
- if m.Kernel != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel))
- }
- if m.User != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.User))
- }
- if len(m.PerCPU) > 0 {
- dAtA8 := make([]byte, len(m.PerCPU)*10)
- var j7 int
- for _, num := range m.PerCPU {
- for num >= 1<<7 {
- dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j7++
- }
- dAtA8[j7] = uint8(num)
- j7++
- }
- dAtA[i] = 0x22
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(j7))
- i += copy(dAtA[i:], dAtA8[:j7])
- }
- return i, nil
-}
-
-func (m *Throttle) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Throttle) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Periods != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Periods))
- }
- if m.ThrottledPeriods != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods))
- }
- if m.ThrottledTime != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime))
- }
- return i, nil
-}
-
-func (m *MemoryStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Cache != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Cache))
- }
- if m.RSS != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.RSS))
- }
- if m.RSSHuge != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge))
- }
- if m.MappedFile != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile))
- }
- if m.Dirty != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty))
- }
- if m.Writeback != 0 {
- dAtA[i] = 0x30
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback))
- }
- if m.PgPgIn != 0 {
- dAtA[i] = 0x38
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn))
- }
- if m.PgPgOut != 0 {
- dAtA[i] = 0x40
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut))
- }
- if m.PgFault != 0 {
- dAtA[i] = 0x48
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault))
- }
- if m.PgMajFault != 0 {
- dAtA[i] = 0x50
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault))
- }
- if m.InactiveAnon != 0 {
- dAtA[i] = 0x58
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon))
- }
- if m.ActiveAnon != 0 {
- dAtA[i] = 0x60
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon))
- }
- if m.InactiveFile != 0 {
- dAtA[i] = 0x68
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile))
- }
- if m.ActiveFile != 0 {
- dAtA[i] = 0x70
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile))
- }
- if m.Unevictable != 0 {
- dAtA[i] = 0x78
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable))
- }
- if m.HierarchicalMemoryLimit != 0 {
- dAtA[i] = 0x80
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit))
- }
- if m.HierarchicalSwapLimit != 0 {
- dAtA[i] = 0x88
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit))
- }
- if m.TotalCache != 0 {
- dAtA[i] = 0x90
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache))
- }
- if m.TotalRSS != 0 {
- dAtA[i] = 0x98
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS))
- }
- if m.TotalRSSHuge != 0 {
- dAtA[i] = 0xa0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge))
- }
- if m.TotalMappedFile != 0 {
- dAtA[i] = 0xa8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile))
- }
- if m.TotalDirty != 0 {
- dAtA[i] = 0xb0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty))
- }
- if m.TotalWriteback != 0 {
- dAtA[i] = 0xb8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback))
- }
- if m.TotalPgPgIn != 0 {
- dAtA[i] = 0xc0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn))
- }
- if m.TotalPgPgOut != 0 {
- dAtA[i] = 0xc8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut))
- }
- if m.TotalPgFault != 0 {
- dAtA[i] = 0xd0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault))
- }
- if m.TotalPgMajFault != 0 {
- dAtA[i] = 0xd8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault))
- }
- if m.TotalInactiveAnon != 0 {
- dAtA[i] = 0xe0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon))
- }
- if m.TotalActiveAnon != 0 {
- dAtA[i] = 0xe8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon))
- }
- if m.TotalInactiveFile != 0 {
- dAtA[i] = 0xf0
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile))
- }
- if m.TotalActiveFile != 0 {
- dAtA[i] = 0xf8
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile))
- }
- if m.TotalUnevictable != 0 {
- dAtA[i] = 0x80
- i++
- dAtA[i] = 0x2
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable))
- }
- if m.Usage != nil {
- dAtA[i] = 0x8a
- i++
- dAtA[i] = 0x2
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
- n9, err := m.Usage.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n9
- }
- if m.Swap != nil {
- dAtA[i] = 0x92
- i++
- dAtA[i] = 0x2
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size()))
- n10, err := m.Swap.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n10
- }
- if m.Kernel != nil {
- dAtA[i] = 0x9a
- i++
- dAtA[i] = 0x2
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size()))
- n11, err := m.Kernel.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n11
- }
- if m.KernelTCP != nil {
- dAtA[i] = 0xa2
- i++
- dAtA[i] = 0x2
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size()))
- n12, err := m.KernelTCP.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n12
- }
- return i, nil
-}
-
-func (m *MemoryEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Limit != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
- }
- if m.Usage != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
- }
- if m.Max != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
- }
- if m.Failcnt != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
- }
- return i, nil
-}
-
-func (m *BlkIOStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.IoServiceBytesRecursive) > 0 {
- for _, msg := range m.IoServiceBytesRecursive {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoServicedRecursive) > 0 {
- for _, msg := range m.IoServicedRecursive {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoQueuedRecursive) > 0 {
- for _, msg := range m.IoQueuedRecursive {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoServiceTimeRecursive) > 0 {
- for _, msg := range m.IoServiceTimeRecursive {
- dAtA[i] = 0x22
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoWaitTimeRecursive) > 0 {
- for _, msg := range m.IoWaitTimeRecursive {
- dAtA[i] = 0x2a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoMergedRecursive) > 0 {
- for _, msg := range m.IoMergedRecursive {
- dAtA[i] = 0x32
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.IoTimeRecursive) > 0 {
- for _, msg := range m.IoTimeRecursive {
- dAtA[i] = 0x3a
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.SectorsRecursive) > 0 {
- for _, msg := range m.SectorsRecursive {
- dAtA[i] = 0x42
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- return i, nil
-}
-
-func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Op) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op)))
- i += copy(dAtA[i:], m.Op)
- }
- if len(m.Device) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device)))
- i += copy(dAtA[i:], m.Device)
- }
- if m.Major != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Major))
- }
- if m.Minor != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Minor))
- }
- if m.Value != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.Value))
- }
- return i, nil
-}
-
-func (m *RdmaStat) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Current) > 0 {
- for _, msg := range m.Current {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.Limit) > 0 {
- for _, msg := range m.Limit {
- dAtA[i] = 0x12
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- return i, nil
-}
-
-func (m *RdmaEntry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Device) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device)))
- i += copy(dAtA[i:], m.Device)
- }
- if m.HcaHandles != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles))
- }
- if m.HcaObjects != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects))
- }
- return i, nil
-}
-
-func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return offset + 1
-}
-func (m *Metrics) Size() (n int) {
- var l int
- _ = l
- if len(m.Hugetlb) > 0 {
- for _, e := range m.Hugetlb {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Pids != nil {
- l = m.Pids.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.CPU != nil {
- l = m.CPU.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Memory != nil {
- l = m.Memory.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Blkio != nil {
- l = m.Blkio.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Rdma != nil {
- l = m.Rdma.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *HugetlbStat) Size() (n int) {
- var l int
- _ = l
- if m.Usage != 0 {
- n += 1 + sovMetrics(uint64(m.Usage))
- }
- if m.Max != 0 {
- n += 1 + sovMetrics(uint64(m.Max))
- }
- if m.Failcnt != 0 {
- n += 1 + sovMetrics(uint64(m.Failcnt))
- }
- l = len(m.Pagesize)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *PidsStat) Size() (n int) {
- var l int
- _ = l
- if m.Current != 0 {
- n += 1 + sovMetrics(uint64(m.Current))
- }
- if m.Limit != 0 {
- n += 1 + sovMetrics(uint64(m.Limit))
- }
- return n
-}
-
-func (m *CPUStat) Size() (n int) {
- var l int
- _ = l
- if m.Usage != nil {
- l = m.Usage.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Throttling != nil {
- l = m.Throttling.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *CPUUsage) Size() (n int) {
- var l int
- _ = l
- if m.Total != 0 {
- n += 1 + sovMetrics(uint64(m.Total))
- }
- if m.Kernel != 0 {
- n += 1 + sovMetrics(uint64(m.Kernel))
- }
- if m.User != 0 {
- n += 1 + sovMetrics(uint64(m.User))
- }
- if len(m.PerCPU) > 0 {
- l = 0
- for _, e := range m.PerCPU {
- l += sovMetrics(uint64(e))
- }
- n += 1 + sovMetrics(uint64(l)) + l
- }
- return n
-}
-
-func (m *Throttle) Size() (n int) {
- var l int
- _ = l
- if m.Periods != 0 {
- n += 1 + sovMetrics(uint64(m.Periods))
- }
- if m.ThrottledPeriods != 0 {
- n += 1 + sovMetrics(uint64(m.ThrottledPeriods))
- }
- if m.ThrottledTime != 0 {
- n += 1 + sovMetrics(uint64(m.ThrottledTime))
- }
- return n
-}
-
-func (m *MemoryStat) Size() (n int) {
- var l int
- _ = l
- if m.Cache != 0 {
- n += 1 + sovMetrics(uint64(m.Cache))
- }
- if m.RSS != 0 {
- n += 1 + sovMetrics(uint64(m.RSS))
- }
- if m.RSSHuge != 0 {
- n += 1 + sovMetrics(uint64(m.RSSHuge))
- }
- if m.MappedFile != 0 {
- n += 1 + sovMetrics(uint64(m.MappedFile))
- }
- if m.Dirty != 0 {
- n += 1 + sovMetrics(uint64(m.Dirty))
- }
- if m.Writeback != 0 {
- n += 1 + sovMetrics(uint64(m.Writeback))
- }
- if m.PgPgIn != 0 {
- n += 1 + sovMetrics(uint64(m.PgPgIn))
- }
- if m.PgPgOut != 0 {
- n += 1 + sovMetrics(uint64(m.PgPgOut))
- }
- if m.PgFault != 0 {
- n += 1 + sovMetrics(uint64(m.PgFault))
- }
- if m.PgMajFault != 0 {
- n += 1 + sovMetrics(uint64(m.PgMajFault))
- }
- if m.InactiveAnon != 0 {
- n += 1 + sovMetrics(uint64(m.InactiveAnon))
- }
- if m.ActiveAnon != 0 {
- n += 1 + sovMetrics(uint64(m.ActiveAnon))
- }
- if m.InactiveFile != 0 {
- n += 1 + sovMetrics(uint64(m.InactiveFile))
- }
- if m.ActiveFile != 0 {
- n += 1 + sovMetrics(uint64(m.ActiveFile))
- }
- if m.Unevictable != 0 {
- n += 1 + sovMetrics(uint64(m.Unevictable))
- }
- if m.HierarchicalMemoryLimit != 0 {
- n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit))
- }
- if m.HierarchicalSwapLimit != 0 {
- n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit))
- }
- if m.TotalCache != 0 {
- n += 2 + sovMetrics(uint64(m.TotalCache))
- }
- if m.TotalRSS != 0 {
- n += 2 + sovMetrics(uint64(m.TotalRSS))
- }
- if m.TotalRSSHuge != 0 {
- n += 2 + sovMetrics(uint64(m.TotalRSSHuge))
- }
- if m.TotalMappedFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalMappedFile))
- }
- if m.TotalDirty != 0 {
- n += 2 + sovMetrics(uint64(m.TotalDirty))
- }
- if m.TotalWriteback != 0 {
- n += 2 + sovMetrics(uint64(m.TotalWriteback))
- }
- if m.TotalPgPgIn != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgPgIn))
- }
- if m.TotalPgPgOut != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgPgOut))
- }
- if m.TotalPgFault != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgFault))
- }
- if m.TotalPgMajFault != 0 {
- n += 2 + sovMetrics(uint64(m.TotalPgMajFault))
- }
- if m.TotalInactiveAnon != 0 {
- n += 2 + sovMetrics(uint64(m.TotalInactiveAnon))
- }
- if m.TotalActiveAnon != 0 {
- n += 2 + sovMetrics(uint64(m.TotalActiveAnon))
- }
- if m.TotalInactiveFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalInactiveFile))
- }
- if m.TotalActiveFile != 0 {
- n += 2 + sovMetrics(uint64(m.TotalActiveFile))
- }
- if m.TotalUnevictable != 0 {
- n += 2 + sovMetrics(uint64(m.TotalUnevictable))
- }
- if m.Usage != nil {
- l = m.Usage.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.Swap != nil {
- l = m.Swap.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.Kernel != nil {
- l = m.Kernel.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- if m.KernelTCP != nil {
- l = m.KernelTCP.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *MemoryEntry) Size() (n int) {
- var l int
- _ = l
- if m.Limit != 0 {
- n += 1 + sovMetrics(uint64(m.Limit))
- }
- if m.Usage != 0 {
- n += 1 + sovMetrics(uint64(m.Usage))
- }
- if m.Max != 0 {
- n += 1 + sovMetrics(uint64(m.Max))
- }
- if m.Failcnt != 0 {
- n += 1 + sovMetrics(uint64(m.Failcnt))
- }
- return n
-}
-
-func (m *BlkIOStat) Size() (n int) {
- var l int
- _ = l
- if len(m.IoServiceBytesRecursive) > 0 {
- for _, e := range m.IoServiceBytesRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoServicedRecursive) > 0 {
- for _, e := range m.IoServicedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoQueuedRecursive) > 0 {
- for _, e := range m.IoQueuedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoServiceTimeRecursive) > 0 {
- for _, e := range m.IoServiceTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoWaitTimeRecursive) > 0 {
- for _, e := range m.IoWaitTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoMergedRecursive) > 0 {
- for _, e := range m.IoMergedRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.IoTimeRecursive) > 0 {
- for _, e := range m.IoTimeRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.SectorsRecursive) > 0 {
- for _, e := range m.SectorsRecursive {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *BlkIOEntry) Size() (n int) {
- var l int
- _ = l
- l = len(m.Op)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Device)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Major != 0 {
- n += 1 + sovMetrics(uint64(m.Major))
- }
- if m.Minor != 0 {
- n += 1 + sovMetrics(uint64(m.Minor))
- }
- if m.Value != 0 {
- n += 1 + sovMetrics(uint64(m.Value))
- }
- return n
-}
-
-func (m *RdmaStat) Size() (n int) {
- var l int
- _ = l
- if len(m.Current) > 0 {
- for _, e := range m.Current {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Limit) > 0 {
- for _, e := range m.Limit {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *RdmaEntry) Size() (n int) {
- var l int
- _ = l
- l = len(m.Device)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.HcaHandles != 0 {
- n += 1 + sovMetrics(uint64(m.HcaHandles))
- }
- if m.HcaObjects != 0 {
- n += 1 + sovMetrics(uint64(m.HcaObjects))
- }
- return n
-}
-
-func sovMetrics(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozMetrics(x uint64) (n int) {
- return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *Metrics) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Metrics{`,
- `Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugetlbStat", "HugetlbStat", 1) + `,`,
- `Pids:` + strings.Replace(fmt.Sprintf("%v", this.Pids), "PidsStat", "PidsStat", 1) + `,`,
- `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "CPUStat", "CPUStat", 1) + `,`,
- `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`,
- `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`,
- `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *HugetlbStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&HugetlbStat{`,
- `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
- `Max:` + fmt.Sprintf("%v", this.Max) + `,`,
- `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
- `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PidsStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PidsStat{`,
- `Current:` + fmt.Sprintf("%v", this.Current) + `,`,
- `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CPUStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CPUStat{`,
- `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "CPUUsage", "CPUUsage", 1) + `,`,
- `Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "Throttle", "Throttle", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *CPUUsage) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&CPUUsage{`,
- `Total:` + fmt.Sprintf("%v", this.Total) + `,`,
- `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`,
- `User:` + fmt.Sprintf("%v", this.User) + `,`,
- `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Throttle) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Throttle{`,
- `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`,
- `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`,
- `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MemoryStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MemoryStat{`,
- `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`,
- `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`,
- `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`,
- `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`,
- `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`,
- `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`,
- `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`,
- `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`,
- `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`,
- `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`,
- `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`,
- `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`,
- `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`,
- `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`,
- `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`,
- `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`,
- `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`,
- `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`,
- `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`,
- `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`,
- `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`,
- `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`,
- `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`,
- `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`,
- `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`,
- `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`,
- `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`,
- `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`,
- `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`,
- `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`,
- `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`,
- `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`,
- `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `Swap:` + strings.Replace(fmt.Sprintf("%v", this.Swap), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `Kernel:` + strings.Replace(fmt.Sprintf("%v", this.Kernel), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `KernelTCP:` + strings.Replace(fmt.Sprintf("%v", this.KernelTCP), "MemoryEntry", "MemoryEntry", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MemoryEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MemoryEntry{`,
- `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
- `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
- `Max:` + fmt.Sprintf("%v", this.Max) + `,`,
- `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *BlkIOStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&BlkIOStat{`,
- `IoServiceBytesRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceBytesRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoServicedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServicedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoQueuedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoQueuedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoServiceTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoWaitTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoWaitTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoMergedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoMergedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `IoTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `SectorsRecursive:` + strings.Replace(fmt.Sprintf("%v", this.SectorsRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *BlkIOEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&BlkIOEntry{`,
- `Op:` + fmt.Sprintf("%v", this.Op) + `,`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `Major:` + fmt.Sprintf("%v", this.Major) + `,`,
- `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *RdmaStat) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&RdmaStat{`,
- `Current:` + strings.Replace(fmt.Sprintf("%v", this.Current), "RdmaEntry", "RdmaEntry", 1) + `,`,
- `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "RdmaEntry", "RdmaEntry", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *RdmaEntry) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&RdmaEntry{`,
- `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
- `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`,
- `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringMetrics(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *Metrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
-
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Hugetlb = append(m.Hugetlb, &HugetlbStat{})
- if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Pids == nil {
- m.Pids = &PidsStat{}
- }
- if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CPU == nil {
- m.CPU = &CPUStat{}
- }
- if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Memory == nil {
- m.Memory = &MemoryStat{}
- }
- if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Blkio == nil {
- m.Blkio = &BlkIOStat{}
- }
- if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Rdma == nil {
- m.Rdma = &RdmaStat{}
- }
- if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HugetlbStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- m.Usage = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Usage |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- m.Max = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Max |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
- }
- m.Failcnt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Failcnt |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Pagesize = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PidsStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PidsStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
- }
- m.Current = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Current |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CPUStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CPUStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Usage == nil {
- m.Usage = &CPUUsage{}
- }
- if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Throttling == nil {
- m.Throttling = &Throttle{}
- }
- if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CPUUsage) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
- }
- m.Total = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Total |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
- }
- m.Kernel = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Kernel |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
- }
- m.User = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.User |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PerCPU = append(m.PerCPU, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PerCPU = append(m.PerCPU, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Throttle) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Throttle: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType)
- }
- m.Periods = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Periods |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType)
- }
- m.ThrottledPeriods = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ThrottledPeriods |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType)
- }
- m.ThrottledTime = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ThrottledTime |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemoryStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
- }
- m.Cache = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Cache |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType)
- }
- m.RSS = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RSS |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType)
- }
- m.RSSHuge = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RSSHuge |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType)
- }
- m.MappedFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MappedFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType)
- }
- m.Dirty = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Dirty |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType)
- }
- m.Writeback = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Writeback |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType)
- }
- m.PgPgIn = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgPgIn |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType)
- }
- m.PgPgOut = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgPgOut |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType)
- }
- m.PgFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgFault |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType)
- }
- m.PgMajFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PgMajFault |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType)
- }
- m.InactiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InactiveAnon |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType)
- }
- m.ActiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ActiveAnon |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType)
- }
- m.InactiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InactiveFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType)
- }
- m.ActiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ActiveFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 15:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType)
- }
- m.Unevictable = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Unevictable |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 16:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType)
- }
- m.HierarchicalMemoryLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HierarchicalMemoryLimit |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 17:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType)
- }
- m.HierarchicalSwapLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HierarchicalSwapLimit |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 18:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType)
- }
- m.TotalCache = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalCache |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 19:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType)
- }
- m.TotalRSS = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalRSS |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 20:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType)
- }
- m.TotalRSSHuge = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalRSSHuge |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 21:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType)
- }
- m.TotalMappedFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalMappedFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 22:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType)
- }
- m.TotalDirty = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalDirty |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 23:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType)
- }
- m.TotalWriteback = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalWriteback |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 24:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType)
- }
- m.TotalPgPgIn = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgPgIn |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 25:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType)
- }
- m.TotalPgPgOut = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgPgOut |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 26:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType)
- }
- m.TotalPgFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgFault |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 27:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType)
- }
- m.TotalPgMajFault = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalPgMajFault |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 28:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType)
- }
- m.TotalInactiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalInactiveAnon |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 29:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType)
- }
- m.TotalActiveAnon = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalActiveAnon |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 30:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType)
- }
- m.TotalInactiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalInactiveFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 31:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType)
- }
- m.TotalActiveFile = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalActiveFile |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 32:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType)
- }
- m.TotalUnevictable = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TotalUnevictable |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 33:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Usage == nil {
- m.Usage = &MemoryEntry{}
- }
- if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 34:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Swap == nil {
- m.Swap = &MemoryEntry{}
- }
- if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 35:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Kernel == nil {
- m.Kernel = &MemoryEntry{}
- }
- if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 36:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.KernelTCP == nil {
- m.KernelTCP = &MemoryEntry{}
- }
- if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemoryEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
- }
- m.Usage = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Usage |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- m.Max = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Max |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
- }
- m.Failcnt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Failcnt |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BlkIOStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{})
- if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{})
- if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{})
- if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{})
- if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{})
- if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{})
- if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{})
- if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{})
- if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BlkIOEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Op = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType)
- }
- m.Major = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Major |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType)
- }
- m.Minor = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Minor |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- m.Value = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Value |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RdmaStat) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Current = append(m.Current, &RdmaEntry{})
- if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Limit = append(m.Limit, &RdmaEntry{})
- if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RdmaEntry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Device = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType)
- }
- m.HcaHandles = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HcaHandles |= (uint32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType)
- }
- m.HcaObjects = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HcaObjects |= (uint32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-func skipMetrics(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipMetrics(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
-)
-
-func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) }
-
-var fileDescriptorMetrics = []byte{
- // 1325 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7,
- 0x16, 0x8d, 0xac, 0xb1, 0x3e, 0xae, 0x6c, 0xc7, 0xa6, 0x13, 0x67, 0xec, 0x97, 0x27, 0x29, 0xb2,
- 0xfd, 0x9e, 0x5b, 0x03, 0x32, 0x9a, 0x02, 0x41, 0x93, 0xa6, 0x28, 0x22, 0xb7, 0x41, 0x83, 0xd6,
- 0x88, 0x32, 0xb2, 0x91, 0x76, 0x35, 0x18, 0x8d, 0x98, 0x31, 0xe3, 0xd1, 0x70, 0xc2, 0xe1, 0xc8,
- 0x71, 0x57, 0xdd, 0xf5, 0x37, 0xf5, 0x1f, 0x64, 0xd9, 0x4d, 0x81, 0x76, 0x63, 0x34, 0xfa, 0x25,
- 0x05, 0x2f, 0xe7, 0x4b, 0x49, 0xdc, 0x40, 0xbb, 0xb9, 0xbc, 0xe7, 0x1c, 0x5e, 0x5e, 0x1e, 0x8a,
- 0x14, 0xec, 0x7b, 0x4c, 0x9e, 0xc6, 0xc3, 0xae, 0xcb, 0xc7, 0x07, 0x2e, 0x0f, 0xa4, 0xc3, 0x02,
- 0x2a, 0x46, 0x07, 0xae, 0x27, 0x78, 0x1c, 0x46, 0x07, 0x63, 0x2a, 0x05, 0x73, 0xa3, 0x6e, 0x28,
- 0xb8, 0xe4, 0xc4, 0x64, 0xbc, 0x9b, 0x83, 0xba, 0x09, 0xa8, 0x3b, 0xf9, 0x6c, 0xeb, 0x86, 0xc7,
- 0x3d, 0x8e, 0xa0, 0x03, 0xf5, 0xa5, 0xf1, 0x9d, 0xdf, 0x16, 0xa0, 0x7a, 0xa4, 0x15, 0xc8, 0xd7,
- 0x50, 0x3d, 0x8d, 0x3d, 0x2a, 0xfd, 0xa1, 0x59, 0x6a, 0x97, 0xf7, 0x1a, 0x77, 0x77, 0xbb, 0x57,
- 0xa9, 0x75, 0xbf, 0xd3, 0xc0, 0x81, 0x74, 0xa4, 0x95, 0xb2, 0xc8, 0x3d, 0x30, 0x42, 0x36, 0x8a,
- 0xcc, 0x85, 0x76, 0x69, 0xaf, 0x71, 0xb7, 0x73, 0x35, 0xbb, 0xcf, 0x46, 0x11, 0x52, 0x11, 0x4f,
- 0x1e, 0x42, 0xd9, 0x0d, 0x63, 0xb3, 0x8c, 0xb4, 0x3b, 0x57, 0xd3, 0x0e, 0xfb, 0x27, 0x8a, 0xd5,
- 0xab, 0x4e, 0x2f, 0x5b, 0xe5, 0xc3, 0xfe, 0x89, 0xa5, 0x68, 0xe4, 0x21, 0x54, 0xc6, 0x74, 0xcc,
- 0xc5, 0x85, 0x69, 0xa0, 0xc0, 0xce, 0xd5, 0x02, 0x47, 0x88, 0xc3, 0x99, 0x13, 0x0e, 0xb9, 0x0f,
- 0x8b, 0x43, 0xff, 0x8c, 0x71, 0x73, 0x11, 0xc9, 0xdb, 0x57, 0x93, 0x7b, 0xfe, 0xd9, 0x93, 0xa7,
- 0xc8, 0xd5, 0x8c, 0xce, 0x19, 0x34, 0x0a, 0x6d, 0x20, 0x37, 0x60, 0x31, 0x8e, 0x1c, 0x8f, 0x9a,
- 0xa5, 0x76, 0x69, 0xcf, 0xb0, 0x74, 0x40, 0x56, 0xa1, 0x3c, 0x76, 0x5e, 0x63, 0x4b, 0x0c, 0x4b,
- 0x7d, 0x12, 0x13, 0xaa, 0x2f, 0x1c, 0xe6, 0xbb, 0x81, 0xc4, 0x15, 0x1b, 0x56, 0x1a, 0x92, 0x2d,
- 0xa8, 0x85, 0x8e, 0x47, 0x23, 0xf6, 0x33, 0xc5, 0xb5, 0xd4, 0xad, 0x2c, 0xee, 0x3c, 0x80, 0x5a,
- 0xda, 0x35, 0xa5, 0xe0, 0xc6, 0x42, 0xd0, 0x40, 0x26, 0x73, 0xa5, 0xa1, 0xaa, 0xc1, 0x67, 0x63,
- 0x26, 0x93, 0xf9, 0x74, 0xd0, 0xf9, 0xb5, 0x04, 0xd5, 0xa4, 0x77, 0xe4, 0x8b, 0x62, 0x95, 0xff,
- 0xba, 0x49, 0x87, 0xfd, 0x93, 0x13, 0x85, 0x4c, 0x57, 0xd2, 0x03, 0x90, 0xa7, 0x82, 0x4b, 0xe9,
- 0xb3, 0xc0, 0xfb, 0xf8, 0x1e, 0x1f, 0x6b, 0x2c, 0xb5, 0x0a, 0xac, 0xce, 0x2b, 0xa8, 0xa5, 0xb2,
- 0xaa, 0x56, 0xc9, 0xa5, 0xe3, 0xa7, 0xfd, 0xc2, 0x80, 0x6c, 0x40, 0xe5, 0x8c, 0x8a, 0x80, 0xfa,
- 0xc9, 0x12, 0x92, 0x88, 0x10, 0x30, 0xe2, 0x88, 0x8a, 0xa4, 0x65, 0xf8, 0x4d, 0xb6, 0xa1, 0x1a,
- 0x52, 0x61, 0x2b, 0xef, 0x18, 0xed, 0xf2, 0x9e, 0xd1, 0x83, 0xe9, 0x65, 0xab, 0xd2, 0xa7, 0x42,
- 0x79, 0xa3, 0x12, 0x52, 0x71, 0x18, 0xc6, 0x9d, 0xd7, 0x50, 0x4b, 0x4b, 0x51, 0x8d, 0x0b, 0xa9,
- 0x60, 0x7c, 0x14, 0xa5, 0x8d, 0x4b, 0x42, 0xb2, 0x0f, 0x6b, 0x49, 0x99, 0x74, 0x64, 0xa7, 0x18,
- 0x5d, 0xc1, 0x6a, 0x96, 0xe8, 0x27, 0xe0, 0x5d, 0x58, 0xc9, 0xc1, 0x92, 0x8d, 0x69, 0x52, 0xd5,
- 0x72, 0x36, 0x7a, 0xcc, 0xc6, 0xb4, 0xf3, 0x57, 0x03, 0x20, 0x77, 0x9c, 0x5a, 0xaf, 0xeb, 0xb8,
- 0xa7, 0x99, 0x3f, 0x30, 0x20, 0x9b, 0x50, 0x16, 0x51, 0x32, 0x95, 0x36, 0xb6, 0x35, 0x18, 0x58,
- 0x6a, 0x8c, 0xfc, 0x0f, 0x6a, 0x22, 0x8a, 0x6c, 0x75, 0xba, 0xf4, 0x04, 0xbd, 0xc6, 0xf4, 0xb2,
- 0x55, 0xb5, 0x06, 0x03, 0x65, 0x3b, 0xab, 0x2a, 0xa2, 0x48, 0x7d, 0x90, 0x16, 0x34, 0xc6, 0x4e,
- 0x18, 0xd2, 0x91, 0xfd, 0x82, 0xf9, 0xda, 0x39, 0x86, 0x05, 0x7a, 0xe8, 0x31, 0xf3, 0xb1, 0xd3,
- 0x23, 0x26, 0xe4, 0x05, 0x7a, 0xdc, 0xb0, 0x74, 0x40, 0x6e, 0x43, 0xfd, 0x5c, 0x30, 0x49, 0x87,
- 0x8e, 0x7b, 0x66, 0x56, 0x30, 0x93, 0x0f, 0x10, 0x13, 0x6a, 0xa1, 0x67, 0x87, 0x9e, 0xcd, 0x02,
- 0xb3, 0xaa, 0x77, 0x22, 0xf4, 0xfa, 0xde, 0x93, 0x80, 0x6c, 0x41, 0x5d, 0x67, 0x78, 0x2c, 0xcd,
- 0x5a, 0xd2, 0x46, 0xaf, 0xef, 0x3d, 0x8d, 0x25, 0xd9, 0x44, 0xd6, 0x0b, 0x27, 0xf6, 0xa5, 0x59,
- 0x4f, 0x53, 0x8f, 0x55, 0x48, 0xda, 0xb0, 0x14, 0x7a, 0xf6, 0xd8, 0x79, 0x99, 0xa4, 0x41, 0x97,
- 0x19, 0x7a, 0x47, 0xce, 0x4b, 0x8d, 0xd8, 0x86, 0x65, 0x16, 0x38, 0xae, 0x64, 0x13, 0x6a, 0x3b,
- 0x01, 0x0f, 0xcc, 0x06, 0x42, 0x96, 0xd2, 0xc1, 0x47, 0x01, 0x0f, 0xd4, 0x62, 0x8b, 0x90, 0x25,
- 0xad, 0x52, 0x00, 0x14, 0x55, 0xb0, 0x1f, 0xcb, 0xb3, 0x2a, 0xd8, 0x91, 0x5c, 0x05, 0x21, 0x2b,
- 0x45, 0x15, 0x04, 0xb4, 0xa1, 0x11, 0x07, 0x74, 0xc2, 0x5c, 0xe9, 0x0c, 0x7d, 0x6a, 0x5e, 0x47,
- 0x40, 0x71, 0x88, 0x3c, 0x80, 0xcd, 0x53, 0x46, 0x85, 0x23, 0xdc, 0x53, 0xe6, 0x3a, 0xbe, 0xad,
- 0x7f, 0x4f, 0x6c, 0x7d, 0xfc, 0x56, 0x11, 0x7f, 0xab, 0x08, 0xd0, 0x4e, 0xf8, 0x41, 0xa5, 0xc9,
- 0x3d, 0x98, 0x49, 0xd9, 0xd1, 0xb9, 0x13, 0x26, 0xcc, 0x35, 0x64, 0xde, 0x2c, 0xa6, 0x07, 0xe7,
- 0x4e, 0xa8, 0x79, 0x2d, 0x68, 0xe0, 0x29, 0xb1, 0xb5, 0x91, 0x88, 0x2e, 0x1b, 0x87, 0x0e, 0xd1,
- 0x4d, 0x9f, 0x40, 0x5d, 0x03, 0x94, 0xa7, 0xd6, 0xd1, 0x33, 0x4b, 0xd3, 0xcb, 0x56, 0xed, 0x58,
- 0x0d, 0x2a, 0x63, 0xd5, 0x30, 0x6d, 0x45, 0x11, 0xb9, 0x07, 0x2b, 0x19, 0x54, 0x7b, 0xec, 0x06,
- 0xe2, 0x57, 0xa7, 0x97, 0xad, 0xa5, 0x14, 0x8f, 0x46, 0x5b, 0x4a, 0x39, 0xe8, 0xb6, 0x4f, 0x61,
- 0x4d, 0xf3, 0x8a, 0x9e, 0xbb, 0x89, 0x95, 0x5c, 0xc7, 0xc4, 0x51, 0x6e, 0xbc, 0xac, 0x5e, 0x6d,
- 0xbf, 0x8d, 0x42, 0xbd, 0xdf, 0xa0, 0x07, 0xff, 0x0f, 0x9a, 0x63, 0xe7, 0x4e, 0xbc, 0x85, 0x20,
- 0x5d, 0xdb, 0xf3, 0xcc, 0x8e, 0xdb, 0x69, 0xb5, 0x99, 0x29, 0x4d, 0xbd, 0x25, 0x38, 0xda, 0xd7,
- 0xce, 0xdc, 0x4d, 0xd5, 0x72, 0x7f, 0x6e, 0xea, 0xcd, 0xcf, 0x50, 0xca, 0xa4, 0x3b, 0x05, 0x2d,
- 0xed, 0xc5, 0xad, 0x19, 0x94, 0x76, 0xe3, 0x3e, 0x90, 0x0c, 0x95, 0xbb, 0xf6, 0x3f, 0x85, 0x85,
- 0xf6, 0x73, 0xeb, 0x76, 0x61, 0x5d, 0x83, 0x67, 0x0d, 0x7c, 0x1b, 0xd1, 0xba, 0x5f, 0x4f, 0x8a,
- 0x2e, 0xce, 0x9a, 0x58, 0x44, 0xff, 0xb7, 0xa0, 0xfd, 0x28, 0xc7, 0xbe, 0xaf, 0x8d, 0x2d, 0x6f,
- 0x7e, 0x40, 0x1b, 0x9b, 0xfe, 0xae, 0x36, 0xa2, 0x5b, 0xef, 0x69, 0x23, 0x76, 0x3f, 0xc5, 0x16,
- 0xcd, 0xde, 0x4e, 0x7e, 0xf6, 0x54, 0xe2, 0xa4, 0xe0, 0xf8, 0x2f, 0xd3, 0xab, 0xe3, 0x0e, 0xfe,
- 0xf6, 0xef, 0x7e, 0xec, 0x9e, 0xfd, 0x36, 0x90, 0xe2, 0x22, 0xbd, 0x3d, 0xee, 0x83, 0xa1, 0x5c,
- 0x6e, 0x76, 0xe6, 0xe1, 0x22, 0x85, 0x7c, 0x95, 0x5d, 0x09, 0xdb, 0xf3, 0x90, 0xd3, 0x9b, 0x63,
- 0x00, 0xa0, 0xbf, 0x6c, 0xe9, 0x86, 0xe6, 0xce, 0x1c, 0x12, 0xbd, 0xe5, 0xe9, 0x65, 0xab, 0xfe,
- 0x3d, 0x92, 0x8f, 0x0f, 0xfb, 0x56, 0x5d, 0xeb, 0x1c, 0xbb, 0x61, 0x87, 0x42, 0xa3, 0x00, 0xcc,
- 0xef, 0xdd, 0x52, 0xe1, 0xde, 0xcd, 0x5f, 0x04, 0x0b, 0x1f, 0x78, 0x11, 0x94, 0x3f, 0xf8, 0x22,
- 0x30, 0x66, 0x5e, 0x04, 0x9d, 0x3f, 0x16, 0xa1, 0x9e, 0xbd, 0x3b, 0x88, 0x03, 0x5b, 0x8c, 0xdb,
- 0x11, 0x15, 0x13, 0xe6, 0x52, 0x7b, 0x78, 0x21, 0x69, 0x64, 0x0b, 0xea, 0xc6, 0x22, 0x62, 0x13,
- 0x9a, 0xbc, 0xd9, 0x76, 0x3e, 0xf2, 0x80, 0xd1, 0xbd, 0xb9, 0xc5, 0xf8, 0x40, 0xcb, 0xf4, 0x94,
- 0x8a, 0x95, 0x8a, 0x90, 0x1f, 0xe1, 0x66, 0x3e, 0xc5, 0xa8, 0xa0, 0xbe, 0x30, 0x87, 0xfa, 0x7a,
- 0xa6, 0x3e, 0xca, 0x95, 0x8f, 0x61, 0x9d, 0x71, 0xfb, 0x55, 0x4c, 0xe3, 0x19, 0xdd, 0xf2, 0x1c,
- 0xba, 0x6b, 0x8c, 0x3f, 0x43, 0x7e, 0xae, 0x6a, 0xc3, 0x66, 0xa1, 0x25, 0xea, 0x2e, 0x2e, 0x68,
- 0x1b, 0x73, 0x68, 0x6f, 0x64, 0x35, 0xab, 0xbb, 0x3b, 0x9f, 0xe0, 0x27, 0xd8, 0x60, 0xdc, 0x3e,
- 0x77, 0x98, 0x7c, 0x57, 0x7d, 0x71, 0xbe, 0x8e, 0x3c, 0x77, 0x98, 0x9c, 0x95, 0xd6, 0x1d, 0x19,
- 0x53, 0xe1, 0xcd, 0x74, 0xa4, 0x32, 0x5f, 0x47, 0x8e, 0x90, 0x9f, 0xab, 0xf6, 0x61, 0x8d, 0xf1,
- 0x77, 0x6b, 0xad, 0xce, 0xa1, 0x79, 0x9d, 0xf1, 0xd9, 0x3a, 0x9f, 0xc1, 0x5a, 0x44, 0x5d, 0xc9,
- 0x45, 0xd1, 0x6d, 0xb5, 0x39, 0x14, 0x57, 0x13, 0x7a, 0x26, 0xd9, 0x99, 0x00, 0xe4, 0x79, 0xb2,
- 0x02, 0x0b, 0x3c, 0xc4, 0xa3, 0x53, 0xb7, 0x16, 0x78, 0xa8, 0xde, 0x80, 0x23, 0xf5, 0xb3, 0xa3,
- 0x0f, 0x4e, 0xdd, 0x4a, 0x22, 0x75, 0x9e, 0xc6, 0xce, 0x4b, 0x9e, 0x3e, 0x02, 0x75, 0x80, 0xa3,
- 0x2c, 0xe0, 0x22, 0x39, 0x3b, 0x3a, 0x50, 0xa3, 0x13, 0xc7, 0x8f, 0x69, 0xfa, 0xe6, 0xc1, 0xa0,
- 0x67, 0xbe, 0x79, 0xdb, 0xbc, 0xf6, 0xe7, 0xdb, 0xe6, 0xb5, 0x5f, 0xa6, 0xcd, 0xd2, 0x9b, 0x69,
- 0xb3, 0xf4, 0xfb, 0xb4, 0x59, 0xfa, 0x7b, 0xda, 0x2c, 0x0d, 0x2b, 0xf8, 0x7f, 0xe8, 0xf3, 0x7f,
- 0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x21, 0x0b, 0xcd, 0x6e, 0x0d, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/cgroups/metrics.proto b/vendor/github.com/containerd/cgroups/metrics.proto
deleted file mode 100644
index 642623fce..000000000
--- a/vendor/github.com/containerd/cgroups/metrics.proto
+++ /dev/null
@@ -1,123 +0,0 @@
-syntax = "proto3";
-
-package io.containerd.cgroups.v1;
-
-import "gogoproto/gogo.proto";
-
-message Metrics {
- repeated HugetlbStat hugetlb = 1;
- PidsStat pids = 2;
- CPUStat cpu = 3 [(gogoproto.customname) = "CPU"];
- MemoryStat memory = 4;
- BlkIOStat blkio = 5;
- RdmaStat rdma = 6;
-}
-
-message HugetlbStat {
- uint64 usage = 1;
- uint64 max = 2;
- uint64 failcnt = 3;
- string pagesize = 4;
-}
-
-message PidsStat {
- uint64 current = 1;
- uint64 limit = 2;
-}
-
-message CPUStat {
- CPUUsage usage = 1;
- Throttle throttling = 2;
-}
-
-message CPUUsage {
- // values in nanoseconds
- uint64 total = 1;
- uint64 kernel = 2;
- uint64 user = 3;
- repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"];
-
-}
-
-message Throttle {
- uint64 periods = 1;
- uint64 throttled_periods = 2;
- uint64 throttled_time = 3;
-}
-
-message MemoryStat {
- uint64 cache = 1;
- uint64 rss = 2 [(gogoproto.customname) = "RSS"];
- uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"];
- uint64 mapped_file = 4;
- uint64 dirty = 5;
- uint64 writeback = 6;
- uint64 pg_pg_in = 7;
- uint64 pg_pg_out = 8;
- uint64 pg_fault = 9;
- uint64 pg_maj_fault = 10;
- uint64 inactive_anon = 11;
- uint64 active_anon = 12;
- uint64 inactive_file = 13;
- uint64 active_file = 14;
- uint64 unevictable = 15;
- uint64 hierarchical_memory_limit = 16;
- uint64 hierarchical_swap_limit = 17;
- uint64 total_cache = 18;
- uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"];
- uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"];
- uint64 total_mapped_file = 21;
- uint64 total_dirty = 22;
- uint64 total_writeback = 23;
- uint64 total_pg_pg_in = 24;
- uint64 total_pg_pg_out = 25;
- uint64 total_pg_fault = 26;
- uint64 total_pg_maj_fault = 27;
- uint64 total_inactive_anon = 28;
- uint64 total_active_anon = 29;
- uint64 total_inactive_file = 30;
- uint64 total_active_file = 31;
- uint64 total_unevictable = 32;
- MemoryEntry usage = 33;
- MemoryEntry swap = 34;
- MemoryEntry kernel = 35;
- MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"];
-
-}
-
-message MemoryEntry {
- uint64 limit = 1;
- uint64 usage = 2;
- uint64 max = 3;
- uint64 failcnt = 4;
-}
-
-message BlkIOStat {
- repeated BlkIOEntry io_service_bytes_recursive = 1;
- repeated BlkIOEntry io_serviced_recursive = 2;
- repeated BlkIOEntry io_queued_recursive = 3;
- repeated BlkIOEntry io_service_time_recursive = 4;
- repeated BlkIOEntry io_wait_time_recursive = 5;
- repeated BlkIOEntry io_merged_recursive = 6;
- repeated BlkIOEntry io_time_recursive = 7;
- repeated BlkIOEntry sectors_recursive = 8;
-}
-
-message BlkIOEntry {
- string op = 1;
- string device = 2;
- uint64 major = 3;
- uint64 minor = 4;
- uint64 value = 5;
-}
-
-message RdmaStat {
- repeated RdmaEntry current = 1;
- repeated RdmaEntry limit = 2;
-}
-
-message RdmaEntry {
- string device = 1;
- uint32 hca_handles = 2;
- uint32 hca_objects = 3;
-}
diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go
deleted file mode 100644
index 06b16c3b1..000000000
--- a/vendor/github.com/containerd/cgroups/named.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import "path/filepath"
-
-func NewNamed(root string, name Name) *namedController {
- return &namedController{
- root: root,
- name: name,
- }
-}
-
-type namedController struct {
- root string
- name Name
-}
-
-func (n *namedController) Name() Name {
- return n.name
-}
-
-func (n *namedController) Path(path string) string {
- return filepath.Join(n.root, string(n.name), path)
-}
diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go
deleted file mode 100644
index 8f1a2651f..000000000
--- a/vendor/github.com/containerd/cgroups/net_cls.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewNetCls(root string) *netclsController {
- return &netclsController{
- root: filepath.Join(root, string(NetCLS)),
- }
-}
-
-type netclsController struct {
- root string
-}
-
-func (n *netclsController) Name() Name {
- return NetCLS
-}
-
-func (n *netclsController) Path(path string) string {
- return filepath.Join(n.root, path)
-}
-
-func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
- return ioutil.WriteFile(
- filepath.Join(n.Path(path), "net_cls.classid"),
- []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
- defaultFilePerm,
- )
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go
deleted file mode 100644
index 612e1bcd2..000000000
--- a/vendor/github.com/containerd/cgroups/net_prio.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewNetPrio(root string) *netprioController {
- return &netprioController{
- root: filepath.Join(root, string(NetPrio)),
- }
-}
-
-type netprioController struct {
- root string
-}
-
-func (n *netprioController) Name() Name {
- return NetPrio
-}
-
-func (n *netprioController) Path(path string) string {
- return filepath.Join(n.root, path)
-}
-
-func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if resources.Network != nil {
- for _, prio := range resources.Network.Priorities {
- if err := ioutil.WriteFile(
- filepath.Join(n.Path(path), "net_prio.ifpriomap"),
- formatPrio(prio.Name, prio.Priority),
- defaultFilePerm,
- ); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func formatPrio(name string, prio uint32) []byte {
- return []byte(fmt.Sprintf("%s %d", name, prio))
-}
diff --git a/vendor/github.com/containerd/cgroups/opts.go b/vendor/github.com/containerd/cgroups/opts.go
deleted file mode 100644
index 7c5d9fb9c..000000000
--- a/vendor/github.com/containerd/cgroups/opts.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "github.com/pkg/errors"
-)
-
-var (
- // ErrIgnoreSubsystem allows the specific subsystem to be skipped
- ErrIgnoreSubsystem = errors.New("skip subsystem")
- // ErrDevicesRequired is returned when the devices subsystem is required but
- // does not exist or is not active
- ErrDevicesRequired = errors.New("devices subsystem is required")
-)
-
-// InitOpts allows configuration for the creation or loading of a cgroup
-type InitOpts func(*InitConfig) error
-
-// InitConfig provides configuration options for the creation
-// or loading of a cgroup and its subsystems
-type InitConfig struct {
- // InitCheck can be used to check initialization errors from the subsystem
- InitCheck InitCheck
-}
-
-func newInitConfig() *InitConfig {
- return &InitConfig{
- InitCheck: RequireDevices,
- }
-}
-
-// InitCheck allows subsystems errors to be checked when initialized or loaded
-type InitCheck func(Subsystem, Path, error) error
-
-// AllowAny allows any subsystem errors to be skipped
-func AllowAny(s Subsystem, p Path, err error) error {
- return ErrIgnoreSubsystem
-}
-
-// RequireDevices requires the device subsystem but no others
-func RequireDevices(s Subsystem, p Path, err error) error {
- if s.Name() == Devices {
- return ErrDevicesRequired
- }
- return ErrIgnoreSubsystem
-}
diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go
deleted file mode 100644
index f45fd4256..000000000
--- a/vendor/github.com/containerd/cgroups/paths.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "path/filepath"
-
- "github.com/pkg/errors"
-)
-
-type Path func(subsystem Name) (string, error)
-
-func RootPath(subsystem Name) (string, error) {
- return "/", nil
-}
-
-// StaticPath returns a static path to use for all cgroups
-func StaticPath(path string) Path {
- return func(_ Name) (string, error) {
- return path, nil
- }
-}
-
-// NestedPath will nest the cgroups based on the calling process's cgroup,
-// placing its child processes inside its own path
-func NestedPath(suffix string) Path {
- paths, err := parseCgroupFile("/proc/self/cgroup")
- if err != nil {
- return errorPath(err)
- }
- return existingPath(paths, suffix)
-}
-
-// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
-// This is commonly used for the Load function to restore an existing container
-func PidPath(pid int) Path {
- p := fmt.Sprintf("/proc/%d/cgroup", pid)
- paths, err := parseCgroupFile(p)
- if err != nil {
- return errorPath(errors.Wrapf(err, "parse cgroup file %s", p))
- }
- return existingPath(paths, "")
-}
-
-// ErrControllerNotActive is returned when a controller is not supported or enabled
-var ErrControllerNotActive = errors.New("controller is not supported")
-
-func existingPath(paths map[string]string, suffix string) Path {
- // localize the paths based on the root mount dest for nested cgroups
- for n, p := range paths {
- dest, err := getCgroupDestination(string(n))
- if err != nil {
- return errorPath(err)
- }
- rel, err := filepath.Rel(dest, p)
- if err != nil {
- return errorPath(err)
- }
- if rel == "." {
- rel = dest
- }
- paths[n] = filepath.Join("/", rel)
- }
- return func(name Name) (string, error) {
- root, ok := paths[string(name)]
- if !ok {
- if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
- return "", ErrControllerNotActive
- }
- }
- if suffix != "" {
- return filepath.Join(root, suffix), nil
- }
- return root, nil
- }
-}
-
-func subPath(path Path, subName string) Path {
- return func(name Name) (string, error) {
- p, err := path(name)
- if err != nil {
- return "", err
- }
- return filepath.Join(p, subName), nil
- }
-}
-
-func errorPath(err error) Path {
- return func(_ Name) (string, error) {
- return "", err
- }
-}
diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go
deleted file mode 100644
index 648786db6..000000000
--- a/vendor/github.com/containerd/cgroups/perf_event.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import "path/filepath"
-
-func NewPerfEvent(root string) *PerfEventController {
- return &PerfEventController{
- root: filepath.Join(root, string(PerfEvent)),
- }
-}
-
-type PerfEventController struct {
- root string
-}
-
-func (p *PerfEventController) Name() Name {
- return PerfEvent
-}
-
-func (p *PerfEventController) Path(path string) string {
- return filepath.Join(p.root, path)
-}
diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go
deleted file mode 100644
index a1cfcb88d..000000000
--- a/vendor/github.com/containerd/cgroups/pids.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func NewPids(root string) *pidsController {
- return &pidsController{
- root: filepath.Join(root, string(Pids)),
- }
-}
-
-type pidsController struct {
- root string
-}
-
-func (p *pidsController) Name() Name {
- return Pids
-}
-
-func (p *pidsController) Path(path string) string {
- return filepath.Join(p.root, path)
-}
-
-func (p *pidsController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
- return err
- }
- if resources.Pids != nil && resources.Pids.Limit > 0 {
- return ioutil.WriteFile(
- filepath.Join(p.Path(path), "pids.max"),
- []byte(strconv.FormatInt(resources.Pids.Limit, 10)),
- defaultFilePerm,
- )
- }
- return nil
-}
-
-func (p *pidsController) Update(path string, resources *specs.LinuxResources) error {
- return p.Create(path, resources)
-}
-
-func (p *pidsController) Stat(path string, stats *Metrics) error {
- current, err := readUint(filepath.Join(p.Path(path), "pids.current"))
- if err != nil {
- return err
- }
- var max uint64
- maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
- if err != nil {
- return err
- }
- if maxS := strings.TrimSpace(string(maxData)); maxS != "max" {
- if max, err = parseUint(maxS, 10, 64); err != nil {
- return err
- }
- }
- stats.Pids = &PidsStat{
- Current: current,
- Limit: max,
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go
deleted file mode 100644
index 4f423d33a..000000000
--- a/vendor/github.com/containerd/cgroups/rdma.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "io/ioutil"
- "math"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-type rdmaController struct {
- root string
-}
-
-func (p *rdmaController) Name() Name {
- return Rdma
-}
-
-func (p *rdmaController) Path(path string) string {
- return filepath.Join(p.root, path)
-}
-
-func NewRdma(root string) *rdmaController {
- return &rdmaController{
- root: filepath.Join(root, string(Rdma)),
- }
-}
-
-func createCmdString(device string, limits *specs.LinuxRdma) string {
- var cmdString string
-
- cmdString = device
- if limits.HcaHandles != nil {
- cmdString = cmdString + " " + "hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10)
- }
-
- if limits.HcaObjects != nil {
- cmdString = cmdString + " " + "hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10)
- }
- return cmdString
-}
-
-func (p *rdmaController) Create(path string, resources *specs.LinuxResources) error {
- if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
- return err
- }
-
- for device, limit := range resources.Rdma {
- if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) {
- return ioutil.WriteFile(
- filepath.Join(p.Path(path), "rdma.max"),
- []byte(createCmdString(device, &limit)),
- defaultFilePerm,
- )
- }
- }
- return nil
-}
-
-func (p *rdmaController) Update(path string, resources *specs.LinuxResources) error {
- return p.Create(path, resources)
-}
-
-func parseRdmaKV(raw string, entry *RdmaEntry) {
- var value uint64
- var err error
-
- parts := strings.Split(raw, "=")
- switch len(parts) {
- case 2:
- if parts[1] == "max" {
- value = math.MaxUint32
- } else {
- value, err = parseUint(parts[1], 10, 32)
- if err != nil {
- return
- }
- }
- if parts[0] == "hca_handle" {
- entry.HcaHandles = uint32(value)
- } else if parts[0] == "hca_object" {
- entry.HcaObjects = uint32(value)
- }
- }
-}
-
-func toRdmaEntry(strEntries []string) []*RdmaEntry {
- var rdmaEntries []*RdmaEntry
- for i := range strEntries {
- parts := strings.Fields(strEntries[i])
- switch len(parts) {
- case 3:
- entry := new(RdmaEntry)
- entry.Device = parts[0]
- parseRdmaKV(parts[1], entry)
- parseRdmaKV(parts[2], entry)
-
- rdmaEntries = append(rdmaEntries, entry)
- default:
- continue
- }
- }
- return rdmaEntries
-}
-
-func (p *rdmaController) Stat(path string, stats *Metrics) error {
-
- currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current"))
- if err != nil {
- return err
- }
- currentPerDevices := strings.Split(string(currentData), "\n")
-
- maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max"))
- if err != nil {
- return err
- }
- maxPerDevices := strings.Split(string(maxData), "\n")
-
- // If a device was removed between reading the two files, skip returning
- // stats.
- if len(currentPerDevices) != len(maxPerDevices) {
- return nil
- }
-
- currentEntries := toRdmaEntry(currentPerDevices)
- maxEntries := toRdmaEntry(maxPerDevices)
-
- stats.Rdma = &RdmaStat{
- Current: currentEntries,
- Limit: maxEntries,
- }
- return nil
-}
diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go
deleted file mode 100644
index cfeabbbc6..000000000
--- a/vendor/github.com/containerd/cgroups/state.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-// State is a type that represents the state of the current cgroup
-type State string
-
-const (
- Unknown State = ""
- Thawed State = "thawed"
- Frozen State = "frozen"
- Freezing State = "freezing"
- Deleted State = "deleted"
-)
diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go
deleted file mode 100644
index 23de04d49..000000000
--- a/vendor/github.com/containerd/cgroups/subsystem.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
-
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// Name is a typed name for a cgroup subsystem
-type Name string
-
-const (
- Devices Name = "devices"
- Hugetlb Name = "hugetlb"
- Freezer Name = "freezer"
- Pids Name = "pids"
- NetCLS Name = "net_cls"
- NetPrio Name = "net_prio"
- PerfEvent Name = "perf_event"
- Cpuset Name = "cpuset"
- Cpu Name = "cpu"
- Cpuacct Name = "cpuacct"
- Memory Name = "memory"
- Blkio Name = "blkio"
- Rdma Name = "rdma"
-)
-
-// Subsystems returns a complete list of the default cgroups
-// available on most linux systems
-func Subsystems() []Name {
- n := []Name{
- Hugetlb,
- Freezer,
- Pids,
- NetCLS,
- NetPrio,
- PerfEvent,
- Cpuset,
- Cpu,
- Cpuacct,
- Memory,
- Blkio,
- Rdma,
- }
- if !isUserNS {
- n = append(n, Devices)
- }
- return n
-}
-
-type Subsystem interface {
- Name() Name
-}
-
-type pather interface {
- Subsystem
- Path(path string) string
-}
-
-type creator interface {
- Subsystem
- Create(path string, resources *specs.LinuxResources) error
-}
-
-type deleter interface {
- Subsystem
- Delete(path string) error
-}
-
-type stater interface {
- Subsystem
- Stat(path string, stats *Metrics) error
-}
-
-type updater interface {
- Subsystem
- Update(path string, resources *specs.LinuxResources) error
-}
-
-// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy
-func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy {
- return func() ([]Subsystem, error) {
- subsystems, err := baseHierarchy()
- if err != nil {
- return nil, err
- }
- for _, s := range subsystems {
- if s.Name() == subsystem {
- return []Subsystem{
- s,
- }, nil
- }
- }
- return nil, fmt.Errorf("unable to find subsystem %s", subsystem)
- }
-}
diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go
deleted file mode 100644
index c5d4e3081..000000000
--- a/vendor/github.com/containerd/cgroups/systemd.go
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "fmt"
- "path/filepath"
- "strings"
- "sync"
-
- systemdDbus "github.com/coreos/go-systemd/dbus"
- "github.com/godbus/dbus"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-const (
- SystemdDbus Name = "systemd"
- defaultSlice = "system.slice"
-)
-
-var (
- canDelegate bool
- once sync.Once
-)
-
-func Systemd() ([]Subsystem, error) {
- root, err := v1MountPoint()
- if err != nil {
- return nil, err
- }
- defaultSubsystems, err := defaults(root)
- if err != nil {
- return nil, err
- }
- s, err := NewSystemd(root)
- if err != nil {
- return nil, err
- }
- // make sure the systemd controller is added first
- return append([]Subsystem{s}, defaultSubsystems...), nil
-}
-
-func Slice(slice, name string) Path {
- if slice == "" {
- slice = defaultSlice
- }
- return func(subsystem Name) (string, error) {
- return filepath.Join(slice, name), nil
- }
-}
-
-func NewSystemd(root string) (*SystemdController, error) {
- return &SystemdController{
- root: root,
- }, nil
-}
-
-type SystemdController struct {
- mu sync.Mutex
- root string
-}
-
-func (s *SystemdController) Name() Name {
- return SystemdDbus
-}
-
-func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
- conn, err := systemdDbus.New()
- if err != nil {
- return err
- }
- defer conn.Close()
- slice, name := splitName(path)
- // We need to see if systemd can handle the delegate property
- // Systemd will return an error if it cannot handle delegate regardless
- // of its bool setting.
- checkDelegate := func() {
- canDelegate = true
- dlSlice := newProperty("Delegate", true)
- if _, err := conn.StartTransientUnit(slice, "testdelegate", []systemdDbus.Property{dlSlice}, nil); err != nil {
- if dbusError, ok := err.(dbus.Error); ok {
- // Starting with systemd v237, Delegate is not even a property of slices anymore,
- // so the D-Bus call fails with "InvalidArgs" error.
- if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") {
- canDelegate = false
- }
- }
- }
-
- conn.StopUnit(slice, "testDelegate", nil)
- }
- once.Do(checkDelegate)
- properties := []systemdDbus.Property{
- systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
- systemdDbus.PropWants(slice),
- newProperty("DefaultDependencies", false),
- newProperty("MemoryAccounting", true),
- newProperty("CPUAccounting", true),
- newProperty("BlockIOAccounting", true),
- }
-
- // If we can delegate, we add the property back in
- if canDelegate {
- properties = append(properties, newProperty("Delegate", true))
- }
-
- ch := make(chan string)
- _, err = conn.StartTransientUnit(name, "replace", properties, ch)
- if err != nil {
- return err
- }
- <-ch
- return nil
-}
-
-func (s *SystemdController) Delete(path string) error {
- conn, err := systemdDbus.New()
- if err != nil {
- return err
- }
- defer conn.Close()
- _, name := splitName(path)
- ch := make(chan string)
- _, err = conn.StopUnit(name, "replace", ch)
- if err != nil {
- return err
- }
- <-ch
- return nil
-}
-
-func newProperty(name string, units interface{}) systemdDbus.Property {
- return systemdDbus.Property{
- Name: name,
- Value: dbus.MakeVariant(units),
- }
-}
-
-func unitName(name string) string {
- return fmt.Sprintf("%s.slice", name)
-}
-
-func splitName(path string) (slice string, unit string) {
- slice, unit = filepath.Split(path)
- return strings.TrimSuffix(slice, "/"), unit
-}
diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go
deleted file mode 100644
index 84dc38d0c..000000000
--- a/vendor/github.com/containerd/cgroups/ticks.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-func getClockTicks() uint64 {
- // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and
- // on Linux it's a constant which is safe to be hard coded,
- // so we can avoid using cgo here.
- // See https://github.com/containerd/cgroups/pull/12 for
- // more details.
- return 100
-}
diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go
deleted file mode 100644
index f3129b1a3..000000000
--- a/vendor/github.com/containerd/cgroups/utils.go
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- units "github.com/docker/go-units"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-var isUserNS = runningInUserNS()
-
-// runningInUserNS detects whether we are currently running in a user namespace.
-// Copied from github.com/lxc/lxd/shared/util.go
-func runningInUserNS() bool {
- file, err := os.Open("/proc/self/uid_map")
- if err != nil {
- // This kernel-provided file only exists if user namespaces are supported
- return false
- }
- defer file.Close()
-
- buf := bufio.NewReader(file)
- l, _, err := buf.ReadLine()
- if err != nil {
- return false
- }
-
- line := string(l)
- var a, b, c int64
- fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
- /*
- * We assume we are in the initial user namespace if we have a full
- * range - 4294967295 uids starting at uid 0.
- */
- if a == 0 && b == 0 && c == 4294967295 {
- return false
- }
- return true
-}
-
-// defaults returns all known groups
-func defaults(root string) ([]Subsystem, error) {
- h, err := NewHugetlb(root)
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- s := []Subsystem{
- NewNamed(root, "systemd"),
- NewFreezer(root),
- NewPids(root),
- NewNetCls(root),
- NewNetPrio(root),
- NewPerfEvent(root),
- NewCputset(root),
- NewCpu(root),
- NewCpuacct(root),
- NewMemory(root),
- NewBlkio(root),
- NewRdma(root),
- }
- // only add the devices cgroup if we are not in a user namespace
- // because modifications are not allowed
- if !isUserNS {
- s = append(s, NewDevices(root))
- }
- // add the hugetlb cgroup if error wasn't due to missing hugetlb
- // cgroup support on the host
- if err == nil {
- s = append(s, h)
- }
- return s, nil
-}
-
-// remove will remove a cgroup path handling EAGAIN and EBUSY errors and
-// retrying the remove after an exponentially increasing delay
-func remove(path string) error {
- delay := 10 * time.Millisecond
- for i := 0; i < 5; i++ {
- if i != 0 {
- time.Sleep(delay)
- delay *= 2
- }
- if err := os.RemoveAll(path); err == nil {
- return nil
- }
- }
- return fmt.Errorf("cgroups: unable to remove path %q", path)
-}
-
-// readPids will read all the pids of processes in a cgroup by the provided path
-func readPids(path string, subsystem Name) ([]Process, error) {
- f, err := os.Open(filepath.Join(path, cgroupProcs))
- if err != nil {
- return nil, err
- }
- defer f.Close()
- var (
- out []Process
- s = bufio.NewScanner(f)
- )
- for s.Scan() {
- if t := s.Text(); t != "" {
- pid, err := strconv.Atoi(t)
- if err != nil {
- return nil, err
- }
- out = append(out, Process{
- Pid: pid,
- Subsystem: subsystem,
- Path: path,
- })
- }
- }
- return out, nil
-}
-
-// readTasksPids will read all the pids of tasks in a cgroup by the provided path
-func readTasksPids(path string, subsystem Name) ([]Task, error) {
- f, err := os.Open(filepath.Join(path, cgroupTasks))
- if err != nil {
- return nil, err
- }
- defer f.Close()
- var (
- out []Task
- s = bufio.NewScanner(f)
- )
- for s.Scan() {
- if t := s.Text(); t != "" {
- pid, err := strconv.Atoi(t)
- if err != nil {
- return nil, err
- }
- out = append(out, Task{
- Pid: pid,
- Subsystem: subsystem,
- Path: path,
- })
- }
- }
- return out, nil
-}
-
-func hugePageSizes() ([]string, error) {
- var (
- pageSizes []string
- sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"}
- )
- files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
- if err != nil {
- return nil, err
- }
- for _, st := range files {
- nameArray := strings.Split(st.Name(), "-")
- pageSize, err := units.RAMInBytes(nameArray[1])
- if err != nil {
- return nil, err
- }
- pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
- }
- return pageSizes, nil
-}
-
-func readUint(path string) (uint64, error) {
- v, err := ioutil.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return parseUint(strings.TrimSpace(string(v)), 10, 64)
-}
-
-func parseUint(s string, base, bitSize int) (uint64, error) {
- v, err := strconv.ParseUint(s, base, bitSize)
- if err != nil {
- intValue, intErr := strconv.ParseInt(s, base, bitSize)
- // 1. Handle negative values greater than MinInt64 (and)
- // 2. Handle negative values less than MinInt64
- if intErr == nil && intValue < 0 {
- return 0, nil
- } else if intErr != nil &&
- intErr.(*strconv.NumError).Err == strconv.ErrRange &&
- intValue < 0 {
- return 0, nil
- }
- return 0, err
- }
- return v, nil
-}
-
-func parseKV(raw string) (string, uint64, error) {
- parts := strings.Fields(raw)
- switch len(parts) {
- case 2:
- v, err := parseUint(parts[1], 10, 64)
- if err != nil {
- return "", 0, err
- }
- return parts[0], v, nil
- default:
- return "", 0, ErrInvalidFormat
- }
-}
-
-func parseCgroupFile(path string) (map[string]string, error) {
- f, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- return parseCgroupFromReader(f)
-}
-
-func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
- var (
- cgroups = make(map[string]string)
- s = bufio.NewScanner(r)
- )
- for s.Scan() {
- if err := s.Err(); err != nil {
- return nil, err
- }
- var (
- text = s.Text()
- parts = strings.SplitN(text, ":", 3)
- )
- if len(parts) < 3 {
- return nil, fmt.Errorf("invalid cgroup entry: %q", text)
- }
- for _, subs := range strings.Split(parts[1], ",") {
- if subs != "" {
- cgroups[subs] = parts[2]
- }
- }
- }
- return cgroups, nil
-}
-
-func getCgroupDestination(subsystem string) (string, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return "", err
- }
- defer f.Close()
- s := bufio.NewScanner(f)
- for s.Scan() {
- if err := s.Err(); err != nil {
- return "", err
- }
- fields := strings.Fields(s.Text())
- for _, opt := range strings.Split(fields[len(fields)-1], ",") {
- if opt == subsystem {
- return fields[3], nil
- }
- }
- }
- return "", ErrNoCgroupMountDestination
-}
-
-func pathers(subsystems []Subsystem) []pather {
- var out []pather
- for _, s := range subsystems {
- if p, ok := s.(pather); ok {
- out = append(out, p)
- }
- }
- return out
-}
-
-func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error {
- if c, ok := s.(creator); ok {
- p, err := path(s.Name())
- if err != nil {
- return err
- }
- if err := c.Create(p, resources); err != nil {
- return err
- }
- } else if c, ok := s.(pather); ok {
- p, err := path(s.Name())
- if err != nil {
- return err
- }
- // do the default create if the group does not have a custom one
- if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil {
- return err
- }
- }
- return nil
-}
-
-func cleanPath(path string) string {
- if path == "" {
- return ""
- }
- path = filepath.Clean(path)
- if !filepath.IsAbs(path) {
- path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
- }
- return filepath.Clean(path)
-}
diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go
deleted file mode 100644
index a076d4692..000000000
--- a/vendor/github.com/containerd/cgroups/v1.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cgroups
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "strings"
-)
-
-// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy
-func V1() ([]Subsystem, error) {
- root, err := v1MountPoint()
- if err != nil {
- return nil, err
- }
- subsystems, err := defaults(root)
- if err != nil {
- return nil, err
- }
- var enabled []Subsystem
- for _, s := range pathers(subsystems) {
- // check and remove the default groups that do not exist
- if _, err := os.Lstat(s.Path("/")); err == nil {
- enabled = append(enabled, s)
- }
- }
- return enabled, nil
-}
-
-// v1MountPoint returns the mount point where the cgroup
-// mountpoints are mounted in a single hierarchy
-func v1MountPoint() (string, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return "", err
- }
- defer f.Close()
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- if err := scanner.Err(); err != nil {
- return "", err
- }
- var (
- text = scanner.Text()
- fields = strings.Split(text, " ")
- // safe as mountinfo encodes mountpoints with spaces as \040.
- index = strings.Index(text, " - ")
- postSeparatorFields = strings.Fields(text[index+3:])
- numPostFields = len(postSeparatorFields)
- )
- // this is an error as we can't detect if the mount is for "cgroup"
- if numPostFields == 0 {
- return "", fmt.Errorf("Found no fields post '-' in %q", text)
- }
- if postSeparatorFields[0] == "cgroup" {
- // check that the mount is properly formatted.
- if numPostFields < 3 {
- return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
- }
- return filepath.Dir(fields[4]), nil
- }
- }
- return "", ErrMountPointNotExist
-}
diff --git a/vendor/github.com/containerd/continuity/AUTHORS b/vendor/github.com/containerd/continuity/AUTHORS
new file mode 100644
index 000000000..4043394cc
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/AUTHORS
@@ -0,0 +1,16 @@
+Aaron Lehmann <aaron.lehmann@docker.com>
+Akash Gupta <akagup@microsoft.com>
+Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
+Andrew Pennebaker <apennebaker@datapipe.com>
+Brandon Philips <brandon.philips@coreos.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Daniel, Dao Quang Minh <dqminh89@gmail.com>
+Derek McGowan <derek@mcgstyle.net>
+Edward Pilatowicz <edward.pilatowicz@oracle.com>
+Ian Campbell <ijc@docker.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Cummins <sul3n3t@gmail.com>
+Phil Estes <estesp@gmail.com>
+Stephen J Day <stephen.day@docker.com>
+Tobias Klauser <tklauser@distanz.ch>
+Tonis Tiigi <tonistiigi@gmail.com>
diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md
deleted file mode 100644
index f9f9ef0f9..000000000
--- a/vendor/github.com/containerd/continuity/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# continuity
-
-[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
-[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
-
-A transport-agnostic, filesystem metadata manifest system
-
-This project is a staging area for experiments in providing transport agnostic
-metadata storage.
-
-Please see https://github.com/opencontainers/specs/issues/11 for more details.
-
-## Manifest Format
-
-A continuity manifest encodes filesystem metadata in Protocol Buffers.
-Please refer to [proto/manifest.proto](proto/manifest.proto).
-
-## Usage
-
-Build:
-
-```console
-$ make
-```
-
-Create a manifest (of this repo itself):
-
-```console
-$ ./bin/continuity build . > /tmp/a.pb
-```
-
-Dump a manifest:
-
-```console
-$ ./bin/continuity ls /tmp/a.pb
-...
--rw-rw-r-- 270 B /.gitignore
--rw-rw-r-- 88 B /.mailmap
--rw-rw-r-- 187 B /.travis.yml
--rw-rw-r-- 359 B /AUTHORS
--rw-rw-r-- 11 kB /LICENSE
--rw-rw-r-- 1.5 kB /Makefile
-...
--rw-rw-r-- 986 B /testutil_test.go
-drwxrwxr-x 0 B /version
--rw-rw-r-- 478 B /version/version.go
-```
-
-Verify a manifest:
-
-```console
-$ ./bin/continuity verify . /tmp/a.pb
-```
-
-Break the directory and restore using the manifest:
-```console
-$ chmod 777 Makefile
-$ ./bin/continuity verify . /tmp/a.pb
-2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
-$ ./bin/continuity apply . /tmp/a.pb
-$ stat -c %a Makefile
-664
-$ ./bin/continuity verify . /tmp/a.pb
-```
-
-
-## Contribution Guide
-### Building Proto Package
-
-If you change the proto file you will need to rebuild the generated Go with `go generate`.
-
-```console
-$ go generate ./proto
-```
-
-## Project details
-
-continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
-As a containerd sub-project, you will find the:
- * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
-
-information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/continuity/vendor.conf b/vendor/github.com/containerd/continuity/vendor.conf
deleted file mode 100644
index 5bd88d5fd..000000000
--- a/vendor/github.com/containerd/continuity/vendor.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
-github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
-github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
-github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
-github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
-github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
-github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
-golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
-golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
-golang.org/x/sys 77b0e4315053a57ed2962443614bdb28db152054
diff --git a/vendor/github.com/containernetworking/cni/README.md b/vendor/github.com/containernetworking/cni/README.md
deleted file mode 100644
index 3968d908a..000000000
--- a/vendor/github.com/containernetworking/cni/README.md
+++ /dev/null
@@ -1,205 +0,0 @@
-[![Linux Build Status](https://travis-ci.org/containernetworking/cni.svg?branch=master)](https://travis-ci.org/containernetworking/cni)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/wtrkou8oow7x533e/branch/master?svg=true)](https://ci.appveyor.com/project/cni-bot/cni/branch/master)
-[![Coverage Status](https://coveralls.io/repos/github/containernetworking/cni/badge.svg?branch=master)](https://coveralls.io/github/containernetworking/cni?branch=master)
-[![Slack Status](https://cryptic-tundra-43194.herokuapp.com/badge.svg)](https://cryptic-tundra-43194.herokuapp.com/)
-
-![CNI Logo](logo.png)
-
----
-
-# Community Sync Meeting
-
-There is a community sync meeting for users and developers every 1-2 months. The next meeting will be held on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (notes from previous meetings are also in this doc).
-
-The next meeting will be held on *Wednesday, January 30th, 2019* at *4:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2019-01-30&sln=16-17).
-
----
-
-# CNI - the Container Network Interface
-
-## What is CNI?
-
-CNI (_Container Network Interface_), a [Cloud Native Computing Foundation](https://cncf.io) project, consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of supported plugins.
-CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted.
-Because of this focus, CNI has a wide range of support and the specification is simple to implement.
-
-As well as the [specification](SPEC.md), this repository contains the Go source code of a [library for integrating CNI into applications](libcni) and an [example command-line tool](cnitool) for executing CNI plugins. A [separate repository contains reference plugins](https://github.com/containernetworking/plugins) and a template for making new plugins.
-
-The template code makes it straight-forward to create a CNI plugin for an existing container networking project.
-CNI also makes a good framework for creating a new container networking project from scratch.
-
-## Why develop CNI?
-
-Application containers on Linux are a rapidly evolving area, and within this area networking is not well addressed as it is highly environment-specific.
-We believe that many container runtimes and orchestrators will seek to solve the same problem of making the network layer pluggable.
-
-To avoid duplication, we think it is prudent to define a common interface between the network plugins and container execution: hence we put forward this specification, along with libraries for Go and a set of plugins.
-
-## Who is using CNI?
-### Container runtimes
-- [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html)
-- [Kubernetes - a system to simplify container operations](https://kubernetes.io/docs/admin/network-plugins/)
-- [OpenShift - Kubernetes with additional enterprise features](https://github.com/openshift/origin/blob/master/docs/openshift_networking_requirements.md)
-- [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/cf-networking-release)
-- [Apache Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md)
-- [Amazon ECS - a highly scalable, high performance container management service](https://aws.amazon.com/ecs/)
-- [Singularity - container platform optimized for HPC, EPC, and AI](https://github.com/sylabs/singularity)
-- [OpenSVC - orchestrator for legacy and containerized application stacks](https://docs.opensvc.com/latest/fr/agent.configure.cni.html)
-
-### 3rd party plugins
-- [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni)
-- [Weave - a multi-host Docker network](https://github.com/weaveworks/weave)
-- [Contiv Networking - policy networking for various use cases](https://github.com/contiv/netplugin)
-- [SR-IOV](https://github.com/hustcat/sriov-cni)
-- [Cilium - BPF & XDP for containers](https://github.com/cilium/cilium)
-- [Infoblox - enterprise IP address management for containers](https://github.com/infobloxopen/cni-infoblox)
-- [Multus - a Multi plugin](https://github.com/Intel-Corp/multus-cni)
-- [Romana - Layer 3 CNI plugin supporting network policy for Kubernetes](https://github.com/romana/kube)
-- [CNI-Genie - generic CNI network plugin](https://github.com/Huawei-PaaS/CNI-Genie)
-- [Nuage CNI - Nuage Networks SDN plugin for network policy kubernetes support ](https://github.com/nuagenetworks/nuage-cni)
-- [Silk - a CNI plugin designed for Cloud Foundry](https://github.com/cloudfoundry-incubator/silk)
-- [Linen - a CNI plugin designed for overlay networks with Open vSwitch and fit in SDN/OpenFlow network environment](https://github.com/John-Lin/linen-cni)
-- [Vhostuser - a Dataplane network plugin - Supports OVS-DPDK & VPP](https://github.com/intel/vhost-user-net-plugin)
-- [Amazon ECS CNI Plugins - a collection of CNI Plugins to configure containers with Amazon EC2 elastic network interfaces (ENIs)](https://github.com/aws/amazon-ecs-cni-plugins)
-- [Bonding CNI - a Link aggregating plugin to address failover and high availability network](https://github.com/Intel-Corp/bond-cni)
-- [ovn-kubernetes - a container network plugin built on Open vSwitch (OVS) and Open Virtual Networking (OVN) with support for both Linux and Windows](https://github.com/openvswitch/ovn-kubernetes)
-- [Juniper Contrail](https://www.juniper.net/cloud) / [TungstenFabric](https://tungstenfabric.io) - Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, simultaneous overlay-underlay support, network policy enforcement, network isolation, service chaining and flexible load balancing
-- [Knitter - a CNI plugin supporting multiple networking for Kubernetes](https://github.com/ZTE/Knitter)
-- [DANM - a CNI-compliant networking solution for TelCo workloads running on Kubernetes](https://github.com/nokia/danm)
-- [VMware NSX – a CNI plugin that enables automated NSX L2/L3 networking and L4/L7 Load Balancing; network isolation at the pod, node, and cluster level; and zero-trust security policy for your Kubernetes cluster.](https://docs.vmware.com/en/VMware-NSX-T/2.2/com.vmware.nsxt.ncp_kubernetes.doc/GUID-6AFA724E-BB62-4693-B95C-321E8DDEA7E1.html)
-
-The CNI team also maintains some [core plugins in a separate repository](https://github.com/containernetworking/plugins).
-
-
-## Contributing to CNI
-
-We welcome contributions, including [bug reports](https://github.com/containernetworking/cni/issues), and code and documentation improvements.
-If you intend to contribute to code or documentation, please read [CONTRIBUTING.md](CONTRIBUTING.md). Also see the [contact section](#contact) in this README.
-
-## How do I use CNI?
-
-### Requirements
-
-The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. You can find the Go versions covered by our [automated tests](https://travis-ci.org/containernetworking/cni/builds) in [.travis.yml](.travis.yml).
-
-### Reference Plugins
-
-The CNI project maintains a set of [reference plugins](https://github.com/containernetworking/plugins) that implement the CNI specification.
-NOTE: the reference plugins used to live in this repository but have been split out into a [separate repository](https://github.com/containernetworking/plugins) as of May 2017.
-
-### Running the plugins
-
-After building and installing the [reference plugins](https://github.com/containernetworking/plugins), you can use the `priv-net-run.sh` and `docker-run.sh` scripts in the `scripts/` directory to exercise the plugins.
-
-**note - priv-net-run.sh depends on `jq`**
-
-Start out by creating a netconf file to describe a network:
-
-```bash
-$ mkdir -p /etc/cni/net.d
-$ cat >/etc/cni/net.d/10-mynet.conf <<EOF
-{
- "cniVersion": "0.2.0",
- "name": "mynet",
- "type": "bridge",
- "bridge": "cni0",
- "isGateway": true,
- "ipMasq": true,
- "ipam": {
- "type": "host-local",
- "subnet": "10.22.0.0/16",
- "routes": [
- { "dst": "0.0.0.0/0" }
- ]
- }
-}
-EOF
-$ cat >/etc/cni/net.d/99-loopback.conf <<EOF
-{
- "cniVersion": "0.2.0",
- "name": "lo",
- "type": "loopback"
-}
-EOF
-```
-
-The directory `/etc/cni/net.d` is the default location in which the scripts will look for net configurations.
-
-Next, build the plugins:
-
-```bash
-$ cd $GOPATH/src/github.com/containernetworking/plugins
-$ ./build_linux.sh # or build_windows.sh
-```
-
-Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network:
-
-```bash
-$ CNI_PATH=$GOPATH/src/github.com/containernetworking/plugins/bin
-$ cd $GOPATH/src/github.com/containernetworking/cni/scripts
-$ sudo CNI_PATH=$CNI_PATH ./priv-net-run.sh ifconfig
-eth0 Link encap:Ethernet HWaddr f2:c2:6f:54:b8:2b
- inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
- inet6 addr: fe80::f0c2:6fff:fe54:b82b/64 Scope:Link
- UP BROADCAST MULTICAST MTU:1500 Metric:1
- RX packets:1 errors:0 dropped:0 overruns:0 frame:0
- TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
- collisions:0 txqueuelen:0
- RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
-
-lo Link encap:Local Loopback
- inet addr:127.0.0.1 Mask:255.0.0.0
- inet6 addr: ::1/128 Scope:Host
- UP LOOPBACK RUNNING MTU:65536 Metric:1
- RX packets:0 errors:0 dropped:0 overruns:0 frame:0
- TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:0
- RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
-```
-
-The environment variable `CNI_PATH` tells the scripts and library where to look for plugin executables.
-
-## Running a Docker container with network namespace set up by CNI plugins
-
-Use the instructions in the previous section to define a netconf and build the plugins.
-Next, the docker-run.sh script wraps `docker run` to execute the plugins prior to entering the container:
-
-```bash
-$ CNI_PATH=$GOPATH/src/github.com/containernetworking/plugins/bin
-$ cd $GOPATH/src/github.com/containernetworking/cni/scripts
-$ sudo CNI_PATH=$CNI_PATH ./docker-run.sh --rm busybox:latest ifconfig
-eth0 Link encap:Ethernet HWaddr fa:60:70:aa:07:d1
- inet addr:10.22.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
- inet6 addr: fe80::f860:70ff:feaa:7d1/64 Scope:Link
- UP BROADCAST MULTICAST MTU:1500 Metric:1
- RX packets:1 errors:0 dropped:0 overruns:0 frame:0
- TX packets:0 errors:0 dropped:1 overruns:0 carrier:0
- collisions:0 txqueuelen:0
- RX bytes:90 (90.0 B) TX bytes:0 (0.0 B)
-
-lo Link encap:Local Loopback
- inet addr:127.0.0.1 Mask:255.0.0.0
- inet6 addr: ::1/128 Scope:Host
- UP LOOPBACK RUNNING MTU:65536 Metric:1
- RX packets:0 errors:0 dropped:0 overruns:0 frame:0
- TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:0
- RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
-```
-
-## What might CNI do in the future?
-
-CNI currently covers a wide range of needs for network configuration due to its simple model and API.
-However, in the future CNI might want to branch out into other directions:
-
-- Dynamic updates to existing network configuration
-- Dynamic policies for network bandwidth and firewall rules
-
-If these topics are of interest, please contact the team via the mailing list or IRC and find some like-minded people in the community to put a proposal together.
-
-## Contact
-
-For any questions about CNI, please reach out on the mailing list:
-- Email: [cni-dev](https://groups.google.com/forum/#!forum/cni-dev)
-- IRC: #[containernetworking](irc://irc.freenode.org:6667/#containernetworking) channel on freenode.org
-- Slack: [containernetworking.slack.com](https://cryptic-tundra-43194.herokuapp.com)
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
index 360733e74..0f14d3427 100644
--- a/vendor/github.com/containernetworking/cni/libcni/api.go
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -69,6 +69,7 @@ type CNI interface {
AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
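
For orientation, here is a minimal, hedged sketch of how a runtime could call the newly added `GetNetworkListCachedResult` method; the plugin directory, conflist path, container ID, and netns path are illustrative assumptions, not values from this patch.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// Assumption: CNI plugins are installed in /opt/cni/bin and a conflist
	// exists at the path below.
	cninet := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)
	list, err := libcni.ConfListFromFile("/etc/cni/net.d/10-mynet.conflist")
	if err != nil {
		panic(err)
	}
	rt := &libcni.RuntimeConf{
		ContainerID: "example-container", // hypothetical ID
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}
	// Returns the result cached by an earlier AddNetworkList for the same
	// network list and runtime configuration, if one exists.
	result, err := cninet.GetNetworkListCachedResult(list, rt)
	fmt.Println(result, err)
}
```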
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
index 39b639723..913528c1d 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
@@ -15,6 +15,7 @@
package invoke
import (
+ "fmt"
"os"
"strings"
)
@@ -22,6 +23,8 @@ import (
type CNIArgs interface {
// For use with os/exec; i.e., return nil to inherit the
// environment from this process
+ // For use in delegation; inherit the environment from this
+ // process and allow overrides
AsEnv() []string
}
@@ -57,17 +60,17 @@ func (args *Args) AsEnv() []string {
pluginArgsStr = stringify(args.PluginArgs)
}
- // Ensure that the custom values are first, so any value present in
- // the process environment won't override them.
- env = append([]string{
- "CNI_COMMAND=" + args.Command,
- "CNI_CONTAINERID=" + args.ContainerID,
- "CNI_NETNS=" + args.NetNS,
- "CNI_ARGS=" + pluginArgsStr,
- "CNI_IFNAME=" + args.IfName,
- "CNI_PATH=" + args.Path,
- }, env...)
- return env
+ // Duplicated values which come first will be overridden, so we must put the
+ // custom values at the end to avoid their being overridden by the process environment.
+ env = append(env,
+ "CNI_COMMAND="+args.Command,
+ "CNI_CONTAINERID="+args.ContainerID,
+ "CNI_NETNS="+args.NetNS,
+ "CNI_ARGS="+pluginArgsStr,
+ "CNI_IFNAME="+args.IfName,
+ "CNI_PATH="+args.Path,
+ )
+ return dedupEnv(env)
}
// taken from rkt/networking/net_plugin.go
@@ -80,3 +83,46 @@ func stringify(pluginArgs [][2]string) string {
return strings.Join(entries, ";")
}
+
+// DelegateArgs implements the CNIArgs interface
+// used for delegation to inherit from environments
+// and allow some overrides like CNI_COMMAND
+var _ CNIArgs = &DelegateArgs{}
+
+type DelegateArgs struct {
+ Command string
+}
+
+func (d *DelegateArgs) AsEnv() []string {
+ env := os.Environ()
+
+ // The custom values should come in the end to override the existing
+ // process environment of the same key.
+ env = append(env,
+ "CNI_COMMAND="+d.Command,
+ )
+ return dedupEnv(env)
+}
+
+// dedupEnv returns a copy of env with any duplicates removed, in favor of later values.
+// Items not of the normal environment "key=value" form are preserved unchanged.
+func dedupEnv(env []string) []string {
+ out := make([]string, 0, len(env))
+ envMap := map[string]string{}
+
+ for _, kv := range env {
+ // find the first "=" in the entry; if there is none, keep it unchanged
+ eq := strings.Index(kv, "=")
+ if eq < 0 {
+ out = append(out, kv)
+ continue
+ }
+ envMap[kv[:eq]] = kv[eq+1:]
+ }
+
+ for k, v := range envMap {
+ out = append(out, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ return out
+}
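
The ordering comments above hinge on `dedupEnv` letting later entries win. Below is a standalone sketch that mirrors (rather than calls) the unexported helper, so the behavior can be seen in isolation.

```go
package main

import (
	"fmt"
	"strings"
)

// dedupEnv mirrors the helper added above: for "KEY=value" entries the last
// occurrence of a key wins, and entries without "=" pass through unchanged.
// As in the original, the key order of the output is not guaranteed.
func dedupEnv(env []string) []string {
	out := make([]string, 0, len(env))
	seen := map[string]string{}
	for _, kv := range env {
		eq := strings.Index(kv, "=")
		if eq < 0 {
			out = append(out, kv)
			continue
		}
		seen[kv[:eq]] = kv[eq+1:]
	}
	for k, v := range seen {
		out = append(out, k+"="+v)
	}
	return out
}

func main() {
	// The process environment sets CNI_COMMAND first; the custom value
	// appended afterwards is the one that survives deduplication.
	env := []string{"CNI_COMMAND=VERSION", "CNI_PATH=/opt/cni/bin", "CNI_COMMAND=ADD"}
	fmt.Println(dedupEnv(env))
}
```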
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
index 30b4672f1..8defe4dd3 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -16,22 +16,17 @@ package invoke
import (
"context"
- "fmt"
"os"
"path/filepath"
"github.com/containernetworking/cni/pkg/types"
)
-func delegateCommon(expectedCommand, delegatePlugin string, exec Exec) (string, Exec, error) {
+func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) {
if exec == nil {
exec = defaultExec
}
- if os.Getenv("CNI_COMMAND") != expectedCommand {
- return "", nil, fmt.Errorf("CNI_COMMAND is not " + expectedCommand)
- }
-
paths := filepath.SplitList(os.Getenv("CNI_PATH"))
pluginPath, err := exec.FindInPath(delegatePlugin, paths)
if err != nil {
@@ -44,32 +39,42 @@ func delegateCommon(expectedCommand, delegatePlugin string, exec Exec) (string,
// DelegateAdd calls the given delegate plugin with the CNI ADD action and
// JSON configuration
func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
- pluginPath, realExec, err := delegateCommon("ADD", delegatePlugin, exec)
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return nil, err
}
- return ExecPluginWithResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
+ // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD
+ return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec)
}
// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
// JSON configuration
func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
- pluginPath, realExec, err := delegateCommon("CHECK", delegatePlugin, exec)
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return err
}
- return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
+ // DelegateCheck will override the original CNI_COMMAND env from process with CHECK
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
- pluginPath, realExec, err := delegateCommon("DEL", delegatePlugin, exec)
+ pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return err
}
- return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
+ // DelegateDel will override the original CNI_COMMAND env from process with DEL
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
+}
+
+// return CNIArgs used by delegation
+func delegateArgs(action string) *DelegateArgs {
+ return &DelegateArgs{
+ Command: action,
+ }
}
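
As a hedged usage sketch (not taken from the patch), a caller delegating an ADD to the `host-local` IPAM plugin; the netconf payload and the CNI_PATH value are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Assumption: reference plugins are installed here.
	os.Setenv("CNI_PATH", "/opt/cni/bin")

	// Placeholder network configuration handed to the delegate.
	netconf := []byte(`{
		"cniVersion": "0.4.0",
		"name": "mynet",
		"type": "host-local",
		"ipam": {"type": "host-local", "subnet": "10.22.0.0/16"}
	}`)

	// With this patch, CNI_COMMAND from the calling process is overridden
	// with ADD for the delegate invocation.
	result, err := invoke.DelegateAdd(context.Background(), "host-local", netconf, nil)
	fmt.Println(result, err)
}
```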
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
index e5b86634d..ad8498ba2 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
@@ -46,7 +46,9 @@ func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [
func pluginErr(err error, output []byte) error {
if _, ok := err.(*exec.ExitError); ok {
emsg := types.Error{}
- if perr := json.Unmarshal(output, &emsg); perr != nil {
+ if len(output) == 0 {
+ emsg.Msg = "netplugin failed with no error message"
+ } else if perr := json.Unmarshal(output, &emsg); perr != nil {
emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
}
return &emsg
diff --git a/vendor/github.com/containernetworking/plugins/README.md b/vendor/github.com/containernetworking/plugins/README.md
deleted file mode 100644
index f0e444355..000000000
--- a/vendor/github.com/containernetworking/plugins/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-[![Linux Build Status](https://travis-ci.org/containernetworking/plugins.svg?branch=master)](https://travis-ci.org/containernetworking/plugins)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/kcuubx0chr76ev86/branch/master?svg=true)](https://ci.appveyor.com/project/cni-bot/plugins/branch/master)
-
-# plugins
-Some CNI network plugins, maintained by the containernetworking team. For more information, see the individual READMEs.
-
-## Plugins supplied:
-### Main: interface-creating
-* `bridge`: Creates a bridge, adds the host and the container to it.
-* `ipvlan`: Adds an [ipvlan](https://www.kernel.org/doc/Documentation/networking/ipvlan.txt) interface in the container
-* `loopback`: Creates a loopback interface
-* `macvlan`: Creates a new MAC address, forwards all traffic to that to the container
-* `ptp`: Creates a veth pair.
-* `vlan`: Allocates a vlan device.
-
-### IPAM: IP address allocation
-* `dhcp`: Runs a daemon on the host to make DHCP requests on behalf of the container
-* `host-local`: maintains a local database of allocated IPs
-
-### Meta: other plugins
-* `flannel`: generates an interface corresponding to a flannel config file
-* `tuning`: Tweaks sysctl parameters of an existing interface
-* `portmap`: An iptables-based portmapping plugin. Maps ports from the host's address space to the container.
-
-### Sample
-The sample plugin provides an example for building your own plugin.
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
index c0f5cf2e8..1e265c7a0 100644
--- a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
@@ -12,10 +12,6 @@ For example, you cannot rely on the `ns.Set()` namespace being the current names
The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
```go
-targetNs, err := ns.NewNS()
-if err != nil {
- return err
-}
err = targetNs.Do(func(hostNs ns.NetNS) error {
dummy := &netlink.Dummy{
LinkAttrs: netlink.LinkAttrs{
@@ -26,11 +22,16 @@ err = targetNs.Do(func(hostNs ns.NetNS) error {
})
```
-Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem.
+Note that this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. Goroutines spawned from within the `ns.Do()` callback will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()`, which helps to minimize the problem.
-Also: If the runtime spawns a new OS thread, it will inherit the network namespace of the parent thread, which may have been temporarily switched, and thus the new OS thread will be permanently "stuck in the wrong namespace".
+When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of Go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
+
+In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use Go 1.10 or greater.
+
+
+### Creating network namespaces
+Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration.
-In short, **there is no safe way to change network namespaces from within a long-lived, multithreaded Go process**. If your daemon process needs to be namespace aware, consider spawning a separate process (like a CNI plugin) for each namespace.
### Further Reading
- https://github.com/golang/go/wiki/LockOSThread
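
To make the Go 1.10 point above concrete, here is a rough, hypothetical sketch of the pattern the README describes: do the namespace-sensitive work on a dedicated goroutine whose OS thread is locked and never unlocked, so that when the goroutine exits the runtime discards the thread instead of handing it to other goroutines. This is roughly what `ns.Do()` arranges for you and is not a replacement for it; using `unix.Setns` on an open namespace fd is an assumption about how the switch is performed.

```go
package nsexample

import (
	"os"
	"runtime"

	"golang.org/x/sys/unix"
)

// withNetNS runs fn on a dedicated, locked OS thread switched into the
// network namespace at nsPath. The goroutine exits with the thread still
// locked, so (since Go 1.10) the runtime terminates that thread rather than
// reusing it, and no other goroutine can be scheduled onto a thread stuck
// in the wrong namespace.
func withNetNS(nsPath string, fn func() error) error {
	errCh := make(chan error, 1)
	go func() {
		runtime.LockOSThread() // deliberately never unlocked

		fd, err := os.Open(nsPath)
		if err != nil {
			errCh <- err
			return
		}
		defer fd.Close()

		if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNET); err != nil {
			errCh <- err
			return
		}
		errCh <- fn()
	}()
	return <-errCh
}
```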
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
index 4ce989467..31ad5f622 100644
--- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
@@ -15,10 +15,8 @@
package ns
import (
- "crypto/rand"
"fmt"
"os"
- "path"
"runtime"
"sync"
"syscall"
@@ -38,82 +36,6 @@ func getCurrentThreadNetNSPath() string {
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
-// Creates a new persistent network namespace and returns an object
-// representing that namespace, without switching to it
-func NewNS() (NetNS, error) {
- const nsRunDir = "/var/run/netns"
-
- b := make([]byte, 16)
- _, err := rand.Reader.Read(b)
- if err != nil {
- return nil, fmt.Errorf("failed to generate random netns name: %v", err)
- }
-
- err = os.MkdirAll(nsRunDir, 0755)
- if err != nil {
- return nil, err
- }
-
- // create an empty file at the mount point
- nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
- nsPath := path.Join(nsRunDir, nsName)
- mountPointFd, err := os.Create(nsPath)
- if err != nil {
- return nil, err
- }
- mountPointFd.Close()
-
- // Ensure the mount point is cleaned up on errors; if the namespace
- // was successfully mounted this will have no effect because the file
- // is in-use
- defer os.RemoveAll(nsPath)
-
- var wg sync.WaitGroup
- wg.Add(1)
-
- // do namespace work in a dedicated goroutine, so that we can safely
- // Lock/Unlock OSThread without upsetting the lock/unlock state of
- // the caller of this function
- var fd *os.File
- go (func() {
- defer wg.Done()
- runtime.LockOSThread()
-
- var origNS NetNS
- origNS, err = GetNS(getCurrentThreadNetNSPath())
- if err != nil {
- return
- }
- defer origNS.Close()
-
- // create a new netns on the current thread
- err = unix.Unshare(unix.CLONE_NEWNET)
- if err != nil {
- return
- }
- defer origNS.Set()
-
- // bind mount the new netns from the current thread onto the mount point
- err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
- if err != nil {
- return
- }
-
- fd, err = os.Open(nsPath)
- if err != nil {
- return
- }
- })()
- wg.Wait()
-
- if err != nil {
- unix.Unmount(nsPath, unix.MNT_DETACH)
- return nil, fmt.Errorf("failed to create namespace: %v", err)
- }
-
- return &netNS{file: fd, mounted: true}, nil
-}
-
func (ns *netNS) Close() error {
if err := ns.errorIfClosed(); err != nil {
return err
@@ -124,16 +46,6 @@ func (ns *netNS) Close() error {
}
ns.closed = true
- if ns.mounted {
- if err := unix.Unmount(ns.file.Name(), unix.MNT_DETACH); err != nil {
- return fmt.Errorf("Failed to unmount namespace %s: %v", ns.file.Name(), err)
- }
- if err := os.RemoveAll(ns.file.Name()); err != nil {
- return fmt.Errorf("Failed to clean up namespace %s: %v", ns.file.Name(), err)
- }
- ns.mounted = false
- }
-
return nil
}
@@ -180,9 +92,8 @@ type NetNS interface {
}
type netNS struct {
- file *os.File
- mounted bool
- closed bool
+ file *os.File
+ closed bool
}
// netNS implements the NetNS interface
diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore
new file mode 100644
index 000000000..ff8e05bfe
--- /dev/null
+++ b/vendor/github.com/containers/buildah/.gitignore
@@ -0,0 +1,4 @@
+docs/buildah*.1
+/buildah
+/imgtype
+/build/
diff --git a/vendor/github.com/containers/buildah/.papr.sh b/vendor/github.com/containers/buildah/.papr.sh
new file mode 100644
index 000000000..25ab4c29d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/.papr.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -xeuo pipefail
+export GOPATH=/go
+export PATH=$HOME/gopath/bin:$PATH:$GOPATH/bin
+export GOSRC=$GOPATH/src/github.com/containers/buildah
+
+cp -fv /etc/yum.repos.d{.host/*.repo,}
+
+dnf install -y \
+ bats \
+ btrfs-progs-devel \
+ bzip2 \
+ device-mapper-devel \
+ findutils \
+ git \
+ glib2-devel \
+ glibc-static \
+ gnupg \
+ golang \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
+ libselinux-devel \
+ libselinux-static \
+ libseccomp-static \
+ libselinux-utils \
+ make \
+ openssl \
+ ostree-devel \
+ skopeo-containers \
+ which
+
+
+# Install gomega
+go get github.com/onsi/gomega/...
+
+# PAPR adds a merge commit, for testing, which fails the
+# short-commit-subject validation test, so tell git-validate.sh to only check
+# up to, but not including, the merge commit.
+export GITVALIDATE_TIP=$(cd $GOSRC; git log -2 --pretty='%H' | tail -n 1)
+make -C $GOSRC install.tools runc all validate test-unit test-integration static
+env BUILDAH_ISOLATION=chroot make -C $GOSRC test-integration
+env BUILDAH_ISOLATION=rootless make -C $GOSRC test-integration
diff --git a/vendor/github.com/containers/buildah/.papr.yml b/vendor/github.com/containers/buildah/.papr.yml
new file mode 100644
index 000000000..aece0004f
--- /dev/null
+++ b/vendor/github.com/containers/buildah/.papr.yml
@@ -0,0 +1,87 @@
+branches:
+ - master
+ - auto
+ - try
+
+host:
+ distro: fedora/28/atomic
+
+required: true
+
+tests:
+ # Let's create a self signed certificate and get it in the right places
+ - hostname
+ - ip a
+ - ping -c 3 localhost
+ - cat /etc/hostname
+ - mkdir -p /home/travis/auth
+ - openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
+ - cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
+ - sudo mkdir -p /etc/docker/certs.d/docker.io/
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
+ - sudo mkdir -p /etc/docker/certs.d/localhost:5000/
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
+ # Create the credentials file, then start up the Docker registry
+ - podman run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
+ - podman run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
+
+# Test Podman setup
+ - podman version
+ - podman info
+ - podman ps --all
+ - podman images
+ - ls -alF /home/travis/auth
+ - podman pull alpine
+ - podman login localhost:5000 --username testuser --password testpassword
+ - podman tag alpine localhost:5000/my-alpine
+ - podman push --creds=testuser:testpassword localhost:5000/my-alpine
+ - podman ps --all
+ - podman images
+ - podman rmi docker.io/alpine
+ - podman rmi localhost:5000/my-alpine
+ - podman pull --creds=testuser:testpassword localhost:5000/my-alpine
+ - podman ps --all
+ - podman images
+ - podman rmi localhost:5000/my-alpine
+
+ # mount yum repos to inherit injected mirrors from PAPR
+ - podman run --net=host --security-opt label=disable --cap-add all --security-opt seccomp=unconfined -v /etc/yum.repos.d:/etc/yum.repos.d.host:ro
+ -v $PWD:/go/src/github.com/containers/buildah
+ --workdir /go/src/github.com/containers/buildah
+ registry.fedoraproject.org/fedora:28 bash -c sh ./.papr.sh
+
+---
+
+container:
+ image: registry.fedoraproject.org/fedora:28
+
+packages:
+ - btrfs-progs-devel
+ - bzip2
+ - device-mapper-devel
+ - findutils
+ - git
+ - glib2-devel
+ - gnupg
+ - golang
+ - libassuan-devel
+ - make
+ - ostree-devel
+ - skopeo-containers
+
+required: false
+pulls: true
+
+env:
+ GOPATH: /go
+ GOSRC: /go/src/github.com/containers
+
+tests:
+ - mkdir -p $GOSRC && ln -s /var/tmp/checkout $GOSRC/buildah
+ - cd $GOSRC/buildah && make darwin
+
+artifacts:
+ - test-suite.log
+
+context: "darwin CI"
diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml
new file mode 100644
index 000000000..9199b8f89
--- /dev/null
+++ b/vendor/github.com/containers/buildah/.travis.yml
@@ -0,0 +1,93 @@
+language: go
+dist: xenial
+sudo: required
+go:
+ - 1.11.x
+ - 1.12.x
+ - tip
+
+env:
+ global:
+ - TRAVIS_ENV="-e TRAVIS=$TRAVIS
+ -e CI=$CI
+ -e TRAVIS_COMMIT=$TRAVIS_COMMIT
+ -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE
+ -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
+ -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST
+ -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA
+ -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG
+ -e TRAVIS_BRANCH=$TRAVIS_BRANCH
+ -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID
+ -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR"
+ matrix:
+ - BUILDAH_ISOLATION=oci
+ DISTRO="ubuntu"
+ - BUILDAH_ISOLATION=chroot
+ DISTRO="ubuntu"
+ - BUILDAH_ISOLATION=rootless
+ DISTRO="ubuntu"
+matrix:
+ # If the latest unstable development version of go fails, that's OK.
+ allow_failures:
+ - go: tip
+ - env: TEST_GROUP=conformance
+
+ # Don't hold on the tip tests to finish. Mark tests green if the
+ # stable versions pass.
+ fast_finish: true
+
+services:
+ - docker
+before_install:
+ - make vendor
+ - ./hack/tree_status.sh
+ - sudo apt-get update
+ - sudo apt-get -qq install software-properties-common
+ - sudo add-apt-repository -y ppa:duggan/bats
+ - sudo apt-get update
+ - sudo apt-get -qq install bats btrfs-tools git libapparmor-dev libc-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libselinux1-dev linux-libc-dev realpath
+ - sudo apt-get -qq update
+ - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
+ - mkdir /home/travis/auth
+ - sudo mkdir -p /var/lib/containers/storage/overlay
+install:
+ # Let's create a self signed certificate and get it in the right places
+ - hostname
+ - ip a
+ - ping -c 3 localhost
+ - cat /etc/hostname
+ - openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
+ - cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
+ - sudo mkdir -p /etc/docker/certs.d/docker.io/
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
+ - sudo mkdir -p /etc/docker/certs.d/localhost:5000/
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
+ - sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
+ # Create the credentials file, then start up the Docker registry
+ - docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
+ - docker run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
+script:
+ # Fail fast
+ - set -e
+ # Let's do some docker stuff just for verification purposes
+ - docker ps --all
+ - docker images
+ - ls -alF /home/travis/auth
+ - docker pull docker.io/alpine
+ - echo testpassword | docker login localhost:5000 --username testuser --password-stdin
+ - docker tag alpine localhost:5000/my-alpine
+ - docker push localhost:5000/my-alpine
+ - docker ps --all
+ - docker images
+ - docker rmi docker.io/alpine
+ - docker rmi localhost:5000/my-alpine
+ - docker pull localhost:5000/my-alpine
+ - docker ps --all
+ - docker images
+ - docker rmi localhost:5000/my-alpine
+ # Setting up Docker Registry is complete, let's do Buildah testing!
+ - make install.tools install.libseccomp.sudo all runc validate SECURITYTAGS="apparmor seccomp"
+ - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
+ - tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v --root $tmp/root --runroot $tmp/runroot --storage-driver vfs --signature-policy `pwd`/tests/policy.json --registries-conf `pwd`/tests/registries.conf
+ - cd tests; sudo PATH="$PATH" ./test_runner.sh
+ - cd ..
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
new file mode 100644
index 000000000..1c59a67ad
--- /dev/null
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -0,0 +1,976 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# Changelog
+
+## v1.8.4 (2019-06-13)
+ Update containers/image to v2.0.0
+ run: fix hang with run and --isolation=chroot
+ run: fix hang when using run
+ chroot: drop unused function call
+ remove --> before imgageID on build
+ Always close stdin pipe
+ Write deny to setgroups when doing single user mapping
+ Avoid including linux/memfd.h
+ Add a test for the symlink pointing to a directory
+ Add missing continue
+ Fix the handling of symlinks to absolute paths
+ Only set default network sysctls if not rootless
+ Support --dns=none like podman
+ fix bug --cpu-shares parsing typo
+ Fix validate complaint
+ Update vendor on containers/storage to v1.12.10
+ Create directory paths for COPY thereby ensuring correct perms
+ imagebuildah: use a stable sort for comparing build args
+ imagebuildah: tighten up cache checking
+ bud.bats: add a test verying the order of --build-args
+ add -t to podman run
+ imagebuildah: simplify screening by top layers
+ imagebuildah: handle ID mappings for COPY --from
+ imagebuildah: apply additionalTags ourselves
+ bud.bats: test additional tags with cached images
+ bud.bats: add a test for WORKDIR and COPY with absolute destinations
+ Cleanup Overlay Mounts content
+
+## v1.8.3 (2019-06-04)
+ * Add support for file secret mounts
+ * Add ability to skip secrets in mounts file
+ * allow 32bit builds
+ * fix tutorial instructions
+ * imagebuilder: pass the right contextDir to Add()
+ * add: use fileutils.PatternMatcher for .dockerignore
+ * bud.bats: add another .dockerignore test
+ * unshare: fallback to single usermapping
+ * addHelperSymlink: clear the destination on os.IsExist errors
+ * bud.bats: test replacing symbolic links
+ * imagebuildah: fix handling of destinations that end with '/'
+ * bud.bats: test COPY with a final "/" in the destination
+ * linux: add check for sysctl before using it
+ * unshare: set _CONTAINERS_ROOTLESS_GID
+ * Rework buildahimamges
+ * build context: support https git repos
+ * Add a test for ENV special chars behaviour
+ * Check in new Dockerfiles
+ * Apply custom SHELL during build time
+ * config: expand variables only at the command line
+ * SetEnv: we only need to expand v once
+ * Add default /root if empty on chroot iso
+ * Add support for Overlay volumes into the container.
+ * Export buildah validate volume functions so it can share code with libpod
+ * Bump baseline test to F30
+ * Fix rootless handling of /dev/shm size
+ * Avoid fmt.Printf() in the library
+ * imagebuildah: tighten cache checking back up
+ * Handle WORKDIR with dangling target
+ * Default Authfile to proper path
+ * Make buildah run --isolation follow BUILDAH_ISOLATION environment
+ * Vendor in latest containers/storage and containers/image
+ * getParent/getChildren: handle layerless images
+ * imagebuildah: recognize cache images for layerless images
+ * bud.bats: test scratch images with --layers caching
+ * Get CHANGELOG.md updates
+ * Add some symlinks to test our .dockerignore logic
+ * imagebuildah: addHelper: handle symbolic links
+ * commit/push: use an everything-allowed policy
+ * Correct manpage formatting in files section
+ * Remove must be root statement from buildah doc
+ * Change image names to stable, testing and upstream
+ * Bump back to v1.9.0-dev
+
+## v1.8.2 (2019-05-02)
+ Vendor Storage 1.12.6
+ Create scratch file in TESTDIR
+ Test bud-copy-dot with --layers picks up changed file
+ Bump back to 1.9.0-dev
+
+## v1.8.1 (2019-05-01)
+ Don't create directory on container
+ Replace kubernetes/pause in tests with k8s.gcr.io/pause
+ imagebuildah: don't remove intermediate images if we need them
+ Rework buildahimagegit to buildahimageupstream
+ Fix Transient Mounts
+ Handle WORKDIRs that are symlinks
+ allow podman to build a client for windows
+ Touch up 1.9-dev to 1.9.0-dev
+ Bump to 1.9-dev
+
+## v1.8.0 (2019-04-26)
+ Resolve symlink when checking container path
+ commit: commit on every instruction, but not always with layers
+ CommitOptions: drop the unused OnBuild field
+ makeImageRef: pass in the whole CommitOptions structure
+ cmd: API cleanup: stores before images
+ run: check if SELinux is enabled
+ Fix buildahimages Dockerfiles to include support for additionalimages mounted from host.
+ Detect changes in rootdir
+ Fix typo in buildah-pull(1)
+ Vendor in latest containers/storage
+ Keep track of any build-args used during buildah bud --layers
+ commit: always set a parent ID
+ imagebuildah: rework unused-argument detection
+ fix bug dest path when COPY .dockerignore
+ Move Host IDMAppings code from util to unshare
+ Add BUILDAH_ISOLATION rootless back
+ Travis CI: fail fast, upon error in any step
+ imagebuildah: only commit images for intermediate stages if we have to
+ Use errors.Cause() when checking for IsNotExist errors
+ auto pass http_proxy to container
+ Bump back to 1.8-dev
+
+## v1.7.3 (2019-04-16)
+* Tue Apr 16, 2019 Tom Sweeney <tsweeney@redhat.com> 1.7.3
+ imagebuildah: don't leak image structs
+ Add Dockerfiles for buildahimages
+ Bump to Replace golang 1.10 with 1.12
+ add --dns* flags to buildah bud
+ Add hack/build_speed.sh test speeds on building container images
+ Create buildahimage Dockerfile for Quay
+ rename 'is' to 'expect_output'
+ squash.bats: test squashing in multi-layered builds
+ bud.bats: test COPY --from in a Dockerfile while using the cache
+ commit: make target image names optional
+ Fix bud-args to allow comma separation
+ oops, missed some tests in commit.bats
+ new helper: expect_line_count
+ New tests for #1467 (string slices in cmdline opts)
+ Workarounds for dealing with travis; review feedback
+ BATS tests - extensive but minor cleanup
+ imagebuildah: defer pulling images for COPY --from
+ imagebuildah: centralize COMMIT and image ID output
+ Travis: do not use traviswait
+ imagebuildah: only initialize imagebuilder configuration once per stage
+ Make cleaner error on Dockerfile build errors
+ unshare: move to pkg/
+ unshare: move some code from cmd/buildah/unshare
+ Fix handling of Slices versus Arrays
+ imagebuildah: reorganize stage and per-stage logic
+ imagebuildah: add empty layers for instructions
+ Add missing step in installing into Ubuntu
+ fix bug in .dockerignore support
+ imagebuildah: deduplicate prepended "FROM" instructions
+ Touch up intro
+ commit: set created-by to the shell if it isn't set
+ commit: check that we always set a "created-by"
+ docs/buildah.md: add "containers-" prefixes under "SEE ALSO"
+ Bump back to 1.8-dev
+
+## v1.7.2 (2019-03-28)
+ mount: do not create automatically a namespace
+ buildah: correctly create the userns if euid!=0
+ imagebuildah.Build: consolidate cleanup logic
+ CommitOptions: drop the redundant Store field
+ Move pkg/chrootuser from libpod to buildah.
+ imagebuildah: record image IDs and references more often
+ vendor imagebuilder v1.1.0
+ imagebuildah: fix requiresStart/noRunsRemaining confusion
+ imagebuildah: check for unused args across stages
+ bump github.com/containernetworking/cni to v0.7.0-rc2
+ imagebuildah: use "useCache" instead of "noCache"
+ imagebuildah.resolveNameToImageRef(): take name as a parameter
+ Export fields of the DokcerIgnore struct
+ imagebuildah: drop the duplicate containerIDs list
+ rootless: by default use the host network namespace
+ imagebuildah: split Executor and per-stage execution
+ imagebuildah: move some fields around
+ golint: make golint happy
+ docs: 01-intro.md: add missing . in Dockerfile examples
+ fix bug using .dockerignore
+ Do not create empty mounts.conf file
+ images: suppress a spurious blank line with no images
+ from: distinguish between ADD and COPY
+ fix bug to not separate each --label value with comma
+ buildah-bud.md: correct a typo, note a default
+ Remove mistaken code that got merged in other PR
+ add sample registries.conf to docs
+ escape shell variables in README example
+ slirp4netns: set mtu to 65520
+ images: imageReposToMap() already adds <none>:<none>
+ imagebuildah.ReposToMap: move to cmd
+ Build: resolve copyFrom references earlier
+ Allow rootless users to use the cache directory in homedir
+ bud.bats: use the per-test temp directory
+ bud.bats: log output before counting length
+ Simplify checks for leftover args
+ Print commitID with --layers
+ fix bug images use the template to print results
+ rootless: honor --net host
+ onsi/gomeage add missing files
+ vendor latest openshift/imagebuilder
+ Remove noop from squash help
+ Prepend a comment to files setup in container
+ imagebuildah resolveSymlink: fix handling of relative links
+ Errors should be printed to stderr
+ Add recommends for slirp4netns and fuse-overlay
+ Update pull and pull-always flags
+ Hide from users command options that we don't want them to use.
+ Update secrets fipsmode patch to work on rootless containers
+ fix unshare option handling and documentation
+ Vendor in latest containers/storage
+ Hard-code docker.Transport use in pull --all-tags
+ Use a types.ImageReference instead of (transport, name) strings in pullImage etc.
+ Move the computation of srcRef before first pullAndFindImage
+ Don't throw away user-specified tag for pull --all-tags
+ CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference
+ Don't try to parse imageName as transport:image in pullImage
+ Use reference.WithTag instead of manual string manipulation in Pull
+ Don't pass image = transport:repo:tag, transport=transport to pullImage
+ Fix confusing variable naming in Pull
+ Don't try to parse image name as a transport:image
+ Fix error reporting when parsing trans+image
+ Remove 'transport == ""' handling from the pull path
+ Clean up "pulls" of local image IDs / ID prefixes
+ Simplify ExpandNames
+ Document the semantics of transport+name returned by ResolveName
+ UPdate gitvalidation epoch
+ Bump back to 1.8-dev
+
+## v1.7.1 (2019-02-26)
+ vendor containers/image v1.5
+ Move secrets code from libpod into buildah
+ Update CHANGELOG.md with the past changes
+ README.md: fix typo
+ Fix a few issues found by tests/validate/gometalinter.sh
+ Neutralize buildah/unshare on non-Linux platforms
+ Explicitly specify a directory to find(1)
+ README.md: rephrase Buildah description
+ Stop printing default twice in cli --help
+ install.md: add section about vendoring
+ Bump to 1.8-dev
+
+## v1.7 (2019-02-21)
+ vendor containers/image v1.4
+ Make "images --all" faster
+ Remove a misleading comment
+ Remove quiet option from pull options
+ Make sure buildah pull --all-tags only works with docker transport
+ Support oci layout format
+ Fix pulling of images within buildah
+ Fix tls-verify polarity
+ Travis: execute make vendor and hack/tree_status.sh
+ vendor.conf: remove unused dependencies
+ add missing vendor/github.com/containers/libpod/vendor.conf
+ vendor.conf: remove github.com/inconshreveable/mousetrap
+ make vendor: always fetch the latest vndr
+ add hack/tree_status.sh script
+ Bump c/Storage to 1.10
+ Add --all-tags test to pull
+ mount: make error clearer
+ Remove global flags from cli help
+ Set --disable-compression to true as documented
+ Help document using buildah mount in rootless mode
+ healthcheck start-period: update documentation
+ Vendor in latest c/storage and c/image
+ dumpbolt: handle nested buckets
+ Fix buildah commit compress by default
+ Test on xenial, not trusty
+ unshare: reexec using a memfd copy instead of the binary
+ Add --target to bud command
+ Fix example for setting multiple environment variables
+ main: fix rootless mode
+ buildah: force umask 022
+ pull.bats: specify registry config when using registries
+ pull.bats: use the temporary directory, not /tmp
+ unshare: do not set rootless mode if euid=0
+ Touch up cli help examples and a few nits
+ Add an undocumented dumpbolt command
+ Move tar commands into containers/storage
+ Fix bud issue with 2 line Dockerfile
+ Add package install descriptions
+ Note configuration file requirements
+ Replace urfave/cli with cobra
+ cleanup vendor.conf
+ Vendor in latest containers/storage
+ Add Quiet to PullOptions and PushOptions
+ cmd/commit: add flag omit-timestamp to allow for deterministic builds
+ Add options for empty-layer history entries
+ Make CLI help descriptions and usage a bit more consistent
+ vndr opencontainers/selinux
+ Bump baseline test Fedora to 29
+ Bump to v1.7-dev-1
+ Bump to v1.6-1
+ Add support for ADD --chown
+ imagebuildah: make EnsureContainerPath() check/create the right one
+ Bump 1.7-dev
+ Fix contrib/rpm/bulidah.spec changelog date
+
+## v1.6-1 (2019-01-18)
+ Add support for ADD --chown
+ imagebuildah: make EnsureContainerPath() check/create the right one
+ Fix contrib/rpm/bulidah.spec changelog date
+ Vendor in latest containers/storage
+ Revendor everything
+ Revendor in latest code by release
+ unshare: do not set USER=root
+ run: ignore EIO when flushing at the end, avoid double log
+ build-using-dockerfile,commit: disable compression by default
+ Update some comments
+ Make rootless work under no_pivot_root
+ Add CreatedAtRaw date field for use with Format
+ Properly format images JSON output
+ pull: add all-tags option
+ Fix support for multiple Short options
+ pkg/blobcache: add synchronization
+ Skip empty files in file check of conformance test
+ Use NoPivot also for RUN, not only for run
+ Remove no longer used isReferenceInsecure / isRegistryInsecure
+ Do not set OCIInsecureSkipTLSVerify based on registries.conf
+ Remove duplicate entries from images JSON output
+ vendor parallel-copy from containers/image
+ blobcache.bats: adjust explicit push tests
+ Handle one line Dockerfile with layers
+ We should only warn if user actually requests Hostname be set in image
+ Fix compiler Warning about comparing different size types
+ imagebuildah: don't walk if rootdir and path are equal
+ Add aliases for buildah containers, so buildah list, ls and ps work
+ vendor: use faster version instead compress/gzip
+ vendor: update libpod
+ Properly handle Hostname inside of RUN command
+ docs: mention how to mount in rootless mode
+ tests: use fully qualified name for centos image
+ travis.yml: use the fully qualified name for alpine
+ mount: allow mount only when using vfs
+ Add some tests for buildah pull
+ Touch up images -q processing
+ Refactor: Use library shared idtools.ParseIDMap() instead of bundling it
+ bump GITVALIDATE_EPOCH
+ cli.BudFlags: add `--platform` nop
+ Makefile: allow packagers to more easily add tags
+ Makefile: soften the requirement on git
+ tests: add containers json test
+ Inline blobCache.putBlob into blobCacheDestination.PutBlob
+ Move saveStream and putBlob near blobCacheDestination.PutBlob
+ Remove BlobCache.PutBlob
+ Update for API changes
+ Vendor c/image after merging c/image#536
+ Handle 'COPY --from' in Dockerfile
+ Vendor in latest content from github.com/containers/storage
+ Clarify docker.io default in push with docker-daemon
+ Test blob caching
+ Wire in a hidden --blob-cache option
+ Use a blob cache when we're asked to use one
+ Add --disable-compression to 'build-using-dockerfile'
+ Add a blob cache implementation
+ vendor: update containers/storage
+ Update for sysregistriesv2 API changes
+ Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53
+ clean up makefile variables
+ Fix file permission
+ Complete the instructions for the command
+ Show warning when a build arg not used
+ Assume user 0 group 0, if /etc/passwd file in container.
+ Add buildah info command
+ Enable -q when --filter is used for images command
+ Add v1.5 Release Announcement
+ Fix dangling filter for images command
+ Fix completions to print Names as well as IDs
+ tests: Fix file permissions
+ Bump 1.6-dev
+
+## v1.5-1 (2018-11-21)
+ Bump min go to 1.10 in install.md
+ vendor: update ostree-go
+ Update docker build command line in conformance test
+ Print command in SystemExec as debug information
+ Add some skip word for inspect check in conformance test
+ Update regex for multi stage base test
+ Sort CLI flags
+ vendor: update containers/storage
+ Add note to install about non-root on RHEL/CentOS
+ Update imagebuild depdency to support heading ARGs in Dockerfile
+ rootless: do not specify --rootless to the OCI runtime
+ Export resolvesymlink function
+ Exclude --force-rm from common bud cli flags
+ run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume
+ rootless: use slirp4netns to setup the network namespace
+ Instructions for completing the pull command
+ Fix travis to not run environment variable patch
+ rootless: only discard network configuration names
+ run: only set up /etc/hosts or /etc/resolv.conf with network
+ common: getFormat: match entire string not only the prefix
+ vendor: update libpod
+ Change validation EPOCH
+ Fixing broken link for container-registries.conf
+ Restore rootless isolation test for from volume ro test
+ ostree: fix tag for build constraint
+ Handle directories better in bud -f
+ vndr in latest containers/storage
+ Fix unshare gofmt issue
+ runSetupBuiltinVolumes(): break up volume setup
+ common: support a per-user registries conf file
+ unshare: do not override the configuration
+ common: honor the rootless configuration file
+ unshare: create a new mount namespace
+ unshare: support libpod rootless pkg
+ Use libpod GetDefaultStorage to report proper storage config
+ Allow container storage to manage the SELinux labels
+ Resolve image names with default transport in from command
+ run: When the value of isolation is set, use the set value instead of the default value.
+ Vendor in latest containers/storage and opencontainers/selinux
+ Remove no longer valid todo
+ Check for empty buildTime in version
+ Change gofmt so it runs on all but 1.10
+ Run gofmt only on Go 1.11
+ Walk symlinks when checking cached images for copied/added files
+ ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder
+ Set WorkingDir to empty, not / for conformance
+ Update calls in e2e to addres 1101
+ imagebuilder.BuildDockerfiles: return the image ID
+ Update for changes in the containers/image API
+ bump(github.com/containers/image)
+ Allow setting --no-pivot default with an env var
+ Add man page and bash completion, for --no-pivot
+ Add the --no-pivot flag to the run command
+ Improve reporting about individual pull failures
+ Move the "short name but no search registries" error handling to resolveImage
+ Return a "search registries were needed but empty" indication in util.ResolveName
+ Simplify handling of the "tried to pull an image but found nothing" case in newBuilder
+ Don't even invoke the pull loop if options.FromImage == ""
+ Eliminate the long-running ref and img variables in resolveImage
+ In resolveImage, return immediately on success
+ Fix From As in Dockerfile
+ Vendor latest containers/image
+ Vendor in latest libpod
+ Sort CLI flags of buildah bud
+ Change from testing with golang 1.9 to 1.11.
+ unshare: detect when unprivileged userns are disabled
+ Optimize redundant code
+ fix missing format param
+ chroot: fix the args check
+ imagebuildah: make ResolveSymLink public
+ Update copy chown test
+ buildah: use the same logic for XDG_RUNTIME_DIR as podman
+ V1.4 Release Announcement
+ Podman --privileged selinux is broken
+ papr: mount source at gopath
+ parse: Modify the return value
+ parse: modify the verification of the isolation value
+ Make sure we log or return every error
+ pullImage(): when completing an image name, try docker://
+ Fix up Tutorial 3 to account for format
+ Vendor in latest containers/storage and containers/image
+ docs/tutorials/01-intro.md: enhanced installation instructions
+ Enforce "blocked" for registries for the "docker" transport
+ Correctly set DockerInsecureSkipTLSVerify when pulling images
+ chroot: set up seccomp and capabilities after supplemental groups
+ chroot: fix capabilities list setup and application
+ .papr.yml: log the podman version
+ namespaces.bats: fix handling of uidmap/gidmap options in pairs
+ chroot: only create user namespaces when we know we need them
+ Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS)
+ bash/buildah: add isolation option to the from command
+
+## v1.4 (2018-10-02)
+ from: fix isolation option
+ Touchup pull manpage
+ Export buildah ReserveSELinuxLables so podman can use it
+ Add buildah.io to README.md and doc fixes
+ Update rmi man for prune changes
+ Ignore file not found removal error in bud
+ bump(github.com/containers/{storage,image})
+ NewImageSource(): only create one Diff() at a time
+ Copy ExposedPorts from base image into the config
+ tests: run conformance test suite in Travis
+ Change rmi --prune to not accept an imageID
+ Clear intermediate container IDs after each stage
+ Request podman version for build issues
+ unshare: keep the additional groups of the user
+ Builtin volumes should be owned by the UID/GID of the container
+ Get rid of dangling whitespace in markdown files
+ Move buildah from projecatatomic/buildah to containers/buildah
+ nitpick: parse.validateFlags loop in bud cli
+ bash: Completion options
+ Add signature policy to push tests
+ vendor in latest containers/image
+ Fix grammar in Container Tools Guide
+ Don't build btrfs if it is not installed
+ new: Return image-pulling errors from resolveImage
+ pull: Return image-pulling errors from pullImage
+ Add more volume mount tests
+ chroot: create missing parent directories for volume mounts
+ Push: Allow an empty destination
+ Add Podman relationship to readme, create container tools guide
+ Fix arg usage in buildah-tag
+ Add flags/arguments order verification to other commands
+ Handle ErrDuplicateName errors from store.CreateContainer()
+ Evaluate symbolic links on Add/Copy Commands
+ Vendor in latest containers/image and containers/storage
+ Retain bounding set when running containers as non root
+ run container-diff tests in Travis
+ buildah-images.md: Fix option contents
+ push: show image digest after push succeed
+ Vendor in latest containers/storage,image,libpod and runc
+ Change references to cri-o to point at new repository
+ Exclude --layers from the common bug cli flags
+ demos: Increase the executable permissions
+ run: clear default seccomp filter if not enabled
+ Bump maximum cyclomatic complexity to 45
+ stdin: on HUP, read everything
+ nitpick: use tabs in tests/helpers.bash
+ Add flags/arguments order verification to one arg commands
+ nitpick: decrease cognitive complexity in buildah-bud
+ rename: Avoid renaming the same name as other containers
+ chroot isolation: chroot() before setting up seccomp
+ Small nitpick at the "if" condition in tag.go
+ cmd/images: Modify json option
+ cmd/images: Disallow the input of image when using the -a option
+ Fix examples to include context directory
+ Update containers/image to fix commit layer issue
+ cmd/containers: End loop early when using the json option
+ Make buildah-from error message clear when flags are after arg
+ Touch up README.md for conformance tests
+ Update container/storage for lock fix
+ cmd/rm: restore the correct containerID display
+ Remove debug lines
+ Remove docker build image after each test
+ Add README for conformance test
+ Update the MakeOptions to accept all command options for buildah
+ Update regrex to fit the docker output in test "run with JSON"
+ cmd/buildah: Remove redundant variable declarations
+ Warn about using Commands in Dockerfile that are not supported by OCI.
+ Add buildah bud conformance test
+ Fix rename to also change container name in builder
+ Makefile: use $(GO) env-var everywhere
+ Cleanup code to more closely match Docker Build images
+ Document BUILDAH_* environment variables in buildah bud --help output
+ Return error immediately if error occurs in Prepare step
+ Fix --layers ADD from url issue
+ Add "Sign your PRs" TOC item to contributing.md.
+ Display the correct ID after deleting image
+ rmi: Modify the handling of errors
+ Let util.ResolveName() return parsing errors
+ Explain Open Container Initiative (OCI) acronym, add link
+ Update vendor for urfave/cli back to master
+ Handle COPY --chown in Dockerfile
+ Switch to Recommends container-selinux
+ Update vendor for containernetworking, imagebuildah and podman
+ Document STORAGE_DRIVER and STORAGE_OPTS environment variable
+ Change references to projectatomic/libpod to containers/libpod
+ Add container PATH retrieval example
+ Expand variables names for --env
+ imagebuildah: provide a way to provide stdin for RUN
+ Remove an unused srcRef.NewImageSource in pullImage
+ chroot: correct a comment
+ chroot: bind mount an empty directory for masking
+ Don't bother with --no-pivot for rootless isolation
+ CentOS need EPEL repo
+ Export a Pull() function
+ Remove stream options, since docker build does not have it
+ release v1.3: mention openSUSE
+ Add Release Announcements directory
+ Bump to v1.4-dev
+
+## 1.3 (2018-08-4)
+ Revert pull error handling from 881
+ bud should not search context directory for Dockerfile
+ Set BUILDAH_ISOLATION=rootless when running unprivileged
+ .papr.sh: Also test with BUILDAH_ISOLATION=rootless
+ Skip certain tests when we're using "rootless" isolation
+ .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ Add and implement IsolationOCIRootless
+ Add a value for IsolationOCIRootless
+ Fix rmi to remove intermediate images associated with an image
+ Return policy error on pull
+ Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c
+ Switch to github.com/containers/image/pkg/sysregistriesv2
+ unshare: make adjusting the OOM score optional
+ Add flags validation
+ chroot: handle raising process limits
+ chroot: make the resource limits name map module-global
+ Remove rpm.bats, we need to run this manually
+ Set the default ulimits to match Docker
+ buildah: no args is out of bounds
+ unshare: error message missed the pid
+ preprocess ".in" suffixed Dockerfiles
+ Fix the the in buildah-config man page
+ Only test rpmbuild on latest fedora
+ Add support for multiple Short options
+ Update to latest urvave/cli
+ Add additional SELinux tests
+ Vendor in latest github.com/containers/{image;storage}
+ Stop testing with golang 1.8
+ Fix volume cache issue with buildah bud --layers
+ Create buildah pull command
+ Increase the deadline for gometalinter during 'make validate'
+ .papr.sh: Also test with BUILDAH_ISOLATION=chroot
+ .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ Add a Dockerfile
+ Set BUILDAH_ISOLATION=chroot when running unprivileged
+ Add and implement IsolationChroot
+ Update github.com/opencontainers/runc
+ maybeReexecUsingUserNamespace: add a default for root
+ Allow ping command without NET_RAW Capabilities
+ rmi.storageImageID: fix Wrapf format warning
+ Allow Dockerfile content to come from stdin
+ Vendor latest container/storage to fix overlay mountopt
+ userns: assign additional IDs sequentially
+ Remove default dev/pts
+ Add OnBuild test to baseline test
+ tests/run.bats(volumes): use :z when SELinux is enabled
+ Avoid a stall in runCollectOutput()
+ Use manifest from container/image
+ Vendor in latest containers/image and containers/storage
+ add rename command
+ Completion command
+ Update CHANGELOG.md
+ Update vendor for runc to fix 32 bit builds
+ bash completion: remove shebang
+ Update vendor for runc to fix 32 bit builds
+
+## 1.2 (2018-07-14)
+ Vendor in lates containers/image
+ build-using-dockerfile: let -t include transports again
+ Block use of /proc/acpi and /proc/keys from inside containers
+ Fix handling of --registries-conf
+ Fix becoming a maintainer link
+ add optional CI test fo darwin
+ Don't pass a nil error to errors.Wrapf()
+ image filter test: use kubernetes/pause as a "since"
+ Add --cidfile option to from
+ vendor: update containers/storage
+ Contributors need to find the CONTRIBUTOR.md file easier
+ Add a --loglevel option to build-with-dockerfile
+ Create Development plan
+ cmd: Code improvement
+ allow buildah cross compile for a darwin target
+ Add unused function param lint check
+ docs: Follow man-pages(7) suggestions for SYNOPSIS
+ Start using github.com/seccomp/containers-golang
+ umount: add all option to umount all mounted containers
+ runConfigureNetwork(): remove an unused parameter
+ Update github.com/opencontainers/selinux
+ Fix buildah bud --layers
+ Force ownership of /etc/hosts and /etc/resolv.conf to 0:0
+ main: if unprivileged, reexec in a user namespace
+ Vendor in latest imagebuilder
+ Reduce the complexity of the buildah.Run function
+ mount: output it before replacing lastError
+ Vendor in latest selinux-go code
+ Implement basic recognition of the "--isolation" option
+ Run(): try to resolve non-absolute paths using $PATH
+ Run(): don't include any default environment variables
+ build without seccomp
+ vendor in latest runtime-tools
+ bind/mount_unsupported.go: remove import errors
+ Update github.com/opencontainers/runc
+ Add Capabilities lists to BuilderInfo
+ Tweaks for commit tests
+ commit: recognize committing to second storage locations
+ Fix ARGS parsing for run commands
+ Add info on registries.conf to from manpage
+ Switch from using docker to podman for testing in .papr
+ buildah: set the HTTP User-Agent
+ ONBUILD tutorial
+ Add information about the configuration files to the install docs
+ Makefile: add uninstall
+ Add tilde info for push to troubleshooting
+ mount: support multiple inputs
+ Use the right formatting when adding entries to /etc/hosts
+ Vendor in latest go-selinux bindings
+ Allow --userns-uid-map/--userns-gid-map to be global options
+ bind: factor out UnmountMountpoints
+ Run(): simplify runCopyStdio()
+ Run(): handle POLLNVAL results
+ Run(): tweak terminal mode handling
+ Run(): rename 'copyStdio' to 'copyPipes'
+ Run(): don't set a Pdeathsig for the runtime
+ Run(): add options for adding and removing capabilities
+ Run(): don't use a callback when a slice will do
+ setupSeccomp(): refactor
+ Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers
+ Escape use of '_' in .md docs
+ Break out getProcIDMappings()
+ Break out SetupIntermediateMountNamespace()
+ Add Multi From Demo
+ Use the c/image conversion code instead of converting configs manually
+ Don't throw away the manifest MIME type and guess again
+ Consolidate loading manifest and config in initConfig
+ Pass a types.Image to Builder.initConfig
+ Require an image ID in importBuilderDataFromImage
+ Use c/image/manifest.GuessMIMEType instead of a custom heuristic
+ Do not ignore any parsing errors in initConfig
+ Explicitly handle "from scratch" images in Builder.initConfig
+ Fix parsing of OCI images
+ Simplify dead but dangerous-looking error handling
+ Don't ignore v2s1 history if docker_version is not set
+ Add --rm and --force-rm to buildah bud
+ Add --all,-a flag to buildah images
+ Separate stdio buffering from writing
+ Remove tty check from images --format
+ Add environment variable BUILDAH_RUNTIME
+ Add --layers and --no-cache to buildah bud
+ Touch up images man
+ version.md: fix DESCRIPTION
+ tests: add containers test
+ tests: add images test
+ images: fix usage
+ fix make clean error
+ Change 'registries' to 'container registries' in man
+ add commit test
+ Add(): learn to record hashes of what we add
+ Minor update to buildah config documentation for entrypoint
+ Bump to v1.2-dev
+ Add registries.conf link to a few man pages
+
+## 1.1 (2018-06-08)
+ Drop capabilities if running container processes as non root
+ Print Warning message if cmd will not be used based on entrypoint
+ Update 01-intro.md
+ Shouldn't add insecure registries to list of search registries
+ Report errors on bad transports specification when pushing images
+ Move parsing code out of common for namespaces and into pkg/parse.go
+ Add disable-content-trust noop flag to bud
+ Change freenode chan to buildah
+ runCopyStdio(): don't close stdin unless we saw POLLHUP
+ Add registry errors for pull
+ runCollectOutput(): just read until the pipes are closed on us
+ Run(): provide redirection for stdio
+ rmi, rm: add test
+ add mount test
+ Add parameter judgment for commands that do not require parameters
+ Add context dir to bud command in baseline test
+ run.bats: check that we can run with symlinks in the bundle path
+ Give better messages to users when image can not be found
+ use absolute path for bundlePath
+ Add environment variable to buildah --format
+ rm: add validation to args and all option
+ Accept json array input for config entrypoint
+ Run(): process RunOptions.Mounts, and its flags
+ Run(): only collect error output from stdio pipes if we created some
+ Add OnBuild support for Dockerfiles
+ Quick fix on demo readme
+ run: fix validate flags
+ buildah bud should require a context directory or URL
+ Touchup tutorial for run changes
+ Validate common bud and from flags
+ images: Error if the specified imagename does not exist
+ inspect: Increase err judgments to avoid panic
+ add test to inspect
+ buildah bud picks up ENV from base image
+ Extend the amount of time travis_wait should wait
+ Add a make target for Installing CNI plugins
+ Add tests for namespace control flags
+ copy.bats: check ownerships in the container
+ Fix SELinux test errors when SELinux is enabled
+ Add example CNI configurations
+ Run: set supplemental group IDs
+ Run: use a temporary mount namespace
+ Use CNI to configure container networks
+ add/secrets/commit: Use mappings when setting permissions on added content
+ Add CLI options for specifying namespace and cgroup setup
+ Always set mappings when using user namespaces
+ Run(): break out creation of stdio pipe descriptors
+ Read UID/GID mapping information from containers and images
+ Additional bud CI tests
+ Run integration tests under travis_wait in Travis
+ build-using-dockerfile: add --annotation
+ Implement --squash for build-using-dockerfile and commit
+ Vendor in latest container/storage for devicemapper support
+ add test to inspect
+ Vendor github.com/onsi/ginkgo and github.com/onsi/gomega
+ Test with Go 1.10, too
+ Add console syntax highlighting to troubleshooting page
+ bud.bats: print "$output" before checking its contents
+ Manage "Run" containers more closely
+ Break Builder.Run()'s "run runc" bits out
+ util.ResolveName(): handle completion for tagged/digested image names
+ Handle /etc/hosts and /etc/resolv.conf properly in container
+ Documentation fixes
+ Make it easier to parse our temporary directory as an image name
+ Makefile: list new pkg/ subdirectoris as dependencies for buildah
+ containerImageSource: return more-correct errors
+ API cleanup: PullPolicy and TerminalPolicy should be types
+ Make "run --terminal" and "run -t" aliases for "run --tty"
+ Vendor github.com/containernetworking/cni v0.6.0
+ Update github.com/containers/storage
+ Update github.com/containers/libpod
+ Add support for buildah bud --label
+ buildah push/from can push and pull images with no reference
+ Vendor in latest containers/image
+ Update gometalinter to fix install.tools error
+ Update troubleshooting with new run workaround
+ Added a bud demo and tidied up
+ Attempt to download file from url, if fails assume Dockerfile
+ Add buildah bud CI tests for ENV variables
+ Re-enable rpm .spec version check and new commit test
+ Update buildah scratch demo to support el7
+ Added Docker compatibility demo
+ Update to F28 and new run format in baseline test
+ Touchup man page short options across man pages
+ Added demo dir and a demo. chged distrorlease
+ builder-inspect: fix format option
+ Add cpu-shares short flag (-c) and cpu-shares CI tests
+ Minor fixes to formatting in rpm spec changelog
+ Fix rpm .spec changelog formatting
+ CI tests and minor fix for cache related noop flags
+ buildah-from: add effective value to mount propagation
+
+## 1.0 (2018-05-06)
+ Declare Buildah 1.0
+ Add cache-from and no-cache noops, and fix doco
+ Update option and documentation for --force-rm
+ Adding noop for --force-rm to match --rm
+ Add buildah bud ENTRYPOINT,CMD,RUN tests
+ Adding buildah bud RUN test scenarios
+ Extend tests for empty buildah run command
+ Fix formatting error in run.go
+ Update buildah run to make command required
+ Expanding buildah run cmd/entrypoint tests
+ Update test cases for buildah run behaviour
+ Remove buildah run cmd and entrypoint execution
+ Add Files section with registries.conf to pertinent man pages
+ tests/config: perfect test
+ tests/from: add name test
+ Do not print directly to stdout in Commit()
+ Touch up auth test commands
+ Force "localhost" as a default registry
+ Drop util.GetLocalTime()
+ Vendor in latest containers/image
+ Validate host and container paths passed to --volume
+ test/from: add add-host test
+ Add --compress, --rm, --squash flags as a noop for bud
+ Add FIPS mode secret to buildah run and bud
+ Add config --comment/--domainname/--history-comment/--hostname
+ 'buildah config': stop replacing Created-By whenever it's not specified
+ Modify man pages so they compile correctly in mandb
+ Add description on how to do --isolation to buildah-bud man page
+ Add support for --iidfile to bud and commit
+ Refactor buildah bud for vendoring
+ Fail if date or git not installed
+ Revert update of entrypoint behaviour to match docker
+ Vendor in latest imagebuilder code to fix multiple stage builds
+ Add /bin/sh -c to entrypoint in config
+ image_test: Improve the test
+ Fix README example of buildah config
+ buildah-image: add validation to 'format'
+ Simple changes to allow buildah to pass make validate
+ Clarify the use of buildah config options
+ containers_test: Perfect testing
+ buildah images and podman images are listing different sizes
+ buildah-containers: add tests and example to the man page
+ buildah-containers: add validation to 'format'
+ Clarify the use of buildah config options
+ Minor fix for lighttpd example in README
+ Add tls-verification to troubleshooting
+ Modify buildah rmi to account for changes in containers/storage
+ Vendor in latest containers/image and containers/storage
+ addcopy: add src validation
+ Remove tarball as an option from buildah push --help
+ Fix secrets patch
+ Update entrypoint behaviour to match docker
+ Display imageId after commit
+ config: add support for StopSignal
+ Fix docker login issue in travis.yml
+ Allow referencing stages as index and names
+ Add multi-stage builds tests
+ Add multi-stage builds support
+ Add accessor functions for comment and stop signal
+ Vendor in latest imagebuilder, to get mixed case AS support
+ Allow umount to have multi-containers
+ Update buildah push doc
+ buildah bud walks symlinks
+ Imagename is required for commit atm, update manpage
+
+## 0.16.0 (2018-04-08)
+ Bump to v0.16.0
+ Remove requires for ostree-lib in rpm spec file
+ Add support for shell
+ buildah.spec should require ostree-libs
+ Vendor in latest containers/image
+ bash: prefer options
+ Change image time to locale, add troubleshooting.md, add logo to other mds
+ buildah-run.md: fix error SYNOPSIS
+ docs: fix error example
+ Allow --cmd parameter to have commands as values
+ Touchup README to re-enable logo
+ Clean up README.md
+ Make default-mounts-file a hidden option
+ Document the mounts.conf file
+ Fix man pages to format correctly
+ Add various transport support to buildah from
+ Add unit tests to run.go
+ If the user overrides the storage driver, the options should be dropped
+ Show Config/Manifest as JSON string in inspect when format is not set
+ Switch which for that in README.md
+ Remove COPR
+ Fix wrong order of parameters
+ Vendor in latest containers/image
+ Remove shallowCopy(), which shouldn't be saving us time any more
+ shallowCopy: avoid a second read of the container's layer
+
+## 0.5 - 2017-11-07
+ Add secrets patch to buildah
+ Add proper SELinux labeling to buildah run
+ Add tls-verify to bud command
+ Make filtering by date use the image's date
+ images: don't list unnamed images twice
+ Fix timeout issue
+ Add further tty verbiage to buildah run
+ Make inspect try an image on failure if type not specified
+ Add support for `buildah run --hostname`
+ Tons of bug fixes and code cleanup
+
+## 0.4 - 2017-09-22
+### Added
+ Update buildah spec file to match new version
+ Bump to version 0.4
+ Add default transport to push if not provided
+ Add authentication to commit and push
+ Remove --transport flag
+ Run: don't complain about missing volume locations
+ Add credentials to buildah from
+ Remove export command
+ Bump containers/storage and containers/image
+
+## 0.3 - 2017-07-20
+## 0.2 - 2017-07-18
+### Added
+ Vendor in latest containers/image and containers/storage
+ Update image-spec and runtime-spec to v1.0.0
+ Add support for -- ending options parsing to buildah run
+ Add/Copy need to support glob syntax
+ Add flag to remove containers on commit
+ Add buildah export support
+ update 'buildah images' and 'buildah rmi' commands
+ buildah containers/image: Add JSON output option
+ Add 'buildah version' command
+ Handle "run" without an explicit command correctly
+ Ensure volume points get created, and with perms
+ Add a -a/--all option to "buildah containers"
+
+## 0.1 - 2017-06-14
+### Added
+ Vendor in latest container/storage container/image
+ Add a "push" command
+ Add an option to specify a Create date for images
+ Allow building a source image from another image
+ Improve buildah commit performance
+ Add a --volume flag to "buildah run"
+ Fix inspect/tag-by-truncated-image-ID
+ Include image-spec and runtime-spec versions
+ buildah mount command should list mounts when no arguments are given.
+ Make the output image format selectable
+ commit images in multiple formats
+ Also import configurations from V2S1 images
+ Add a "tag" command
+ Add an "inspect" command
+ Update reference comments for docker types origins
+ Improve configuration preservation in imagebuildah
+ Report pull/commit progress by default
+ Contribute buildah.spec
+ Remove --mount from buildah-from
+ Add a build-using-dockerfile command (alias: bud)
+ Create manpages for the buildah project
+ Add installation for buildah and bash completions
+ Rename "list"/"delete" to "containers"/"rm"
+ Switch `buildah list quiet` option to only list container id's
+ buildah delete should be able to delete multiple containers
+ Correctly set tags on the names of pulled images
+ Don't mix "config" in with "run" and "commit"
+ Add a "list" command, for listing active builders
+ Add "add" and "copy" commands
+ Add a "run" command, using runc
+ Massive refactoring
+ Make a note to distinguish compression of layers
+
+## 0.0 - 2017-01-26
+### Added
+ Initial version, needs work
diff --git a/vendor/github.com/containers/buildah/CONTRIBUTING.md b/vendor/github.com/containers/buildah/CONTRIBUTING.md
new file mode 100644
index 000000000..7dc301a55
--- /dev/null
+++ b/vendor/github.com/containers/buildah/CONTRIBUTING.md
@@ -0,0 +1,143 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# Contributing to Buildah
+
+We'd love to have you join the community! The following summarizes the processes
+that we follow.
+
+## Topics
+
+* [Reporting Issues](#reporting-issues)
+* [Submitting Pull Requests](#submitting-pull-requests)
+* [Sign your PRs](#sign-your-prs)
+* [Communications](#communications)
+* [Becoming a Maintainer](#becoming-a-maintainer)
+
+## Reporting Issues
+
+Before reporting an issue, check our backlog of
+[open issues](https://github.com/containers/buildah/issues)
+to see if someone else has already reported it. If so, feel free to add
+your scenario, or additional information, to the discussion. Or simply
+"subscribe" to it to be notified when it is updated.
+
+If you find a new issue with the project, we'd love to hear about it! The most
+important aspect of a bug report is that it includes enough information for
+us to reproduce it. So, please include as much detail as possible and try
+to leave out anything that doesn't really relate to the issue itself.
+The easier it is for us to reproduce it, the faster it'll be fixed!
+
+Please don't include any private/sensitive information in your issue!
+
+## Submitting Pull Requests
+
+No Pull Request (PR) is too small! Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to break up
+larger PRs into smaller ones - it's easier to review smaller code changes - but only
+if those smaller ones make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well documented code changes
+* additional testcases. Ideally, they should fail without your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might want to be reviewed
+separately from the rest of the PR. But squashing down to just one commit is ok
+too, since in the end the entire PR will be reviewed anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that github will automatically close the referenced issue
+when the PR is merged.
+
+<!--
+All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
+-->
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
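+
+For example, a minimal sketch (the name, e-mail address and commit message are
+placeholders; substitute your own):
+
+```
+git config user.name "Joe Smith"
+git config user.email joe.smith@email.com
+git commit -s -m "<your commit message>"
+```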
+
+## Communications
+
+For general questions or discussions, please use the
+`buildah` IRC channel on `irc.freenode.net`.
+
+For discussions around issues/bugs and features, you can use the github
+[issues](https://github.com/containers/buildah/issues)
+and
+[PRs](https://github.com/containers/buildah/pulls)
+tracking system.
+
+## Becoming a Maintainer
+
+To become a maintainer, you must first be nominated by an existing maintainer.
+If a majority (>50%) of maintainers agree, then the proposal is adopted and
+you will be added to the list.
+
+Removing a maintainer requires at least 75% of the remaining maintainers'
+approval, or, if the person requests to be removed, then it is automatic.
+Normally, a maintainer will only be removed if they are considered to be
+inactive for a long period of time or are viewed as disruptive to the community.
+
+The current list of maintainers can be found in the
+[MAINTAINERS](MAINTAINERS) file.
diff --git a/vendor/github.com/containers/buildah/MAINTAINERS b/vendor/github.com/containers/buildah/MAINTAINERS
new file mode 100644
index 000000000..5725ca981
--- /dev/null
+++ b/vendor/github.com/containers/buildah/MAINTAINERS
@@ -0,0 +1,4 @@
+Dan Walsh <dwalsh@redhat.com> (@rhatdan)
+Nalin Dahyabhai <nalin@redhat.com> (@nalind)
+Tom Sweeney <tsweeney@redhat.com> (@tomsweeneyredhat)
+Urvashi Mohnani <umohnani@redhat.com> (@umohnani8)
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
new file mode 100644
index 000000000..3e66a63a0
--- /dev/null
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -0,0 +1,143 @@
+SELINUXTAG := $(shell ./selinux_tag.sh)
+STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh)
+SECURITYTAGS ?= seccomp $(SELINUXTAG)
+TAGS ?= $(SECURITYTAGS) $(STORAGETAGS)
+BUILDTAGS += $(TAGS)
+PREFIX := /usr/local
+BINDIR := $(PREFIX)/bin
+BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions
+BUILDFLAGS := -tags "$(BUILDTAGS)"
+BUILDAH := buildah
+GO := go
+GO110 := 1.10
+GOVERSION := $(findstring $(GO110),$(shell go version))
+GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
+BUILD_INFO := $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
+CNI_COMMIT := $(if $(shell sed -e '\,github.com/containernetworking/cni, !d' -e 's,.* ,,g' vendor.conf),$(shell sed -e '\,github.com/containernetworking/cni, !d' -e 's,.* ,,g' vendor.conf),$(error "sed failed"))
+STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
+
+RUNC_COMMIT := 2c632d1a2de0192c3f18a2542ccb6f30a8719b1f
+LIBSECCOMP_COMMIT := release-2.3
+
+EXTRALDFLAGS :=
+LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(BUILD_INFO) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS)
+SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go pkg/unshare/*.c pkg/unshare/*.go util/*.go
+
+all: buildah imgtype docs
+
+.PHONY: static
+static: $(SOURCES)
+ $(MAKE) SECURITYTAGS="$(SECURITYTAGS)" STORAGETAGS=$(STATIC_STORAGETAGS) EXTRALDFLAGS='-ldflags "-extldflags '-static'"' BUILDAH=buildah.static binary
+
+.PHONY: binary
+binary: $(SOURCES)
+ $(GO) build $(LDFLAGS) -o $(BUILDAH) $(BUILDFLAGS) ./cmd/buildah
+
+buildah: binary
+
+darwin:
+ GOOS=darwin $(GO) build $(LDFLAGS) -o buildah.darwin -tags "containers_image_openpgp" ./cmd/buildah
+
+imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go
+ $(GO) build $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype/imgtype.go
+
+.PHONY: clean
+clean:
+ $(RM) -r buildah imgtype build buildah.static
+ $(MAKE) -C docs clean
+
+.PHONY: docs
+docs: ## build the docs on the host
+ $(MAKE) -C docs
+
+# For vendoring to work right, the checkout directory must be such that our top
+# level is at $GOPATH/src/github.com/containers/buildah.
+.PHONY: gopath
+gopath:
+ test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
+
+# We use https://github.com/lk4d4/vndr to manage dependencies.
+.PHONY: deps
+deps: gopath
+ env GOPATH=$(shell cd ../../../.. ; pwd) vndr
+
+.PHONY: validate
+validate:
+ # Run gofmt on version 1.11 and higher
+ifneq ($(GO110),$(GOVERSION))
+ @./tests/validate/gofmt.sh
+endif
+ @./tests/validate/whitespace.sh
+ @./tests/validate/govet.sh
+ @./tests/validate/git-validation.sh
+ @./tests/validate/gometalinter.sh . cmd/buildah
+
+.PHONY: install.tools
+install.tools:
+ $(GO) get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
+ $(GO) get -u $(BUILDFLAGS) github.com/vbatts/git-validation
+ $(GO) get -u $(BUILDFLAGS) github.com/onsi/ginkgo/ginkgo
+ $(GO) get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
+ $(GOPATH)/bin/gometalinter.v1 -i
+
+.PHONY: runc
+runc: gopath
+ rm -rf ../../opencontainers/runc
+ git clone https://github.com/opencontainers/runc ../../opencontainers/runc
+ cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(STORAGETAGS) $(SECURITYTAGS)"
+ ln -sf ../../opencontainers/runc/runc
+
+.PHONY: install.libseccomp.sudo
+install.libseccomp.sudo: gopath
+ rm -rf ../../seccomp/libseccomp
+ git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
+ cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && sudo make install
+
+.PHONY: install.cni.sudo
+install.cni.sudo: gopath
+ rm -rf ../../containernetworking/plugins
+ git clone https://github.com/containernetworking/plugins ../../containernetworking/plugins
+ cd ../../containernetworking/plugins && ./build.sh && mkdir -p /opt/cni/bin && sudo install -v -m755 bin/* /opt/cni/bin/
+
+.PHONY: install
+install:
+ install -D -m0755 buildah $(DESTDIR)/$(BINDIR)/buildah
+ $(MAKE) -C docs install
+
+.PHONY: uninstall
+uninstall:
+ rm -f $(DESTDIR)/$(BINDIR)/buildah
+ rm -f $(PREFIX)/share/man/man1/buildah*.1
+ rm -f $(DESTDIR)/$(BASHINSTALLDIR)/buildah
+
+.PHONY: install.completions
+install.completions:
+ install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah
+
+.PHONY: install.runc
+install.runc:
+ install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
+
+.PHONY: test-integration
+test-integration:
+ ginkgo -v tests/e2e/.
+ cd tests; ./test_runner.sh
+
+tests/testreport/testreport: tests/testreport/testreport.go
+ $(GO) build -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport
+
+.PHONY: test-unit
+test-unit: tests/testreport/testreport
+ $(GO) test -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd)
+ tmp=$(shell mktemp -d) ; \
+ mkdir -p $$tmp/root $$tmp/runroot; \
+ $(GO) test -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json -registries-conf $(shell pwd)/tests/registries.conf
+
+.PHONY: .install.vndr
+.install.vndr:
+ $(GO) get -u github.com/LK4D4/vndr
+
+.PHONY: vendor
+vendor: vendor.conf .install.vndr
+ $(GOPATH)/bin/vndr \
+ -whitelist "github.com/onsi/gomega"
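+
+# A sketch of common invocations of the targets above (assumptions: the checkout
+# lives at $GOPATH/src/github.com/containers/buildah and the build dependencies
+# are installed):
+#   make               # default target: build buildah, imgtype and the docs
+#   make static        # build a statically linked buildah.static
+#   sudo make install  # install the buildah binary and man pages
+#   make test-unit     # run the unit tests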
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 589e090a8..b03aa65b2 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -33,10 +34,14 @@ type AddAndCopyOptions struct {
// If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees.
Hasher io.Writer
- // Exludes contents in the .dockerignore file
+ // Excludes is the contents of the .dockerignore file
Excludes []string
- // current directory on host
+ // The base directory for Excludes and data to copy in
ContextDir string
+ // ID mapping options to use when contents to be copied are part of
+ // another container, and need ownerships to be mapped from the host to
+ // that container's values before copying them into the container.
+ IDMappingOptions *IDMappingOptions
}
// addURL copies the contents of the source URL to the destination. This is
@@ -89,7 +94,10 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer)
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
- excludes := dockerIgnoreHelper(options.Excludes, options.ContextDir)
+ excludes, err := dockerIgnoreMatcher(options.Excludes, options.ContextDir)
+ if err != nil {
+ return err
+ }
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
@@ -100,7 +108,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
}()
// Find out which user (and group) the destination should belong to.
- user, err := b.user(mountPoint, options.Chown)
+ user, _, err := b.user(mountPoint, options.Chown)
if err != nil {
return err
}
@@ -112,6 +120,12 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
dest := mountPoint
if destination != "" && filepath.IsAbs(destination) {
+ dir := filepath.Dir(destination)
+ if dir != "." && dir != "/" {
+ if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil {
+ return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir))
+ }
+ }
dest = filepath.Join(dest, destination)
} else {
if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil {
@@ -142,8 +156,8 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
return errors.Errorf("destination %q is not a directory", dest)
}
- copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher)
- copyWithTar := b.copyWithTar(&containerOwner, options.Hasher)
+ copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher)
+ copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher)
untarPath := b.untarPath(nil, options.Hasher)
err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
if err != nil {
@@ -153,12 +167,12 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
// user returns the user (and group) information which the destination should belong to.
-func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, string, error) {
if userspec == "" {
userspec = b.User()
}
- uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
+ uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
u := specs.User{
UID: uid,
GID: gid,
@@ -175,45 +189,48 @@ func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
}
}
- return u, err
+ return u, homeDir, err
}
-// dockerIgnore struct keep info from .dockerignore
-type dockerIgnore struct {
- ExcludePath string
- IsExcluded bool
-}
-
-// dockerIgnoreHelper returns the lines from .dockerignore file without the comments
-// and reverses the order
-func dockerIgnoreHelper(lines []string, contextDir string) []dockerIgnore {
- var excludes []dockerIgnore
- // the last match of a file in the .dockerignmatches determines whether it is included or excluded
- // reverse the order
- for i := len(lines) - 1; i >= 0; i-- {
- exclude := lines[i]
- // ignore the comment in .dockerignore
- if strings.HasPrefix(exclude, "#") || len(exclude) == 0 {
+// dockerIgnoreMatcher returns a matcher based on the contents of the .dockerignore file under contextDir
+func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternMatcher, error) {
+ // if there's no context dir, there's no .dockerignore file to consult
+ if contextDir == "" {
+ return nil, nil
+ }
+ patterns := []string{".dockerignore"}
+ for _, ignoreSpec := range lines {
+ ignoreSpec = strings.TrimSpace(ignoreSpec)
+ // ignore comments passed back from .dockerignore
+ if ignoreSpec == "" || ignoreSpec[0] == '#' {
continue
}
- excludeFlag := true
- if strings.HasPrefix(exclude, "!") {
- exclude = strings.TrimPrefix(exclude, "!")
- excludeFlag = false
+ // if the spec starts with '!' it means the pattern
+ // should be included. make a note so that we can move
+ // it to the front of the updated pattern
+ includeFlag := ""
+ if strings.HasPrefix(ignoreSpec, "!") {
+ includeFlag = "!"
+ ignoreSpec = ignoreSpec[1:]
+ }
+ if ignoreSpec == "" {
+ continue
}
- excludes = append(excludes, dockerIgnore{ExcludePath: filepath.Join(contextDir, exclude), IsExcluded: excludeFlag})
+ patterns = append(patterns, includeFlag+filepath.Join(contextDir, ignoreSpec))
}
- if len(excludes) != 0 {
- excludes = append(excludes, dockerIgnore{ExcludePath: filepath.Join(contextDir, ".dockerignore"), IsExcluded: true})
+ // if there are no patterns, save time by not constructing the object
+ if len(patterns) == 0 {
+ return nil, nil
}
- return excludes
-}
-
-func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
- dirsInDockerignore, err := getDirsInDockerignore(options.ContextDir, excludes)
+ // return a matcher object
+ matcher, err := fileutils.NewPatternMatcher(patterns)
if err != nil {
- return errors.Wrapf(err, "error checking directories in .dockerignore")
+ return nil, errors.Wrapf(err, "error creating file matcher using patterns %v", patterns)
}
+ return matcher, nil
+}
+
+func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
for _, src := range source {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// We assume that source is a file, and we're copying
@@ -242,7 +259,7 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
if len(glob) == 0 {
return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
}
- outer:
+
for _, gsrc := range glob {
esrc, err := filepath.EvalSymlinks(gsrc)
if err != nil {
@@ -261,7 +278,7 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
return errors.Wrapf(err, "error creating directory %q", dest)
}
logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
- if len(excludes) == 0 {
+ if excludes == nil {
if err = copyWithTar(esrc, dest); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
}
@@ -271,23 +288,12 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
if err != nil {
return err
}
- for _, exclude := range excludes {
- match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
- if err != nil {
- return err
- }
- prefix, exist := dirsInDockerignore[exclude.ExcludePath]
- hasPrefix := false
- if exist {
- hasPrefix = filepath.HasPrefix(path, prefix)
- }
- if !(match || hasPrefix) {
- continue
- }
- if (hasPrefix && exclude.IsExcluded) || (match && exclude.IsExcluded) {
- return nil
- }
- break
+ skip, err := excludes.Matches(path)
+ if err != nil {
+ return errors.Wrapf(err, "error checking if %s is an excluded path", path)
+ }
+ if skip {
+ return nil
}
// combine the filename with the dest directory
fpath, err := filepath.Rel(esrc, path)
@@ -297,8 +303,8 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
mtime := info.ModTime()
atime := mtime
times := []syscall.Timespec{
- {Sec: atime.Unix(), Nsec: atime.UnixNano() % 1000000000},
- {Sec: mtime.Unix(), Nsec: mtime.UnixNano() % 1000000000},
+ syscall.NsecToTimespec(atime.UnixNano()),
+ syscall.NsecToTimespec(mtime.UnixNano()),
}
if info.IsDir() {
return addHelperDirectory(esrc, path, filepath.Join(dest, fpath), info, hostOwner, times)
@@ -320,20 +326,6 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
continue
}
- for _, exclude := range excludes {
- match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), esrc)
- if err != nil {
- return err
- }
- if !match {
- continue
- }
- if exclude.IsExcluded {
- continue outer
- }
- break
- }
-
if !extract || !archive.IsArchivePath(esrc) {
// This source is a file, and either it's not an
// archive, or we don't care whether or not it's an
@@ -349,6 +341,7 @@ func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.Fil
}
continue
}
+
// We're extracting an archive into the destination directory.
logrus.Debugf("extracting contents of %q into %q", esrc, dest)
if err = untarPath(esrc, dest); err != nil {
@@ -381,7 +374,15 @@ func addHelperSymlink(src, dest string, info os.FileInfo, hostOwner idtools.IDPa
return errors.Wrapf(err, "error reading contents of symbolic link at %q", src)
}
if err = os.Symlink(linkContents, dest); err != nil {
- return errors.Wrapf(err, "error creating symbolic link to %q at %q", linkContents, dest)
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "error creating symbolic link to %q at %q", linkContents, dest)
+ }
+ if err = os.RemoveAll(dest); err != nil {
+ return errors.Wrapf(err, "error clearing symbolic link target %q", dest)
+ }
+ if err = os.Symlink(linkContents, dest); err != nil {
+ return errors.Wrapf(err, "error creating symbolic link to %q at %q (second try)", linkContents, dest)
+ }
}
if err = idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil {
return errors.Wrapf(err, "error setting owner of symbolic link %q to %d:%d", dest, hostOwner.UID, hostOwner.GID)
@@ -392,35 +393,3 @@ func addHelperSymlink(src, dest string, info os.FileInfo, hostOwner idtools.IDPa
logrus.Debugf("Symlink(%s, %s)", linkContents, dest)
return nil
}
-
-func getDirsInDockerignore(srcAbsPath string, excludes []dockerIgnore) (map[string]string, error) {
- visitedDir := make(map[string]string)
- if len(excludes) == 0 {
- return visitedDir, nil
- }
- err := filepath.Walk(srcAbsPath, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
- for _, exclude := range excludes {
- match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
- if err != nil {
- return err
- }
- if !match {
- continue
- }
- if _, exist := visitedDir[exclude.ExcludePath]; exist {
- continue
- }
- visitedDir[exclude.ExcludePath] = path
- }
- }
- return nil
- })
- if err != nil {
- return visitedDir, err
- }
- return visitedDir, nil
-}
diff --git a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
new file mode 100644
index 000000000..357f33b8b
--- /dev/null
+++ b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+cc -E - > /dev/null 2> /dev/null << EOF
+#include <btrfs/ioctl.h>
+EOF
+if test $? -ne 0 ; then
+ echo exclude_graphdriver_btrfs
+fi
diff --git a/vendor/github.com/containers/buildah/btrfs_tag.sh b/vendor/github.com/containers/buildah/btrfs_tag.sh
new file mode 100644
index 000000000..cc48504ab
--- /dev/null
+++ b/vendor/github.com/containers/buildah/btrfs_tag.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+cc -E - > /dev/null 2> /dev/null << EOF
+#include <btrfs/version.h>
+EOF
+if test $? -ne 0 ; then
+ echo btrfs_noversion
+fi
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 33b7afccd..329835d7a 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -26,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.9.0-dev"
+ Version = "1.9.0"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
new file mode 100644
index 000000000..d35d79dfb
--- /dev/null
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -0,0 +1,896 @@
+- Changelog for v1.9.0 (2019-06-15)
+ * buildah-run: fix-out-of-range panic (2)
+ * Bump back to v1.9.0-dev
+
+- Changelog for v1.8.4 (2019-06-13)
+ Update containers/image to v2.0.0
+ run: fix hang with run and --isolation=chroot
+ run: fix hang when using run
+ chroot: drop unused function call
+ remove --> before imageID on build
+ Always close stdin pipe
+ Write deny to setgroups when doing single user mapping
+ Avoid including linux/memfd.h
+ Add a test for the symlink pointing to a directory
+ Add missing continue
+ Fix the handling of symlinks to absolute paths
+ Only set default network sysctls if not rootless
+ Support --dns=none like podman
+ fix bug --cpu-shares parsing typo
+ Fix validate complaint
+ Update vendor on containers/storage to v1.12.10
+ Create directory paths for COPY thereby ensuring correct perms
+ imagebuildah: use a stable sort for comparing build args
+ imagebuildah: tighten up cache checking
+ bud.bats: add a test verifying the order of --build-args
+ add -t to podman run
+ imagebuildah: simplify screening by top layers
+ imagebuildah: handle ID mappings for COPY --from
+ imagebuildah: apply additionalTags ourselves
+ bud.bats: test additional tags with cached images
+ bud.bats: add a test for WORKDIR and COPY with absolute destinations
+ Cleanup Overlay Mounts content
+
+- Changelog for v1.8.3 (2019-06-04)
+ * Add support for file secret mounts
+ * Add ability to skip secrets in mounts file
+ * allow 32bit builds
+ * fix tutorial instructions
+ * imagebuilder: pass the right contextDir to Add()
+ * add: use fileutils.PatternMatcher for .dockerignore
+ * bud.bats: add another .dockerignore test
+ * unshare: fallback to single usermapping
+ * addHelperSymlink: clear the destination on os.IsExist errors
+ * bud.bats: test replacing symbolic links
+ * imagebuildah: fix handling of destinations that end with '/'
+ * bud.bats: test COPY with a final "/" in the destination
+ * linux: add check for sysctl before using it
+ * unshare: set _CONTAINERS_ROOTLESS_GID
+ * Rework buildahimages
+ * build context: support https git repos
+ * Add a test for ENV special chars behaviour
+ * Check in new Dockerfiles
+ * Apply custom SHELL during build time
+ * config: expand variables only at the command line
+ * SetEnv: we only need to expand v once
+ * Add default /root if empty on chroot isolation
+ * Add support for Overlay volumes into the container.
+ * Export buildah validate volume functions so it can share code with libpod
+ * Bump baseline test to F30
+ * Fix rootless handling of /dev/shm size
+ * Avoid fmt.Printf() in the library
+ * imagebuildah: tighten cache checking back up
+ * Handle WORKDIR with dangling target
+ * Default Authfile to proper path
+ * Make buildah run --isolation follow BUILDAH_ISOLATION environment
+ * Vendor in latest containers/storage and containers/image
+ * getParent/getChildren: handle layerless images
+ * imagebuildah: recognize cache images for layerless images
+ * bud.bats: test scratch images with --layers caching
+ * Get CHANGELOG.md updates
+ * Add some symlinks to test our .dockerignore logic
+ * imagebuildah: addHelper: handle symbolic links
+ * commit/push: use an everything-allowed policy
+ * Correct manpage formatting in files section
+ * Remove must be root statement from buildah doc
+ * Change image names to stable, testing and upstream
+ * Bump back to v1.9.0-dev
+
+- Changelog for v1.8.2 (2019-05-02)
+ * Vendor Storage 1.12.6
+ * Create scratch file in TESTDIR
+ * Test bud-copy-dot with --layers picks up changed file
+ * Bump back to 1.9.0-dev
+
+- Changelog for v1.8.1 (2019-05-01)
+ * Don't create directory on container
+ * Replace kubernetes/pause in tests with k8s.gcr.io/pause
+ * imagebuildah: don't remove intermediate images if we need them
+ * Rework buildahimagegit to buildahimageupstream
+ * Fix Transient Mounts
+ * Handle WORKDIRs that are symlinks
+ * allow podman to build a client for windows
+ * Touch up 1.9-dev to 1.9.0-dev
+ * Bump to 1.9-dev
+
+- Changelog for v1.8.0 (2019-04-26)
+ * Resolve symlink when checking container path
+ * commit: commit on every instruction, but not always with layers
+ * CommitOptions: drop the unused OnBuild field
+ * makeImageRef: pass in the whole CommitOptions structure
+ * cmd: API cleanup: stores before images
+ * run: check if SELinux is enabled
+ * Fix buildahimages Dockerfiles to include support for additionalimages mounted from host.
+ * Detect changes in rootdir
+ * Fix typo in buildah-pull(1)
+ * Vendor in latest containers/storage
+ * Keep track of any build-args used during buildah bud --layers
+ * commit: always set a parent ID
+ * imagebuildah: rework unused-argument detection
+ * fix bug dest path when COPY .dockerignore
+ * Move Host IDMappings code from util to unshare
+ * Add BUILDAH_ISOLATION rootless back
+ * Travis CI: fail fast, upon error in any step
+ * imagebuildah: only commit images for intermediate stages if we have to
+ * Use errors.Cause() when checking for IsNotExist errors
+ * auto pass http_proxy to container
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.3 (2019-04-16)
+ * imagebuildah: don't leak image structs
+ * Add Dockerfiles for buildahimages
+ * Bump to Replace golang 1.10 with 1.12
+ * add --dns* flags to buildah bud
+ * Add hack/build_speed.sh to test speeds on building container images
+ * Create buildahimage Dockerfile for Quay
+ * rename 'is' to 'expect_output'
+ * squash.bats: test squashing in multi-layered builds
+ * bud.bats: test COPY --from in a Dockerfile while using the cache
+ * commit: make target image names optional
+ * Fix bud-args to allow comma separation
+ * oops, missed some tests in commit.bats
+ * new helper: expect_line_count
+ * New tests for #1467 (string slices in cmdline opts)
+ * Workarounds for dealing with travis; review feedback
+ * BATS tests - extensive but minor cleanup
+ * imagebuildah: defer pulling images for COPY --from
+ * imagebuildah: centralize COMMIT and image ID output
+ * Travis: do not use traviswait
+ * imagebuildah: only initialize imagebuilder configuration once per stage
+ * Make cleaner error on Dockerfile build errors
+ * unshare: move to pkg/
+ * unshare: move some code from cmd/buildah/unshare
+ * Fix handling of Slices versus Arrays
+ * imagebuildah: reorganize stage and per-stage logic
+ * imagebuildah: add empty layers for instructions
+ * Add missing step in installing into Ubuntu
+ * fix bug in .dockerignore support
+ * imagebuildah: deduplicate prepended "FROM" instructions
+ * Touch up intro
+ * commit: set created-by to the shell if it isn't set
+ * commit: check that we always set a "created-by"
+ * docs/buildah.md: add "containers-" prefixes under "SEE ALSO"
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.2 (2019-03-28)
+ * mount: do not create automatically a namespace
+ * buildah: correctly create the userns if euid!=0
+ * imagebuildah.Build: consolidate cleanup logic
+ * CommitOptions: drop the redundant Store field
+ * Move pkg/chrootuser from libpod to buildah.
+ * imagebuildah: record image IDs and references more often
+ * vendor imagebuilder v1.1.0
+ * imagebuildah: fix requiresStart/noRunsRemaining confusion
+ * imagebuildah: check for unused args across stages
+ * bump github.com/containernetworking/cni to v0.7.0-rc2
+ * imagebuildah: use "useCache" instead of "noCache"
+ * imagebuildah.resolveNameToImageRef(): take name as a parameter
+ * Export fields of the DockerIgnore struct
+ * imagebuildah: drop the duplicate containerIDs list
+ * rootless: by default use the host network namespace
+ * imagebuildah: split Executor and per-stage execution
+ * imagebuildah: move some fields around
+ * golint: make golint happy
+ * docs: 01-intro.md: add missing . in Dockerfile examples
+ * fix bug using .dockerignore
+ * Do not create empty mounts.conf file
+ * images: suppress a spurious blank line with no images
+ * from: distinguish between ADD and COPY
+ * fix bug to not separate each --label value with comma
+ * buildah-bud.md: correct a typo, note a default
+ * Remove mistaken code that got merged in other PR
+ * add sample registries.conf to docs
+ * escape shell variables in README example
+ * slirp4netns: set mtu to 65520
+ * images: imageReposToMap() already adds <none>:<none>
+ * imagebuildah.ReposToMap: move to cmd
+ * Build: resolve copyFrom references earlier
+ * Allow rootless users to use the cache directory in homedir
+ * bud.bats: use the per-test temp directory
+ * bud.bats: log output before counting length
+ * Simplify checks for leftover args
+ * Print commitID with --layers
+ * fix bug images use the template to print results
+ * rootless: honor --net host
+ * onsi/gomega: add missing files
+ * vendor latest openshift/imagebuilder
+ * Remove noop from squash help
+ * Prepend a comment to files setup in container
+ * imagebuildah resolveSymlink: fix handling of relative links
+ * Errors should be printed to stderr
+ * Add recommends for slirp4netns and fuse-overlay
+ * Update pull and pull-always flags
+ * Hide from users command options that we don't want them to use.
+ * Update secrets fipsmode patch to work on rootless containers
+ * fix unshare option handling and documentation
+ * Vendor in latest containers/storage
+ * Hard-code docker.Transport use in pull --all-tags
+ * Use a types.ImageReference instead of (transport, name) strings in pullImage etc.
+ * Move the computation of srcRef before first pullAndFindImage
+ * Don't throw away user-specified tag for pull --all-tags
+ * CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference
+ * Don't try to parse imageName as transport:image in pullImage
+ * Use reference.WithTag instead of manual string manipulation in Pull
+ * Don't pass image = transport:repo:tag, transport=transport to pullImage
+ * Fix confusing variable naming in Pull
+ * Don't try to parse image name as a transport:image
+ * Fix error reporting when parsing trans+image
+ * Remove 'transport == ""' handling from the pull path
+ * Clean up "pulls" of local image IDs / ID prefixes
+ * Simplify ExpandNames
+ * Document the semantics of transport+name returned by ResolveName
+ * Update gitvalidation epoch
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.1 (2019-02-26)
+ * vendor containers/image v1.5
+ * Move secrets code from libpod into buildah
+ * Update CHANGELOG.md with the past changes
+ * README.md: fix typo
+ * Fix a few issues found by tests/validate/gometalinter.sh
+ * Neutralize buildah/unshare on non-Linux platforms
+ * Explicitly specify a directory to find(1)
+ * README.md: rephrase Buildah description
+ * Stop printing default twice in cli --help
+ * install.md: add section about vendoring
+ * Bump to 1.8-dev
+
+- Changelog for v1.7 (2019-02-21)
+ * vendor containers/image v1.4
+ * Make "images --all" faster
+ * Remove a misleading comment
+ * Remove quiet option from pull options
+ * Make sure buildah pull --all-tags only works with docker transport
+ * Support oci layout format
+ * Fix pulling of images within buildah
+ * Fix tls-verify polarity
+ * Travis: execute make vendor and hack/tree_status.sh
+ * vendor.conf: remove unused dependencies
+ * add missing vendor/github.com/containers/libpod/vendor.conf
+ * vendor.conf: remove github.com/inconshreveable/mousetrap
+ * make vendor: always fetch the latest vndr
+ * add hack/tree_status.sh script
+ * Bump c/Storage to 1.10
+ * Add --all-tags test to pull
+ * mount: make error clearer
+ * Remove global flags from cli help
+ * Set --disable-compression to true as documented
+ * Help document using buildah mount in rootless mode
+ * healthcheck start-period: update documentation
+ * Vendor in latest c/storage and c/image
+ * dumpbolt: handle nested buckets
+ * Fix buildah commit compress by default
+ * Test on xenial, not trusty
+ * unshare: reexec using a memfd copy instead of the binary
+ * Add --target to bud command
+ * Fix example for setting multiple environment variables
+ * main: fix rootless mode
+ * buildah: force umask 022
+ * pull.bats: specify registry config when using registries
+ * pull.bats: use the temporary directory, not /tmp
+ * unshare: do not set rootless mode if euid=0
+ * Touch up cli help examples and a few nits
+ * Add an undocumented dumpbolt command
+ * Move tar commands into containers/storage
+ * Fix bud issue with 2 line Dockerfile
+ * Add package install descriptions
+ * Note configuration file requirements
+ * Replace urfave/cli with cobra
+ * cleanup vendor.conf
+ * Vendor in latest containers/storage
+ * Add Quiet to PullOptions and PushOptions
+ * cmd/commit: add flag omit-timestamp to allow for deterministic builds
+ * Add options for empty-layer history entries
+ * Make CLI help descriptions and usage a bit more consistent
+ * vndr opencontainers/selinux
+ * Bump baseline test Fedora to 29
+ * Bump to v1.7-dev-1
+ * Bump to v1.6-1
+ * Add support for ADD --chown
+ * imagebuildah: make EnsureContainerPath() check/create the right one
+ * Bump 1.7-dev
+ * Fix contrib/rpm/buildah.spec changelog date
+
+- Changelog for v1.6-1 (2019-01-18)
+ * Add support for ADD --chown
+ * imagebuildah: make EnsureContainerPath() check/create the right one
+ * Fix contrib/rpm/buildah.spec changelog date
+ * Vendor in latest containers/storage
+ * Revendor everything
+ * Revendor in latest code by release
+ * unshare: do not set USER=root
+ * run: ignore EIO when flushing at the end, avoid double log
+ * build-using-dockerfile,commit: disable compression by default
+ * Update some comments
+ * Make rootless work under no_pivot_root
+ * Add CreatedAtRaw date field for use with Format
+ * Properly format images JSON output
+ * pull: add all-tags option
+ * Fix support for multiple Short options
+ * pkg/blobcache: add synchronization
+ * Skip empty files in file check of conformance test
+ * Use NoPivot also for RUN, not only for run
+ * Remove no longer used isReferenceInsecure / isRegistryInsecure
+ * Do not set OCIInsecureSkipTLSVerify based on registries.conf
+ * Remove duplicate entries from images JSON output
+ * vendor parallel-copy from containers/image
+ * blobcache.bats: adjust explicit push tests
+ * Handle one line Dockerfile with layers
+ * We should only warn if user actually requests Hostname be set in image
+ * Fix compiler Warning about comparing different size types
+ * imagebuildah: don't walk if rootdir and path are equal
+ * Add aliases for buildah containers, so buildah list, ls and ps work
+ * vendor: use faster version instead of compress/gzip
+ * vendor: update libpod
+ * Properly handle Hostname inside of RUN command
+ * docs: mention how to mount in rootless mode
+ * tests: use fully qualified name for centos image
+ * travis.yml: use the fully qualified name for alpine
+ * mount: allow mount only when using vfs
+ * Add some tests for buildah pull
+ * Touch up images -q processing
+ * Refactor: Use library shared idtools.ParseIDMap() instead of bundling it
+ * bump GITVALIDATE_EPOCH
+ * cli.BudFlags: add `--platform` nop
+ * Makefile: allow packagers to more easily add tags
+ * Makefile: soften the requirement on git
+ * tests: add containers json test
+ * Inline blobCache.putBlob into blobCacheDestination.PutBlob
+ * Move saveStream and putBlob near blobCacheDestination.PutBlob
+ * Remove BlobCache.PutBlob
+ * Update for API changes
+ * Vendor c/image after merging c/image#536
+ * Handle 'COPY --from' in Dockerfile
+ * Vendor in latest content from github.com/containers/storage
+ * Clarify docker.io default in push with docker-daemon
+ * Test blob caching
+ * Wire in a hidden --blob-cache option
+ * Use a blob cache when we're asked to use one
+ * Add --disable-compression to 'build-using-dockerfile'
+ * Add a blob cache implementation
+ * vendor: update containers/storage
+ * Update for sysregistriesv2 API changes
+ * Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53
+ * clean up makefile variables
+ * Fix file permission
+ * Complete the instructions for the command
+ * Show warning when a build arg not used
+ * Assume user 0 group 0, if /etc/passwd file in container.
+ * Add buildah info command
+ * Enable -q when --filter is used for images command
+ * Add v1.5 Release Announcement
+ * Fix dangling filter for images command
+ * Fix completions to print Names as well as IDs
+ * tests: Fix file permissions
+ * Bump 1.6-dev
+
+- Changelog for v1.5-1 (2018-11-21)
+ * Bump min go to 1.10 in install.md
+ * vendor: update ostree-go
+ * Update docker build command line in conformance test
+ * Print command in SystemExec as debug information
+ * Add some skip word for inspect check in conformance test
+ * Update regex for multi stage base test
+ * Sort CLI flags
+ * vendor: update containers/storage
+ * Add note to install about non-root on RHEL/CentOS
+ * Update imagebuilder dependency to support heading ARGs in Dockerfile
+ * rootless: do not specify --rootless to the OCI runtime
+ * Export resolvesymlink function
+ * Exclude --force-rm from common bud cli flags
+ * run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume
+ * rootless: use slirp4netns to setup the network namespace
+ * Instructions for completing the pull command
+ * Fix travis to not run environment variable patch
+ * rootless: only discard network configuration names
+ * run: only set up /etc/hosts or /etc/resolv.conf with network
+ * common: getFormat: match entire string not only the prefix
+ * vendor: update libpod
+ * Change validation EPOCH
+ * Fixing broken link for container-registries.conf
+ * Restore rootless isolation test for from volume ro test
+ * ostree: fix tag for build constraint
+ * Handle directories better in bud -f
+ * vndr in latest containers/storage
+ * Fix unshare gofmt issue
+ * runSetupBuiltinVolumes(): break up volume setup
+ * common: support a per-user registries conf file
+ * unshare: do not override the configuration
+ * common: honor the rootless configuration file
+ * unshare: create a new mount namespace
+ * unshare: support libpod rootless pkg
+ * Use libpod GetDefaultStorage to report proper storage config
+ * Allow container storage to manage the SELinux labels
+ * Resolve image names with default transport in from command
+ * run: When the value of isolation is set, use the set value instead of the default value.
+ * Vendor in latest containers/storage and opencontainers/selinux
+ * Remove no longer valid todo
+ * Check for empty buildTime in version
+ * Change gofmt so it runs on all but 1.10
+ * Run gofmt only on Go 1.11
+ * Walk symlinks when checking cached images for copied/added files
+ * ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder
+ * Set WorkingDir to empty, not / for conformance
+ * Update calls in e2e to address 1101
+ * imagebuilder.BuildDockerfiles: return the image ID
+ * Update for changes in the containers/image API
+ * bump(github.com/containers/image)
+ * Allow setting --no-pivot default with an env var
+ * Add man page and bash completion, for --no-pivot
+ * Add the --no-pivot flag to the run command
+ * Improve reporting about individual pull failures
+ * Move the "short name but no search registries" error handling to resolveImage
+ * Return a "search registries were needed but empty" indication in util.ResolveName
+ * Simplify handling of the "tried to pull an image but found nothing" case in newBuilder
+ * Don't even invoke the pull loop if options.FromImage == ""
+ * Eliminate the long-running ref and img variables in resolveImage
+ * In resolveImage, return immediately on success
+ * Fix From As in Dockerfile
+ * Vendor latest containers/image
+ * Vendor in latest libpod
+ * Sort CLI flags of buildah bud
+ * Change from testing with golang 1.9 to 1.11.
+ * unshare: detect when unprivileged userns are disabled
+ * Optimize redundant code
+ * fix missing format param
+ * chroot: fix the args check
+ * imagebuildah: make ResolveSymLink public
+ * Update copy chown test
+ * buildah: use the same logic for XDG_RUNTIME_DIR as podman
+ * V1.4 Release Announcement
+ * Podman --privileged selinux is broken
+ * papr: mount source at gopath
+ * parse: Modify the return value
+ * parse: modify the verification of the isolation value
+ * Make sure we log or return every error
+ * pullImage(): when completing an image name, try docker://
+ * Fix up Tutorial 3 to account for format
+ * Vendor in latest containers/storage and containers/image
+ * docs/tutorials/01-intro.md: enhanced installation instructions
+ * Enforce "blocked" for registries for the "docker" transport
+ * Correctly set DockerInsecureSkipTLSVerify when pulling images
+ * chroot: set up seccomp and capabilities after supplemental groups
+ * chroot: fix capabilities list setup and application
+ * .papr.yml: log the podman version
+ * namespaces.bats: fix handling of uidmap/gidmap options in pairs
+ * chroot: only create user namespaces when we know we need them
+ * Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS)
+ * bash/buildah: add isolation option to the from command
+
+- Changelog for v1.4 (2018-10-02)
+ * from: fix isolation option
+ * Touchup pull manpage
+ * Export buildah ReserveSELinuxLabels so podman can use it
+ * Add buildah.io to README.md and doc fixes
+ * Update rmi man for prune changes
+ * Ignore file not found removal error in bud
+ * bump(github.com/containers/{storage,image})
+ * NewImageSource(): only create one Diff() at a time
+ * Copy ExposedPorts from base image into the config
+ * tests: run conformance test suite in Travis
+ * Change rmi --prune to not accept an imageID
+ * Clear intermediate container IDs after each stage
+ * Request podman version for build issues
+ * unshare: keep the additional groups of the user
+ * Builtin volumes should be owned by the UID/GID of the container
+ * Get rid of dangling whitespace in markdown files
+ * Move buildah from projectatomic/buildah to containers/buildah
+ * nitpick: parse.validateFlags loop in bud cli
+ * bash: Completion options
+ * Add signature policy to push tests
+ * vendor in latest containers/image
+ * Fix grammar in Container Tools Guide
+ * Don't build btrfs if it is not installed
+ * new: Return image-pulling errors from resolveImage
+ * pull: Return image-pulling errors from pullImage
+ * Add more volume mount tests
+ * chroot: create missing parent directories for volume mounts
+ * Push: Allow an empty destination
+ * Add Podman relationship to readme, create container tools guide
+ * Fix arg usage in buildah-tag
+ * Add flags/arguments order verification to other commands
+ * Handle ErrDuplicateName errors from store.CreateContainer()
+ * Evaluate symbolic links on Add/Copy Commands
+ * Vendor in latest containers/image and containers/storage
+ * Retain bounding set when running containers as non root
+ * run container-diff tests in Travis
+ * buildah-images.md: Fix option contents
+ * push: show image digest after push succeed
+ * Vendor in latest containers/storage,image,libpod and runc
+ * Change references to cri-o to point at new repository
+ * Exclude --layers from the common bud cli flags
+ * demos: Increase the executable permissions
+ * run: clear default seccomp filter if not enabled
+ * Bump maximum cyclomatic complexity to 45
+ * stdin: on HUP, read everything
+ * nitpick: use tabs in tests/helpers.bash
+ * Add flags/arguments order verification to one arg commands
+ * nitpick: decrease cognitive complexity in buildah-bud
+ * rename: Avoid renaming the same name as other containers
+ * chroot isolation: chroot() before setting up seccomp
+ * Small nitpick at the "if" condition in tag.go
+ * cmd/images: Modify json option
+ * cmd/images: Disallow the input of image when using the -a option
+ * Fix examples to include context directory
+ * Update containers/image to fix commit layer issue
+ * cmd/containers: End loop early when using the json option
+ * Make buildah-from error message clear when flags are after arg
+ * Touch up README.md for conformance tests
+ * Update container/storage for lock fix
+ * cmd/rm: restore the correct containerID display
+ * Remove debug lines
+ * Remove docker build image after each test
+ * Add README for conformance test
+ * Update the MakeOptions to accept all command options for buildah
+ * Update regex to fit the docker output in test "run with JSON"
+ * cmd/buildah: Remove redundant variable declarations
+ * Warn about using Commands in Dockerfile that are not supported by OCI.
+ * Add buildah bud conformance test
+ * Fix rename to also change container name in builder
+ * Makefile: use $(GO) env-var everywhere
+ * Cleanup code to more closely match Docker Build images
+ * Document BUILDAH_* environment variables in buildah bud --help output
+ * Return error immediately if error occurs in Prepare step
+ * Fix --layers ADD from url issue
+ * Add "Sign your PRs" TOC item to contributing.md.
+ * Display the correct ID after deleting image
+ * rmi: Modify the handling of errors
+ * Let util.ResolveName() return parsing errors
+ * Explain Open Container Initiative (OCI) acronym, add link
+ * Update vendor for urfave/cli back to master
+ * Handle COPY --chown in Dockerfile
+ * Switch to Recommends container-selinux
+ * Update vendor for containernetworking, imagebuildah and podman
+ * Document STORAGE_DRIVER and STORAGE_OPTS environment variable
+ * Change references to projectatomic/libpod to containers/libpod
+ * Add container PATH retrieval example
+ * Expand variables names for --env
+ * imagebuildah: provide a way to provide stdin for RUN
+ * Remove an unused srcRef.NewImageSource in pullImage
+ * chroot: correct a comment
+ * chroot: bind mount an empty directory for masking
+ * Don't bother with --no-pivot for rootless isolation
+ * CentOS need EPEL repo
+ * Export a Pull() function
+ * Remove stream options, since docker build does not have it
+ * release v1.3: mention openSUSE
+ * Add Release Announcements directory
+ * Bump to v1.4-dev
+
+- Changelog for v1.3 (2018-08-04)
+ * Revert pull error handling from 881
+ * bud should not search context directory for Dockerfile
+ * Set BUILDAH_ISOLATION=rootless when running unprivileged
+ * .papr.sh: Also test with BUILDAH_ISOLATION=rootless
+ * Skip certain tests when we're using "rootless" isolation
+ * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ * Add and implement IsolationOCIRootless
+ * Add a value for IsolationOCIRootless
+ * Fix rmi to remove intermediate images associated with an image
+ * Return policy error on pull
+ * Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c
+ * Switch to github.com/containers/image/pkg/sysregistriesv2
+ * unshare: make adjusting the OOM score optional
+ * Add flags validation
+ * chroot: handle raising process limits
+ * chroot: make the resource limits name map module-global
+ * Remove rpm.bats, we need to run this manually
+ * Set the default ulimits to match Docker
+ * buildah: no args is out of bounds
+ * unshare: error message missed the pid
+ * preprocess ".in" suffixed Dockerfiles
+ * Fix the the in buildah-config man page
+ * Only test rpmbuild on latest fedora
+ * Add support for multiple Short options
+ * Update to latest urfave/cli
+ * Add additional SELinux tests
+ * Vendor in latest github.com/containers/{image;storage}
+ * Stop testing with golang 1.8
+ * Fix volume cache issue with buildah bud --layers
+ * Create buildah pull command
+ * Increase the deadline for gometalinter during 'make validate'
+ * .papr.sh: Also test with BUILDAH_ISOLATION=chroot
+ * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ * Add a Dockerfile
+ * Set BUILDAH_ISOLATION=chroot when running unprivileged
+ * Add and implement IsolationChroot
+ * Update github.com/opencontainers/runc
+ * maybeReexecUsingUserNamespace: add a default for root
+ * Allow ping command without NET_RAW Capabilities
+ * rmi.storageImageID: fix Wrapf format warning
+ * Allow Dockerfile content to come from stdin
+ * Vendor latest container/storage to fix overlay mountopt
+ * userns: assign additional IDs sequentially
+ * Remove default dev/pts
+ * Add OnBuild test to baseline test
+ * tests/run.bats(volumes): use :z when SELinux is enabled
+ * Avoid a stall in runCollectOutput()
+ * Use manifest from container/image
+ * Vendor in latest containers/image and containers/storage
+ * add rename command
+ * Completion command
+ * Update CHANGELOG.md
+ * Update vendor for runc to fix 32 bit builds
+ * bash completion: remove shebang
+ * Update vendor for runc to fix 32 bit builds
+
+- Changelog for v1.2 (2018-07-14)
+ * Vendor in latest containers/image
+ * build-using-dockerfile: let -t include transports again
+ * Block use of /proc/acpi and /proc/keys from inside containers
+ * Fix handling of --registries-conf
+ * Fix becoming a maintainer link
+ * add optional CI test for darwin
+ * Don't pass a nil error to errors.Wrapf()
+ * image filter test: use kubernetes/pause as a "since"
+ * Add --cidfile option to from
+ * vendor: update containers/storage
+ * Make the CONTRIBUTOR.md file easier for contributors to find
+ * Add a --loglevel option to build-with-dockerfile
+ * Create Development plan
+ * cmd: Code improvement
+ * allow buildah cross compile for a darwin target
+ * Add unused function param lint check
+ * docs: Follow man-pages(7) suggestions for SYNOPSIS
+ * Start using github.com/seccomp/containers-golang
+ * umount: add all option to umount all mounted containers
+ * runConfigureNetwork(): remove an unused parameter
+ * Update github.com/opencontainers/selinux
+ * Fix buildah bud --layers
+ * Force ownership of /etc/hosts and /etc/resolv.conf to 0:0
+ * main: if unprivileged, reexec in a user namespace
+ * Vendor in latest imagebuilder
+ * Reduce the complexity of the buildah.Run function
+ * mount: output it before replacing lastError
+ * Vendor in latest selinux-go code
+ * Implement basic recognition of the "--isolation" option
+ * Run(): try to resolve non-absolute paths using $PATH
+ * Run(): don't include any default environment variables
+ * build without seccomp
+ * vendor in latest runtime-tools
+ * bind/mount_unsupported.go: remove import errors
+ * Update github.com/opencontainers/runc
+ * Add Capabilities lists to BuilderInfo
+ * Tweaks for commit tests
+ * commit: recognize committing to second storage locations
+ * Fix ARGS parsing for run commands
+ * Add info on registries.conf to from manpage
+ * Switch from using docker to podman for testing in .papr
+ * buildah: set the HTTP User-Agent
+ * ONBUILD tutorial
+ * Add information about the configuration files to the install docs
+ * Makefile: add uninstall
+ * Add tilde info for push to troubleshooting
+ * mount: support multiple inputs
+ * Use the right formatting when adding entries to /etc/hosts
+ * Vendor in latest go-selinux bindings
+ * Allow --userns-uid-map/--userns-gid-map to be global options
+ * bind: factor out UnmountMountpoints
+ * Run(): simplify runCopyStdio()
+ * Run(): handle POLLNVAL results
+ * Run(): tweak terminal mode handling
+ * Run(): rename 'copyStdio' to 'copyPipes'
+ * Run(): don't set a Pdeathsig for the runtime
+ * Run(): add options for adding and removing capabilities
+ * Run(): don't use a callback when a slice will do
+ * setupSeccomp(): refactor
+ * Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers
+ * Escape use of '_' in .md docs
+ * Break out getProcIDMappings()
+ * Break out SetupIntermediateMountNamespace()
+ * Add Multi From Demo
+ * Use the c/image conversion code instead of converting configs manually
+ * Don't throw away the manifest MIME type and guess again
+ * Consolidate loading manifest and config in initConfig
+ * Pass a types.Image to Builder.initConfig
+ * Require an image ID in importBuilderDataFromImage
+ * Use c/image/manifest.GuessMIMEType instead of a custom heuristic
+ * Do not ignore any parsing errors in initConfig
+ * Explicitly handle "from scratch" images in Builder.initConfig
+ * Fix parsing of OCI images
+ * Simplify dead but dangerous-looking error handling
+ * Don't ignore v2s1 history if docker_version is not set
+ * Add --rm and --force-rm to buildah bud
+ * Add --all,-a flag to buildah images
+ * Separate stdio buffering from writing
+ * Remove tty check from images --format
+ * Add environment variable BUILDAH_RUNTIME
+ * Add --layers and --no-cache to buildah bud
+ * Touch up images man
+ * version.md: fix DESCRIPTION
+ * tests: add containers test
+ * tests: add images test
+ * images: fix usage
+ * fix make clean error
+ * Change 'registries' to 'container registries' in man
+ * add commit test
+ * Add(): learn to record hashes of what we add
+ * Minor update to buildah config documentation for entrypoint
+ * Bump to v1.2-dev
+ * Add registries.conf link to a few man pages
+
+- Changelog for v1.1 (2018-06-08)
+ * Drop capabilities if running container processes as non root
+ * Print Warning message if cmd will not be used based on entrypoint
+ * Update 01-intro.md
+ * Shouldn't add insecure registries to list of search registries
+ * Report errors on bad transports specification when pushing images
+ * Move parsing code out of common for namespaces and into pkg/parse.go
+ * Add disable-content-trust noop flag to bud
+ * Change freenode chan to buildah
+ * runCopyStdio(): don't close stdin unless we saw POLLHUP
+ * Add registry errors for pull
+ * runCollectOutput(): just read until the pipes are closed on us
+ * Run(): provide redirection for stdio
+ * rmi, rm: add test
+ * add mount test
+ * Add parameter validation for commands that do not require parameters
+ * Add context dir to bud command in baseline test
+ * run.bats: check that we can run with symlinks in the bundle path
+ * Give better messages to users when image can not be found
+ * use absolute path for bundlePath
+ * Add environment variable to buildah --format
+ * rm: add validation to args and all option
+ * Accept json array input for config entrypoint
+ * Run(): process RunOptions.Mounts, and its flags
+ * Run(): only collect error output from stdio pipes if we created some
+ * Add OnBuild support for Dockerfiles
+ * Quick fix on demo readme
+ * run: fix validate flags
+ * buildah bud should require a context directory or URL
+ * Touchup tutorial for run changes
+ * Validate common bud and from flags
+ * images: Error if the specified imagename does not exist
+ * inspect: Increase err judgments to avoid panic
+ * add test to inspect
+ * buildah bud picks up ENV from base image
+ * Extend the amount of time travis_wait should wait
+ * Add a make target for Installing CNI plugins
+ * Add tests for namespace control flags
+ * copy.bats: check ownerships in the container
+ * Fix SELinux test errors when SELinux is enabled
+ * Add example CNI configurations
+ * Run: set supplemental group IDs
+ * Run: use a temporary mount namespace
+ * Use CNI to configure container networks
+ * add/secrets/commit: Use mappings when setting permissions on added content
+ * Add CLI options for specifying namespace and cgroup setup
+ * Always set mappings when using user namespaces
+ * Run(): break out creation of stdio pipe descriptors
+ * Read UID/GID mapping information from containers and images
+ * Additional bud CI tests
+ * Run integration tests under travis_wait in Travis
+ * build-using-dockerfile: add --annotation
+ * Implement --squash for build-using-dockerfile and commit
+ * Vendor in latest container/storage for devicemapper support
+ * add test to inspect
+ * Vendor github.com/onsi/ginkgo and github.com/onsi/gomega
+ * Test with Go 1.10, too
+ * Add console syntax highlighting to troubleshooting page
+ * bud.bats: print "$output" before checking its contents
+ * Manage "Run" containers more closely
+ * Break Builder.Run()'s "run runc" bits out
+ * util.ResolveName(): handle completion for tagged/digested image names
+ * Handle /etc/hosts and /etc/resolv.conf properly in container
+ * Documentation fixes
+ * Make it easier to parse our temporary directory as an image name
+ * Makefile: list new pkg/ subdirectories as dependencies for buildah
+ * containerImageSource: return more-correct errors
+ * API cleanup: PullPolicy and TerminalPolicy should be types
+ * Make "run --terminal" and "run -t" aliases for "run --tty"
+ * Vendor github.com/containernetworking/cni v0.6.0
+ * Update github.com/containers/storage
+ * Update github.com/projectatomic/libpod
+ * Add support for buildah bud --label
+ * buildah push/from can push and pull images with no reference
+ * Vendor in latest containers/image
+ * Update gometalinter to fix install.tools error
+ * Update troubleshooting with new run workaround
+ * Added a bud demo and tidied up
+ * Attempt to download the file from the URL; if that fails, assume it is a Dockerfile
+ * Add buildah bud CI tests for ENV variables
+ * Re-enable rpm .spec version check and new commit test
+ * Update buildah scratch demo to support el7
+ * Added Docker compatibility demo
+ * Update to F28 and new run format in baseline test
+ * Touchup man page short options across man pages
+ * Added demo dir and a demo; changed distrorelease
+ * builder-inspect: fix format option
+ * Add cpu-shares short flag (-c) and cpu-shares CI tests
+ * Minor fixes to formatting in rpm spec changelog
+ * Fix rpm .spec changelog formatting
+ * CI tests and minor fix for cache related noop flags
+ * buildah-from: add effective value to mount propagation
+
+- Changelog for v1.0 (2018-05-06)
+ * Declare Buildah 1.0
+ * Add cache-from and no-cache noops, and fix docs
+ * Update option and documentation for --force-rm
+ * Adding noop for --force-rm to match --rm
+ * Add buildah bud ENTRYPOINT,CMD,RUN tests
+ * Adding buildah bud RUN test scenarios
+ * Extend tests for empty buildah run command
+ * Fix formatting error in run.go
+ * Update buildah run to make command required
+ * Expanding buildah run cmd/entrypoint tests
+ * Update test cases for buildah run behaviour
+ * Remove buildah run cmd and entrypoint execution
+ * Add Files section with registries.conf to pertinent man pages
+ * tests/config: perfect test
+ * tests/from: add name test
+ * Do not print directly to stdout in Commit()
+ * Touch up auth test commands
+ * Force "localhost" as a default registry
+ * Drop util.GetLocalTime()
+ * Vendor in latest containers/image
+ * Validate host and container paths passed to --volume
+ * test/from: add add-host test
+ * Add --compress, --rm, --squash flags as a noop for bud
+ * Add FIPS mode secret to buildah run and bud
+ * Add config --comment/--domainname/--history-comment/--hostname
+ * 'buildah config': stop replacing Created-By whenever it's not specified
+ * Modify man pages so they compile correctly in mandb
+ * Add description on how to do --isolation to buildah-bud man page
+ * Add support for --iidfile to bud and commit
+ * Refactor buildah bud for vendoring
+ * Fail if date or git not installed
+ * Revert update of entrypoint behaviour to match docker
+ * Vendor in latest imagebuilder code to fix multiple stage builds
+ * Add /bin/sh -c to entrypoint in config
+ * image_test: Improve the test
+ * Fix README example of buildah config
+ * buildah-image: add validation to 'format'
+ * Simple changes to allow buildah to pass make validate
+ * Clarify the use of buildah config options
+ * containers_test: Perfect testing
+ * buildah images and podman images are listing different sizes
+ * buildah-containers: add tests and example to the man page
+ * buildah-containers: add validation to 'format'
+ * Clarify the use of buildah config options
+ * Minor fix for lighttpd example in README
+ * Add tls-verification to troubleshooting
+ * Modify buildah rmi to account for changes in containers/storage
+ * Vendor in latest containers/image and containers/storage
+ * addcopy: add src validation
+ * Remove tarball as an option from buildah push --help
+ * Fix secrets patch
+ * Update entrypoint behaviour to match docker
+ * Display imageId after commit
+ * config: add support for StopSignal
+ * Fix docker login issue in travis.yml
+ * Allow referencing stages as index and names
+ * Add multi-stage builds tests
+ * Add multi-stage builds support
+ * Add accessor functions for comment and stop signal
+ * Vendor in latest imagebuilder, to get mixed case AS support
+ * Allow umount to take multiple containers
+ * Update buildah push doc
+ * buildah bud walks symlinks
+ * Image name is currently required for commit; update man page
+
+- Changelog for v0.16.0 (2018-04-08)
+ * Bump to v0.16.0
+ * Remove requires for ostree-lib in rpm spec file
+ * Add support for shell
+ * buildah.spec should require ostree-libs
+ * Vendor in latest containers/image
+ * bash: prefer options
+ * Change image time to locale, add troubleshooting.md, add logo to other .md files
+ * buildah-run.md: fix erroneous SYNOPSIS
+ * docs: fix erroneous example
+ * Allow --cmd parameter to have commands as values
+ * Touchup README to re-enable logo
+ * Clean up README.md
+ * Make default-mounts-file a hidden option
+ * Document the mounts.conf file
+ * Fix man pages to format correctly
+ * Add various transport support to buildah from
+ * Add unit tests to run.go
+ * If the user overrides the storage driver, the options should be dropped
+ * Show Config/Manifest as JSON string in inspect when format is not set
+ * Switch which for that in README.md
+ * Remove COPR
+ * Fix wrong order of parameters
+ * Vendor in latest containers/image
+ * Remove shallowCopy(), which shouldn't be saving us time any more
+ * shallowCopy: avoid a second read of the container's layer
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index c65926c8e..ae60d9bbe 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -84,9 +84,18 @@ type runUsingChrootExecSubprocOptions struct {
// RunUsingChroot runs a chrooted process, using some of the settings from the
// passed-in spec, and using the specified bundlePath to hold temporary files,
// directories, and mountpoints.
-func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
+func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
var confwg sync.WaitGroup
-
+ var homeFound bool
+ for _, env := range spec.Process.Env {
+ if strings.HasPrefix(env, "HOME=") {
+ homeFound = true
+ break
+ }
+ }
+ if !homeFound {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOME=%s", homeDir))
+ }
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@@ -211,7 +220,6 @@ func runUsingChrootMain() {
var stdout io.Writer
var stderr io.Writer
fdDesc := make(map[int]string)
- deferred := func() {}
if options.Spec.Process.Terminal {
// Create a pseudo-terminal -- open a copy of the master side.
ptyMasterFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
@@ -361,12 +369,16 @@ func runUsingChrootMain() {
return
}
}
+ if err := unix.SetNonblock(relays[unix.Stdin], true); err != nil {
+ logrus.Errorf("error setting %d to nonblocking: %v", relays[unix.Stdin], err)
+ }
go func() {
buffers := make(map[int]*bytes.Buffer)
for _, writeFd := range relays {
buffers[writeFd] = new(bytes.Buffer)
}
pollTimeout := -1
+ stdinClose := false
for len(relays) > 0 {
fds := make([]unix.PollFd, 0, len(relays))
for fd := range relays {
@@ -386,6 +398,9 @@ func runUsingChrootMain() {
removeFds[int(rfd.Fd)] = struct{}{}
}
if rfd.Revents&unix.POLLIN == 0 {
+ if stdinClose && stdinCopy == nil {
+ continue
+ }
continue
}
b := make([]byte, 8192)
@@ -440,8 +455,19 @@ func runUsingChrootMain() {
if buffer.Len() > 0 {
pollTimeout = 100
}
+ if wfd == relays[unix.Stdin] && stdinClose && buffer.Len() == 0 {
+ stdinCopy.Close()
+ delete(relays, unix.Stdin)
+ }
}
for rfd := range removeFds {
+ if rfd == unix.Stdin {
+ buffer, found := buffers[relays[unix.Stdin]]
+ if found && buffer.Len() > 0 {
+ stdinClose = true
+ continue
+ }
+ }
if !options.Spec.Process.Terminal && rfd == unix.Stdin {
stdinCopy.Close()
}
@@ -452,7 +478,6 @@ func runUsingChrootMain() {
// Set up mounts and namespaces, and run the parent subprocess.
status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning)
- deferred()
if err != nil {
fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err)
os.Exit(1)
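The RunUsingChroot change above makes the container environment always carry a HOME entry, falling back to the new homeDir parameter only when the spec does not already set one. A minimal standalone sketch of that pattern is shown below; the helper name ensureHomeEnv is hypothetical and is not part of buildah's API.

```go
package main

import (
	"fmt"
	"strings"
)

// ensureHomeEnv returns env with a HOME entry appended when none is present,
// mirroring the check RunUsingChroot now performs with its homeDir argument.
// This standalone helper is illustrative only.
func ensureHomeEnv(env []string, homeDir string) []string {
	for _, e := range env {
		if strings.HasPrefix(e, "HOME=") {
			return env // the spec already sets HOME; leave it alone
		}
	}
	return append(env, fmt.Sprintf("HOME=%s", homeDir))
}

func main() {
	env := []string{"PATH=/usr/bin:/bin", "TERM=xterm"}
	fmt.Println(ensureHomeEnv(env, "/root"))
	// [PATH=/usr/bin:/bin TERM=xterm HOME=/root]
}
```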
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 05b0abb23..234f93259 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -3,7 +3,6 @@ package buildah
import (
"context"
"encoding/json"
- "os"
"runtime"
"strings"
"time"
@@ -269,21 +268,11 @@ func (b *Builder) Env() []string {
// built using an image built from this container.
func (b *Builder) SetEnv(k string, v string) {
reset := func(s *[]string) {
- getenv := func(name string) string {
- for i := range *s {
- val := strings.SplitN((*s)[i], "=", 2)
- if len(val) == 2 && val[0] == name {
- return val[1]
- }
- }
- return name
- }
n := []string{}
for i := range *s {
if !strings.HasPrefix((*s)[i], k+"=") {
n = append(n, (*s)[i])
}
- v = os.Expand(v, getenv)
}
n = append(n, k+"="+v)
*s = n
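With the os.Expand call removed, SetEnv now stores the value verbatim instead of expanding $NAME references against the existing environment at set time. The sketch below, under that assumption, shows the remaining reset behavior; the standalone setEnv function is illustrative, not the Builder method itself.

```go
package main

import (
	"fmt"
	"strings"
)

// setEnv removes any existing "key=..." entries and appends "key=value",
// keeping the value as-is with no variable expansion.
func setEnv(env []string, key, value string) []string {
	out := make([]string, 0, len(env)+1)
	for _, e := range env {
		if !strings.HasPrefix(e, key+"=") {
			out = append(out, e)
		}
	}
	return append(out, key+"="+value)
}

func main() {
	env := []string{"PATH=/usr/bin", "FOO=old"}
	fmt.Println(setEnv(env, "FOO", "$PATH/new"))
	// [PATH=/usr/bin FOO=$PATH/new] -- "$PATH" is kept literally
}
```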
diff --git a/vendor/github.com/containers/buildah/developmentplan.md b/vendor/github.com/containers/buildah/developmentplan.md
new file mode 100644
index 000000000..0eb08c0f1
--- /dev/null
+++ b/vendor/github.com/containers/buildah/developmentplan.md
@@ -0,0 +1,13 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# Development Plan
+
+## Development goals for Buildah
+
+ * Integration into Kubernetes and potentially other tools. The biggest requirement for this is being able to run Buildah within a standard Linux container without SYS_ADMIN privileges. This would allow Buildah to run non-privileged containers inside of Kubernetes, so you could distribute your container workloads.
+
+ * Integration with user namespaces. Podman already has this; the goal is for `buildah bud` and `buildah run` to run their containers in a user namespace, giving the builder better security isolation from the host.
+
+ * The `buildah bud` command's goal is to have feature parity with other OCI image and container build systems.
+
+ * Addressing issues from the community as reported in the [Issues](https://github.com/containers/buildah/issues) page.
diff --git a/vendor/github.com/containers/buildah/docker/AUTHORS b/vendor/github.com/containers/buildah/docker/AUTHORS
new file mode 100644
index 000000000..b2cd9ecbe
--- /dev/null
+++ b/vendor/github.com/containers/buildah/docker/AUTHORS
@@ -0,0 +1,1788 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad <aanand.prasad@gmail.com>
+Aaron Davidson <aaron@databricks.com>
+Aaron Feng <aaron.feng@gmail.com>
+Aaron Huslage <huslage@gmail.com>
+Aaron Lehmann <aaron.lehmann@docker.com>
+Aaron Welch <welch@packet.net>
+Aaron.L.Xu <likexu@harmonycloud.cn>
+Abel Muiño <amuino@gmail.com>
+Abhijeet Kasurde <akasurde@redhat.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abhishek Chanda <abhishek.becs@gmail.com>
+Abin Shahab <ashahab@altiscale.com>
+Adam Avilla <aavilla@yp.com>
+Adam Eijdenberg <adam.eijdenberg@gmail.com>
+Adam Kunk <adam.kunk@tiaa-cref.org>
+Adam Miller <admiller@redhat.com>
+Adam Mills <adam@armills.info>
+Adam Singer <financeCoding@gmail.com>
+Adam Walz <adam@adamwalz.net>
+Addam Hardy <addam.hardy@gmail.com>
+Aditi Rajagopal <arajagopal@us.ibm.com>
+Aditya <aditya@netroy.in>
+Adolfo Ochagavía <aochagavia92@gmail.com>
+Adria Casas <adriacasas88@gmail.com>
+Adrian Moisey <adrian@changeover.za.net>
+Adrian Mouat <adrian.mouat@gmail.com>
+Adrian Oprea <adrian@codesi.nz>
+Adrien Folie <folie.adrien@gmail.com>
+Adrien Gallouët <adrien@gallouet.fr>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Feldman <aidan.feldman@gmail.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
+AJ Bowen <aj@gandi.net>
+Ajey Charantimath <ajey.charantimath@gmail.com>
+ajneu <ajneu@users.noreply.github.com>
+Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
+Akira Koyasu <mail@akirakoyasu.net>
+Akshay Karle <akshay.a.karle@gmail.com>
+Al Tobey <al@ooyala.com>
+alambike <alambike@gmail.com>
+Alan Scherger <flyinprogrammer@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Albert Callarisa <shark234@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alena Prokharchyk <alena@rancher.com>
+Alessandro Boch <aboch@docker.com>
+Alessio Biancalana <dottorblaster@gmail.com>
+Alex Chan <alex@alexwlchan.net>
+Alex Chen <alexchenunix@gmail.com>
+Alex Coventry <alx@empirical.com>
+Alex Crawford <alex.crawford@coreos.com>
+Alex Ellis <alexellis2@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Olshansky <i@creagenics.com>
+Alex Samorukov <samm@os2.kiev.ua>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Artemenko <svetlyak.40wt@gmail.com>
+Alexander Boyd <alex@opengroove.org>
+Alexander Larsson <alexl@redhat.com>
+Alexander Morozov <lk4d4@docker.com>
+Alexander Shopov <ash@kambanaria.org>
+Alexandre Beslic <alexandre.beslic@gmail.com>
+Alexandre González <agonzalezro@gmail.com>
+Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
+Alexey Guskov <lexag@mail.ru>
+Alexey Kotlyarov <alexey@infoxchange.net.au>
+Alexey Shamrin <shamrin@gmail.com>
+Alexis THOMAS <fr.alexisthomas@gmail.com>
+Alfred Landrum <alfred.landrum@docker.com>
+Ali Dehghani <ali.dehghani.g@gmail.com>
+Alicia Lauerman <alicia@eta.im>
+Alihan Demir <alihan_6153@hotmail.com>
+Allen Madsen <blatyo@gmail.com>
+Allen Sun <allen.sun@daocloud.io>
+almoehi <almoehi@users.noreply.github.com>
+Alvaro Saurin <alvaro.saurin@gmail.com>
+Alvin Richards <alvin.richards@docker.com>
+amangoel <amangoel@gmail.com>
+Amen Belayneh <amenbelayneh@gmail.com>
+Amir Goldstein <amir73il@aquasec.com>
+Amit Bakshi <ambakshi@gmail.com>
+Amit Krishnan <amit.krishnan@oracle.com>
+Amit Shukla <amit.shukla@docker.com>
+Amy Lindburg <amy.lindburg@docker.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Anatoly Borodin <anatoly.borodin@gmail.com>
+Anchal Agrawal <aagrawa4@illinois.edu>
+Anders Janmyr <anders@janmyr.com>
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky <robotciti@live.com>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
+Andreas Köhler <andi5.py@gmx.net>
+Andreas Savvides <andreas@editd.com>
+Andreas Tiefenthaler <at@an-ti.eu>
+Andrei Gherzan <andrei@resin.io>
+Andrew C. Bodine <acbodine@us.ibm.com>
+Andrew Clay Shafer <andrewcshafer@gmail.com>
+Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
+Andrew Gerrand <adg@golang.org>
+Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew Hsu <andrewhsu@docker.com>
+Andrew Kuklewicz <kookster@gmail.com>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
+Andrew Macpherson <hopscotch23@gmail.com>
+Andrew Martin <sublimino@gmail.com>
+Andrew McDonnell <bugs@andrewmcdonnell.net>
+Andrew Munsell <andrew@wizardapps.net>
+Andrew Po <absourd.noise@gmail.com>
+Andrew Weiss <andrew.weiss@outlook.com>
+Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
+André Martins <aanm90@gmail.com>
+andy <ztao@tibco-support.com>
+Andy Chambers <anchambers@paypal.com>
+andy diller <dillera@gmail.com>
+Andy Goldstein <agoldste@redhat.com>
+Andy Kipp <andy@rstudio.com>
+Andy Rothfusz <github@developersupport.net>
+Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
+Anes Hasicic <anes.hasicic@gmail.com>
+Anil Belur <askb23@gmail.com>
+Anil Madhavapeddy <anil@recoil.org>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anonmily <michelle@michelleliu.io>
+Anran Qiao <anran.qiao@daocloud.io>
+Anthon van der Neut <anthon@mnt.org>
+Anthony Baire <Anthony.Baire@irisa.fr>
+Anthony Bishopric <git@anthonybishopric.com>
+Anthony Dahanne <anthony.dahanne@gmail.com>
+Anthony Sottile <asottile@umich.edu>
+Anton Löfgren <anton.lofgren@gmail.com>
+Anton Nikitin <anton.k.nikitin@gmail.com>
+Anton Polonskiy <anton.polonskiy@gmail.com>
+Anton Tiurin <noxiouz@yandex.ru>
+Antonio Murdaca <antonio.murdaca@gmail.com>
+Antonis Kalipetis <akalipetis@gmail.com>
+Antony Messerli <amesserl@rackspace.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com>
+apocas <petermdias@gmail.com>
+Arash Deshmeh <adeshmeh@ca.ibm.com>
+ArikaChen <eaglesora@gmail.com>
+Arnaud Lefebvre <a.lefebvre@outlook.fr>
+Arnaud Porterie <arnaud.porterie@docker.com>
+Arthur Barr <arthur.barr@uk.ibm.com>
+Arthur Gautier <baloo@gandi.net>
+Artur Meyster <arthurfbi@yahoo.com>
+Arun Gupta <arun.gupta@gmail.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Das <andas222@gmail.com>
+Avi Miller <avi.miller@oracle.com>
+Avi Vaid <avaid1996@gmail.com>
+ayoshitake <airandfingers@gmail.com>
+Azat Khuyiyakhmetov <shadow_uz@mail.ru>
+Bardia Keyoumarsi <bkeyouma@ucsc.edu>
+Barnaby Gray <barnaby@pickle.me.uk>
+Barry Allard <barry.allard@gmail.com>
+Bartłomiej Piotrowski <b@bpiotrowski.pl>
+Bastiaan Bakker <bbakker@xebia.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Bonnefoy <frenchben@docker.com>
+Ben Firshman <ben@firshman.co.uk>
+Ben Golub <ben.golub@dotcloud.com>
+Ben Hall <ben@benhall.me.uk>
+Ben Sargent <ben@brokendigits.com>
+Ben Severson <BenSeverson@users.noreply.github.com>
+Ben Toews <mastahyeti@gmail.com>
+Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bernerd Schaefer <bj.schaefer@gmail.com>
+Bert Goethals <bert@bertg.be>
+Bharath Thiruveedula <bharath_ves@hotmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bhumika Bayani <bhumikabayani@gmail.com>
+Bilal Amarni <bilal.amarni@gmail.com>
+Bill W <SydOps@users.noreply.github.com>
+bin liu <liubin0329@users.noreply.github.com>
+Bingshen Wang <bingshen.wbs@alibaba-inc.com>
+Blake Geno <blakegeno@gmail.com>
+Boaz Shuster <ripcurld.github@gmail.com>
+bobby abbott <ttobbaybbob@gmail.com>
+Boshi Lian <farmer1992@gmail.com>
+boucher <rboucher@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
+boynux <boynux@gmail.com>
+Bradley Cicenas <bradley.cicenas@gmail.com>
+Bradley Wright <brad@intranation.com>
+Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon@ifup.org>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brendan Dixon <brendand@microsoft.com>
+Brent Salisbury <brent.salisbury@docker.com>
+Brett Higgins <brhiggins@arbor.net>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
+Brian Bland <brian.bland@docker.com>
+Brian DeHamer <brian@dehamer.com>
+Brian Dorsey <brian@dorseys.org>
+Brian Flad <bflad417@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Brian McCallister <brianm@skife.org>
+Brian Olsen <brian@maven-group.org>
+Brian Shumate <brian@couchbase.com>
+Brian Torres-Gil <brian@dralth.com>
+Brian Trump <btrump@yelp.com>
+Brice Jaglin <bjaglin@teads.tv>
+Briehan Lombaard <briehan.lombaard@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
+Bruno Gazzera <bgazzera@paginar.com>
+Bruno Renié <brutasse@gmail.com>
+Bruno Tavares <btavare@thoughtworks.com>
+Bryan Bess <squarejaw@bsbess.com>
+Bryan Boreham <bjboreham@gmail.com>
+Bryan Matsuo <bryan.matsuo@gmail.com>
+Bryan Murphy <bmurphy1976@gmail.com>
+buddhamagnet <buddhamagnet@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
+Caleb Spare <cespare@gmail.com>
+Calen Pennington <cale@edx.org>
+Cameron Boehmer <cameron.boehmer@gmail.com>
+Cameron Spear <cameronspear@gmail.com>
+Campbell Allen <campbell.allen@gmail.com>
+Candid Dauth <cdauth@cdauth.eu>
+Cao Weiwei <cao.weiwei30@zte.com.cn>
+Carl Henrik Lunde <chlunde@ping.uio.no>
+Carl Loa Odin <carlodin@gmail.com>
+Carl X. Su <bcbcarl@gmail.com>
+Carlos Alexandro Becker <caarlos0@gmail.com>
+Carlos Sanchez <carlos@apache.org>
+Carol Fager-Higgins <carol.fager-higgins@docker.com>
+Cary <caryhartline@users.noreply.github.com>
+Casey Bisson <casey.bisson@joyent.com>
+Ce Gao <ce.gao@outlook.com>
+Cedric Davies <cedricda@microsoft.com>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Chad Swenson <chadswen@gmail.com>
+Chance Zibolski <chance.zibolski@gmail.com>
+Chander G <chandergovind@gmail.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Hooper <charles.hooper@dotcloud.com>
+Charles Law <claw@conduce.com>
+Charles Lindsay <chaz@chazomatic.us>
+Charles Merriam <charles.merriam@gmail.com>
+Charles Sarrazin <charles@sarraz.in>
+Charles Smith <charles.smith@docker.com>
+Charlie Drage <charlie@charliedrage.com>
+Charlie Lewis <charliel@lab41.org>
+Chase Bolt <chase.bolt@gmail.com>
+ChaYoung You <yousbe@gmail.com>
+Chen Chao <cc272309126@gmail.com>
+Chen Chuanliang <chen.chuanliang@zte.com.cn>
+Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
+Chen Mingjie <chenmingjie0828@163.com>
+cheney90 <cheney-90@hotmail.com>
+Chewey <prosto-chewey@users.noreply.github.com>
+Chia-liang Kao <clkao@clkao.org>
+chli <chli@freewheel.tv>
+Cholerae Hu <choleraehyq@gmail.com>
+Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+Chris Dituri <csdituri@gmail.com>
+Chris Fordham <chris@fordham-nagy.id.au>
+Chris Gavin <chris@chrisgavin.me>
+Chris Khoo <chris.khoo@gmail.com>
+Chris McKinnel <chrismckinnel@gmail.com>
+Chris Seto <chriskseto@gmail.com>
+Chris Snow <chsnow123@gmail.com>
+Chris St. Pierre <chris.a.st.pierre@gmail.com>
+Chris Stivers <chris@stivers.us>
+Chris Swan <chris.swan@iee.org>
+Chris Wahl <github@wahlnetwork.com>
+Chris Weyl <cweyl@alumni.drew.edu>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
+Christian Berendt <berendt@b1-systems.de>
+Christian Böhme <developement@boehme3d.de>
+Christian Persson <saser@live.se>
+Christian Rotzoll <ch.rotzoll@gmail.com>
+Christian Simon <simon@swine.de>
+Christian Stefanescu <st.chris@gmail.com>
+ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Mehay <cmehay@online.net>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christopher Currie <codemonkey+github@gmail.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Christopher Latham <sudosurootdev@gmail.com>
+Christopher Rigor <crigor@gmail.com>
+Christy Perez <christy@linux.vnet.ibm.com>
+Chun Chen <ramichen@tencent.com>
+Ciro S. Costa <ciro.costa@usp.br>
+Clayton Coleman <ccoleman@redhat.com>
+Clinton Kitson <clintonskitson@gmail.com>
+Coenraad Loubser <coenraad@wish.org.za>
+Colin Dunklau <colin.dunklau@gmail.com>
+Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
+Collin Guarino <collin.guarino@gmail.com>
+Colm Hally <colmhally@gmail.com>
+companycy <companycy@gmail.com>
+Cory Forsyth <cory.forsyth@gmail.com>
+cressie176 <github@stephen-cresswell.net>
+CrimsonGlory <CrimsonGlory@users.noreply.github.com>
+Cristian Staretu <cristian.staretu@gmail.com>
+cristiano balducci <cristiano.balducci@gmail.com>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+CUI Wei <ghostplant@qq.com>
+Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
+Cyril F <cyrilf7x@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+Dafydd Crosby <dtcrsby@gmail.com>
+dalanlan <dalanlan925@gmail.com>
+Damian Smyth <damian@dsau.co>
+Damien Nadé <github@livna.org>
+Damien Nozay <damien.nozay@gmail.com>
+Damjan Georgievski <gdamjan@gmail.com>
+Dan Anolik <dan@anolik.net>
+Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Feldman <danf@jfrog.com>
+Dan Griffin <dgriffin@peer1.com>
+Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan Levy <dan@danlevy.net>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
+Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Daniel Gasienica <daniel@gasienica.ch>
+Daniel Hiltgen <daniel.hiltgen@docker.com>
+Daniel Menet <membership@sontags.ch>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nephin <dnephin@docker.com>
+Daniel Norberg <dano@spotify.com>
+Daniel Nordberg <dnordberg@gmail.com>
+Daniel Robinson <gottagetmac@gmail.com>
+Daniel S <dan.streby@gmail.com>
+Daniel Von Fange <daniel@leancoder.com>
+Daniel X Moore <yahivin@gmail.com>
+Daniel YC Lin <dlin.tw@gmail.com>
+Daniel Zhang <jmzwcn@gmail.com>
+Daniel, Dao Quang Minh <dqminh@cloudflare.com>
+Danny Berger <dpb587@gmail.com>
+Danny Yates <danny@codeaholics.org>
+Darren Coxall <darren@darrencoxall.com>
+Darren Shepherd <darren.s.shepherd@gmail.com>
+Darren Stahl <darst@microsoft.com>
+Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
+Davanum Srinivas <davanum@gmail.com>
+Dave Barboza <dbarboza@datto.com>
+Dave Henderson <dhenderson@gmail.com>
+Dave MacDonald <mindlapse@gmail.com>
+Dave Tucker <dt@docker.com>
+David Anderson <dave@natulte.net>
+David Calavera <david.calavera@gmail.com>
+David Corking <dmc-source@dcorking.com>
+David Cramer <davcrame@cisco.com>
+David Currie <david_currie@uk.ibm.com>
+David Davis <daviddavis@redhat.com>
+David Dooling <dooling@gmail.com>
+David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
+David Lawrence <david.lawrence@docker.com>
+David Lechner <david@lechnology.com>
+David M. Karr <davidmichaelkarr@gmail.com>
+David Mackey <tdmackey@booleanhaiku.com>
+David Mat <david@davidmat.com>
+David Mcanulty <github@hellspark.com>
+David Pelaez <pelaez89@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
+David Röthlisberger <david@rothlis.net>
+David Sheets <sheets@alum.mit.edu>
+David Sissitka <me@dsissitka.com>
+David Trott <github@davidtrott.com>
+David Williamson <davidwilliamson@users.noreply.github.com>
+David Xia <dxia@spotify.com>
+David Young <yangboh@cn.ibm.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
+Dawn Chen <dawnchen@google.com>
+dbdd <wangtong2712@gmail.com>
+dcylabs <dcylabs@gmail.com>
+decadent <decadent@users.noreply.github.com>
+deed02392 <georgehafiz@gmail.com>
+Deng Guangxing <dengguangxing@huawei.com>
+Deni Bertovic <deni@kset.org>
+Denis Gladkikh <denis@gladkikh.email>
+Denis Ollier <larchunix@users.noreply.github.com>
+Dennis Chen <barracks510@gmail.com>
+Dennis Docter <dennis@d23.nl>
+Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
+Derek Ch <denc716@gmail.com>
+Derek McGowan <derek@mcgstyle.net>
+Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
+devmeyster <arthurfbi@yahoo.com>
+Devvyn Murphy <devvyn@devvyn.com>
+Dharmit Shah <shahdharmit@gmail.com>
+Diego Romero <idiegoromero@gmail.com>
+Diego Siqueira <dieg0@live.com>
+Dieter Reuter <dieter.reuter@me.com>
+Dillon Dixon <dillondixon@gmail.com>
+Dima Stopel <dima@twistlock.com>
+Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
+Dimitris Rozakis <dimrozakis@gmail.com>
+Dimitry Andric <d.andric@activevideo.com>
+Dinesh Subhraveti <dineshs@altiscale.com>
+Ding Fei <dingfei@stars.org.cn>
+Diogo Monica <diogo@docker.com>
+DiuDiugirl <sophia.wang@pku.edu.cn>
+Djibril Koné <kone.djibril@gmail.com>
+dkumor <daniel@dkumor.com>
+Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
+Dmitri Shuralyov <shurcooL@gmail.com>
+Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry Gusev <dmitry.gusev@gmail.com>
+Dmitry Kononenko <d@dm42.ru>
+Dmitry Shyshkin <dmitry@shyshkin.org.ua>
+Dmitry Smirnov <onlyjob@member.fsf.org>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
+Dmitry Vorobev <dimahabr@gmail.com>
+Dolph Mathews <dolph.mathews@gmail.com>
+Dominik Dingel <dingel@linux.vnet.ibm.com>
+Dominik Finkbeiner <finkes93@gmail.com>
+Dominik Honnef <dominik@honnef.co>
+Don Kirkby <donkirkby@users.noreply.github.com>
+Don Kjer <don.kjer@gmail.com>
+Don Spaulding <donspauldingii@gmail.com>
+Donald Huang <don.hcd@gmail.com>
+Dong Chen <dongluo.chen@docker.com>
+Donovan Jones <git@gamma.net.nz>
+Doron Podoleanu <doronp@il.ibm.com>
+Doug Davis <dug@us.ibm.com>
+Doug MacEachern <dougm@vmware.com>
+Doug Tangren <d.tangren@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
+Dražen Lučanin <kermit666@gmail.com>
+Drew Erny <drew.erny@docker.com>
+Dustin Sallings <dustin@spy.net>
+Ed Costello <epc@epcostello.com>
+Edmund Wagner <edmund-wagner@web.de>
+Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
+Eivin Giske Skaaren <eivinsn@axis.com>
+Eivind Uggedal <eivind@uggedal.com>
+Elan Ruusamäe <glen@delfi.ee>
+Elena Morozova <lelenanam@gmail.com>
+Elias Probst <mail@eliasprobst.eu>
+Elijah Zupancic <elijah@zupancic.name>
+eluck <mail@eluck.me>
+Elvir Kuric <elvirkuric@gmail.com>
+Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
+Emily Rose <emily@contactvibe.com>
+Emir Ozer <emirozer@yandex.com>
+Enguerran <engcolson@gmail.com>
+Eohyung Lee <liquidnuker@gmail.com>
+epeterso <epeterson@breakpoint-labs.com>
+Eric Barch <barch@tomesoftware.com>
+Eric Curtin <ericcurtin17@gmail.com>
+Eric Hanchrow <ehanchrow@ine.com>
+Eric Lee <thenorthsecedes@gmail.com>
+Eric Myhre <hash@exultant.us>
+Eric Paris <eparis@redhat.com>
+Eric Rafaloff <erafaloff@gmail.com>
+Eric Rosenberg <ehaydenr@users.noreply.github.com>
+Eric Sage <eric.david.sage@gmail.com>
+Erica Windisch <erica@windisch.us>
+Eric Yang <windfarer@gmail.com>
+Eric-Olivier Lamey <eo@lamey.me>
+Erik Bray <erik.m.bray@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
+Erik St. Martin <alakriti@gmail.com>
+Erik Weathers <erikdw@gmail.com>
+Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
+Euan <euank@amazon.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
+eugenkrizo <eugen.krizo@gmail.com>
+evalle <shmarnev@gmail.com>
+Evan Allrich <evan@unguku.com>
+Evan Carmi <carmi@users.noreply.github.com>
+Evan Hazlett <ehazlett@users.noreply.github.com>
+Evan Hazlett <ejhazlett@gmail.com>
+Evan Krall <krall@yelp.com>
+Evan Phoenix <evan@fallingsnow.net>
+Evan Wies <evan@neomantra.net>
+Evelyn Xu <evelynhsu21@gmail.com>
+Everett Toews <everett.toews@rackspace.com>
+Evgeny Vereshchagin <evvers@ya.ru>
+Ewa Czechowska <ewa@ai-traders.com>
+Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
+ezbercih <cem.ezberci@gmail.com>
+Ezra Silvera <ezra@il.ibm.com>
+Fabiano Rosas <farosas@br.ibm.com>
+Fabio Falci <fabiofalci@gmail.com>
+Fabio Rapposelli <fabio@vmware.com>
+Fabio Rehm <fgrehm@gmail.com>
+Fabrizio Regini <freegenie@gmail.com>
+Fabrizio Soppelsa <fsoppelsa@mirantis.com>
+Faiz Khan <faizkhan00@gmail.com>
+falmp <chico.lopes@gmail.com>
+Fangyuan Gao <21551127@zju.edu.cn>
+Fareed Dudhia <fareeddudhia@googlemail.com>
+Fathi Boudra <fathi.boudra@linaro.org>
+Federico Gimenez <fgimenez@coit.es>
+Felipe Oliveira <felipeweb.programador@gmail.com>
+Felix Abecassis <fabecassis@nvidia.com>
+Felix Geisendörfer <felix@debuggable.com>
+Felix Hupfeld <quofelix@users.noreply.github.com>
+Felix Rabe <felix@rabe.io>
+Felix Ruess <felix.ruess@gmail.com>
+Felix Schindler <fschindler@weluse.de>
+Ferenc Szabo <pragmaticfrank@gmail.com>
+Fernando <fermayo@gmail.com>
+Fero Volar <alian@alian.info>
+Ferran Rodenas <frodenas@gmail.com>
+Filipe Brandenburger <filbranden@google.com>
+Filipe Oliveira <contato@fmoliveira.com.br>
+fl0yd <fl0yd@me.com>
+Flavio Castelli <fcastelli@suse.com>
+FLGMwt <ryan.stelly@live.com>
+Florian <FWirtz@users.noreply.github.com>
+Florian Klein <florian.klein@free.fr>
+Florian Maier <marsmensch@users.noreply.github.com>
+Florian Weingarten <flo@hackvalue.de>
+Florin Asavoaie <florin.asavoaie@gmail.com>
+fonglh <fonglh@gmail.com>
+fortinux <fortinux@users.noreply.github.com>
+Francesc Campoy <campoy@google.com>
+Francis Chuang <francis.chuang@boostport.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+Francisco Souza <f@souza.cc>
+Frank Groeneveld <frank@ivaldi.nl>
+Frank Herrmann <fgh@4gh.tv>
+Frank Macreery <frank@macreery.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
+Fred Lifton <fred.lifton@docker.com>
+Frederick F. Kautz IV <fkautz@redhat.com>
+Frederik Loeffert <frederik@zitrusmedia.de>
+Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
+Freek Kalter <freek@kalteronline.org>
+frosforever <frosforever@users.noreply.github.com>
+fy2462 <fy2462@gmail.com>
+Félix Baylac-Jacqué <baylac.felix@gmail.com>
+Félix Cantournet <felix.cantournet@cloudwatt.com>
+Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
+Gabriel Linder <linder.gabriel@gmail.com>
+Gabriel Monroy <gabriel@opdemand.com>
+Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
+Gaetan de Villele <gdevillele@gmail.com>
+Galen Sampson <galen.sampson@gmail.com>
+Gang Qiao <qiaohai8866@gmail.com>
+Gareth Rushgrove <gareth@morethanseven.net>
+Garrett Barboza <garrett@garrettbarboza.com>
+Gaurav <gaurav.gosec@gmail.com>
+gautam, prasanna <prasannagautam@gmail.com>
+Gaël PORTAY <gael.portay@savoirfairelinux.com>
+GennadySpb <lipenkov@gmail.com>
+Geoffrey Bachelet <grosfrais@gmail.com>
+George MacRorie <gmacr31@gmail.com>
+George Xie <georgexsh@gmail.com>
+Georgi Hristozov <georgi@forkbomb.nl>
+Gereon Frey <gereon.frey@dynport.de>
+German DZ <germ@ndz.com.ar>
+Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Gerwim <gerwim@gmail.com>
+Gianluca Borello <g.borello@gmail.com>
+Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
+gissehel <public-devgit-dantus@gissehel.org>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
+Glyn Normington <gnormington@gopivotal.com>
+GoBella <caili_welcome@163.com>
+Goffert van Gool <goffert@phusion.nl>
+Gosuke Miyashita <gosukenator@gmail.com>
+Gou Rao <gourao@users.noreply.github.com>
+Govinda Fichtner <govinda.fichtner@googlemail.com>
+Grant Reaber <grant.reaber@gmail.com>
+Graydon Hoare <graydon@pobox.com>
+Greg Fausak <greg@tacodata.com>
+Greg Thornton <xdissent@me.com>
+grossws <grossws@gmail.com>
+grunny <mwgrunny@gmail.com>
+gs11 <gustav.sinder@gmail.com>
+Guilhem Lettron <guilhem+github@lettron.fr>
+Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
+Guillaume J. Charmes <guillaume.charmes@docker.com>
+guoxiuyan <guoxiuyan@huawei.com>
+Gurjeet Singh <gurjeet@singh.im>
+Guruprasad <lgp171188@gmail.com>
+gwx296173 <gaojing3@huawei.com>
+Günter Zöchbauer <guenter@gzoechbauer.com>
+Hans Kristian Flaatten <hans@starefossen.com>
+Hans Rødtang <hansrodtang@gmail.com>
+Hao Shu Wei <haosw@cn.ibm.com>
+Hao Zhang <21521210@zju.edu.cn>
+Harald Albers <github@albersweb.de>
+Harley Laue <losinggeneration@gmail.com>
+Harold Cooper <hrldcpr@gmail.com>
+Harry Zhang <harryz@hyper.sh>
+Harshal Patil <harshalp@linux.vnet.ibm.com>
+He Simei <hesimei@zju.edu.cn>
+He Xin <he_xinworld@126.com>
+heartlock <21521209@zju.edu.cn>
+Hector Castro <hectcastro@gmail.com>
+Helen Xie <chenjg@harmonycloud.cn>
+Henning Sprang <henning.sprang@gmail.com>
+Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie@docker.com>
+Hong Xu <hong@topbug.net>
+Hongbin Lu <hongbin034@gmail.com>
+hsinko <21551195@zju.edu.cn>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huanzhong Zhang <zhanghuanzhong90@gmail.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Duncan <hugo@hugoduncan.org>
+Hugo Marisco <0x6875676f@gmail.com>
+Hunter Blanks <hunter@twilio.com>
+huqun <huqun@zju.edu.cn>
+Huu Nguyen <huu@prismskylabs.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+hyp3rdino <markus.kortlang@lhsystems.com>
+Hyzhou <1187766782@qq.com>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bishop <ianbishop@pace7.com>
+Ian Bull <irbull@gmail.com>
+Ian Calvert <ianjcalvert@gmail.com>
+Ian Campbell <ian.campbell@docker.com>
+Ian Lee <IanLee1521@gmail.com>
+Ian Main <imain@redhat.com>
+Ian Truslove <ian.truslove@gmail.com>
+Iavael <iavaelooeyt@gmail.com>
+Icaro Seara <icaro.seara@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
+Iliana Weller <iweller@amazon.com>
+Ilkka Laukkanen <ilkka@ilkka.io>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
+Ilya Gusev <mail@igusev.ru>
+ILYA Khlopotov <ilya.khlopotov@gmail.com>
+imre Fitos <imre.fitos+github@gmail.com>
+inglesp <peter.inglesby@gmail.com>
+Ingo Gottwald <in.gottwald@gmail.com>
+Isaac Dupree <antispam@idupree.com>
+Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isao Jonas <isao.jonas@gmail.com>
+Ivan Babrou <ibobrik@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
+Ivan Grcic <igrcic@gmail.com>
+J Bruni <joaohbruni@yahoo.com.br>
+J. Nunn <jbnunn@gmail.com>
+Jack Danger Canty <jackdanger@squareup.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
+Jacob Tomlinson <jacob@tom.linson.uk>
+Jake Champlin <jake.champlin.27@gmail.com>
+Jake Moshenko <jake@devtable.com>
+Jake Sanders <jsand@google.com>
+jakedt <jake@devtable.com>
+James Allen <jamesallen0108@gmail.com>
+James Carey <jecarey@us.ibm.com>
+James Carr <james.r.carr@gmail.com>
+James DeFelice <james.defelice@ishisystems.com>
+James Harrison Fisher <jameshfisher@gmail.com>
+James Kyburz <james.kyburz@gmail.com>
+James Kyle <james@jameskyle.org>
+James Lal <james@lightsofapollo.com>
+James Mills <prologic@shortcircuit.net.au>
+James Nugent <james@jen20.com>
+James Turnbull <james@lovedthanlost.net>
+Jamie Hannaford <jamie.hannaford@rackspace.com>
+Jamshid Afshar <jafshar@yahoo.com>
+Jan Keromnes <janx@linux.com>
+Jan Koprowski <jan.koprowski@gmail.com>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jan-Gerd Tenberge <janten@gmail.com>
+Jan-Jaap Driessen <janjaapdriessen@gmail.com>
+Jana Radhakrishnan <mrjana@docker.com>
+Jannick Fahlbusch <git@jf-projects.de>
+Janonymous <janonymous.codevulture@gmail.com>
+Januar Wayong <januar@gmail.com>
+Jared Biel <jared.biel@bolderthinking.com>
+Jared Hocutt <jaredh@netapp.com>
+Jaroslaw Zabiello <hipertracker@gmail.com>
+jaseg <jaseg@jaseg.net>
+Jasmine Hegman <jasmine@jhegman.com>
+Jason Divock <jdivock@gmail.com>
+Jason Giedymin <jasong@apache.org>
+Jason Green <Jason.Green@AverInformatics.Com>
+Jason Hall <imjasonh@gmail.com>
+Jason Heiss <jheiss@aput.net>
+Jason Livesay <ithkuil@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jason Plum <jplum@devonit.com>
+Jason Shepherd <jason@jasonshepherd.net>
+Jason Smith <jasonrichardsmith@gmail.com>
+Jason Sommer <jsdirv@gmail.com>
+Jason Stangroome <jason@codeassassin.com>
+jaxgeller <jacksongeller@gmail.com>
+Jay <imjching@hotmail.com>
+Jay <teguhwpurwanto@gmail.com>
+Jay Kamat <github@jgkamat.33mail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Christophe Berthon <huygens@berthon.eu>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeff Anderson <jeff@docker.com>
+Jeff Johnston <jeff.johnston.mn@gmail.com>
+Jeff Lindsay <progrium@gmail.com>
+Jeff Mickey <j@codemac.net>
+Jeff Minard <jeff@creditkarma.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com>
+Jeff Silberman <jsilberm@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeffrey Morgan <jmorganca@gmail.com>
+Jeffrey van Gogh <jvg@google.com>
+Jenny Gebske <jennifer@gebske.de>
+Jeremy Grosser <jeremy@synack.me>
+Jeremy Price <jprice.rhit@gmail.com>
+Jeremy Qian <vanpire110@163.com>
+Jeremy Unruh <jeremybunruh@gmail.com>
+Jeroen Jacobs <github@jeroenj.be>
+Jesse Dearing <jesse.dearing@gmail.com>
+Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <jessfraz@google.com>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
+jgeiger <jgeiger@gmail.com>
+Jhon Honce <jhonce@redhat.com>
+Ji.Zhilong <zhilongji@gmail.com>
+Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
+jianbosun <wonderflow.sun@gmail.com>
+Jie Luo <luo612@zju.edu.cn>
+Jilles Oldenbeuving <ojilles@gmail.com>
+Jim Alateras <jima@comware.com.au>
+Jim Minter <jminter@redhat.com>
+Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jimmy Puckett <jimmy.puckett@spinen.com>
+jimmyxian <jimmyxian2004@yahoo.com.cn>
+Jinsoo Park <cellpjs@gmail.com>
+Jiri Popelka <jpopelka@redhat.com>
+Jiuyue Ma <majiuyue@huawei.com>
+Jiří Župka <jzupka@redhat.com>
+jjy <jiangjinyang@outlook.com>
+jmzwcn <jmzwcn@gmail.com>
+Joao Fernandes <joao.fernandes@docker.com>
+Joe Beda <joe.github@bedafamily.com>
+Joe Doliner <jdoliner@pachyderm.io>
+Joe Ferguson <joe@infosiftr.com>
+Joe Gordon <joe.gordon0@gmail.com>
+Joe Shaw <joe@joeshaw.org>
+Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
+Joel Hansson <joel.hansson@ecraft.com>
+Joel Wurtz <jwurtz@jolicode.com>
+Joey Geiger <jgeiger@users.noreply.github.com>
+Joey Gibson <joey@joeygibson.com>
+Joffrey F <joffrey@docker.com>
+Johan Euphrosine <proppy@google.com>
+Johan Rydberg <johan.rydberg@gmail.com>
+Johanan Lieberman <johanan.lieberman@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
+John Costa <john.costa@gmail.com>
+John Feminella <jxf@jxf.me>
+John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
+John Howard (VM) <John.Howard@microsoft.com>
+John Mulhausen <john@docker.com>
+John OBrien III <jobrieniii@yahoo.com>
+John Starks <jostarks@microsoft.com>
+John Stephens <johnstep@docker.com>
+John Tims <john.k.tims@gmail.com>
+John Warwick <jwarwick@gmail.com>
+John Willis <john.willis@docker.com>
+johnharris85 <john@johnharris.io>
+Jon Wedaman <jweede@gmail.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
+Jonathan Dowland <jon+github@alcopop.org>
+Jonathan Lebon <jlebon@redhat.com>
+Jonathan Lomas <jonathan@floatinglomas.ca>
+Jonathan McCrohan <jmccrohan@gmail.com>
+Jonathan Mueller <j.mueller@apoveda.ch>
+Jonathan Pares <jonathanpa@users.noreply.github.com>
+Jonathan Rudenberg <jonathan@titanous.com>
+Jonathan Stoppani <jonathan.stoppani@divio.com>
+Jonh Wendell <jonh.wendell@redhat.com>
+Joost Cassee <joost@cassee.net>
+Jordan <jjn2009@users.noreply.github.com>
+Jordan Arentsen <blissdev@gmail.com>
+Jordan Sissel <jls@semicomplete.com>
+Jorge Marin <chipironcin@users.noreply.github.com>
+Jose Diaz-Gonzalez <jose@seatgeek.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Joseph Hager <ajhager@gmail.com>
+Joseph Kern <jkern@semafour.net>
+Josh <jokajak@gmail.com>
+Josh Bodah <jb3689@yahoo.com>
+Josh Chorlton <jchorlton@gmail.com>
+Josh Eveleth <joshe@opendns.com>
+Josh Hawn <josh.hawn@docker.com>
+Josh Horwitz <horwitz@addthis.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
+Josh Wilson <josh.wilson@fivestars.com>
+Josiah Kiehl <jkiehl@riotgames.com>
+José Tomás Albornoz <jojo@eljojo.net>
+JP <jpellerin@leapfrogonline.com>
+jrabbit <jackjrabbit@gmail.com>
+jroenf <jeroenfranse@gmail.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
+Julien Barbier <write0@gmail.com>
+Julien Bisconti <veggiemonk@users.noreply.github.com>
+Julien Bordellier <julienbordellier@gmail.com>
+Julien Dubois <julien.dubois@gmail.com>
+Julien Pervillé <julien.perville@perfect-memory.com>
+Julio Montes <imc.coder@gmail.com>
+Jun-Ru Chang <jrjang@gmail.com>
+Jussi Nummelin <jussi.nummelin@gmail.com>
+Justas Brazauskas <brazauskasjustas@gmail.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Force <justin.force@gmail.com>
+Justin Plock <jplock@users.noreply.github.com>
+Justin Simonelis <justin.p.simonelis@gmail.com>
+Justin Terry <juterry@microsoft.com>
+Justyn Temme <justyntemme@gmail.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+Kai Blin <kai@samba.org>
+Kai Qiang Wu(Kennan) <wkq5325@gmail.com>
+Kamil Domański <kamil@domanski.co>
+kamjar gerami <kami.gerami@gmail.com>
+Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
+Kara Alexandra <kalexandra@us.ibm.com>
+Karan Lyons <karan@karanlyons.com>
+Kareem Khazem <karkhaz@karkhaz.com>
+kargakis <kargakis@users.noreply.github.com>
+Karl Grzeszczak <karlgrz@gmail.com>
+Karol Duleba <mr.fuxi@gmail.com>
+Katie McLaughlin <katie@glasnt.com>
+Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
+Katrina Owen <katrina.owen@gmail.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
+Kay Yan <kay.yan@daocloud.io>
+kayrus <kay.diam@gmail.com>
+Ke Li <kel@splunk.com>
+Ke Xu <leonhartx.k@gmail.com>
+Kei Ohmura <ohmura.kei@gmail.com>
+Keith Hudgins <greenman@greenman.org>
+Keli Hu <dev@keli.hu>
+Ken Cochrane <kencochrane@gmail.com>
+Ken Herner <kherner@progress.com>
+Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
+Kenjiro Nakayama <nakayamakenjiro@gmail.com>
+Kent Johnson <kentoj@gmail.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
+Kevin Burke <kev@inburke.com>
+Kevin Clark <kevin.clark@gmail.com>
+Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Jing Qiu <kevin@idempotent.ca>
+Kevin Kern <kaiwentan@harmonycloud.cn>
+Kevin Menard <kevin@nirvdrum.com>
+Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
+Kevin Richardson <kevin@kevinrichardson.co>
+Kevin Shi <kshi@andrew.cmu.edu>
+Kevin Wallace <kevin@pentabarf.net>
+Kevin Yap <me@kevinyap.ca>
+kevinmeredith <kevin.m.meredith@gmail.com>
+Keyvan Fatehi <keyvanfatehi@gmail.com>
+kies <lleelm@gmail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+Kim Eik <kim@heldig.org>
+Kimbro Staken <kstaken@kstaken.com>
+Kir Kolyshkin <kir@openvz.org>
+Kiran Gangadharan <kiran.daredevil@gmail.com>
+Kirill Kolyshkin <kolyshkin@users.noreply.github.com>
+Kirill SIbirev <l0kix2@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
+Koichi Shiraishi <k@zchee.io>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
+Konstantin L <sw.double@gmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+Krasi Georgiev <krasi@vip-consult.solutions>
+Krasimir Georgiev <support@vip-consult.co.uk>
+Kris-Mikael Krister <krismikael@protonmail.com>
+Kristian Haugene <kristian.haugene@capgemini.com>
+Kristina Zabunova <triara.xiii@gmail.com>
+krrg <krrgithub@gmail.com>
+Kun Zhang <zkazure@gmail.com>
+Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+Kyle Linden <linden.kyle@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
+Lai Jiangshan <jiangshanlai@gmail.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
+Lalatendu Mohanty <lmohanty@redhat.com>
+Lance Chen <cyen0312@gmail.com>
+Lance Kinley <lkinley@loyaltymethods.com>
+Lars Butler <Lars.Butler@gmail.com>
+Lars Kellogg-Stedman <lars@redhat.com>
+Lars R. Damerow <lars@pixar.com>
+Lars-Magnus Skog <ralphtheninja@riseup.net>
+Laszlo Meszaros <lacienator@gmail.com>
+Laura Frank <ljfrank@gmail.com>
+Laurent Erignoux <lerignoux@gmail.com>
+Laurie Voss <github@seldo.com>
+Leandro Siqueira <leandro.siqueira@gmail.com>
+Lee Chao <932819864@qq.com>
+Lee, Meng-Han <sunrisedm4@gmail.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Lei Jitang <leijitang@huawei.com>
+Len Weincier <len@cloudafrica.net>
+Lennie <github@consolejunkie.net>
+Leo Gallucci <elgalu3@gmail.com>
+Leszek Kowalski <github@leszekkowalski.pl>
+Levi Blackstone <levi.blackstone@rackspace.com>
+Levi Gross <levi@levigross.com>
+Lewis Daly <lewisdaly@me.com>
+Lewis Marshall <lewis@lmars.net>
+Lewis Peckover <lew+github@lew.io>
+Liam Macgillavry <liam@kumina.nl>
+Liana Lo <liana.lixia@gmail.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
+Liang-Chi Hsieh <viirya@gmail.com>
+liaoqingwei <liaoqingwei@huawei.com>
+Lily Guo <lily.guo@docker.com>
+limsy <seongyeol37@gmail.com>
+Lin Lu <doraalin@163.com>
+LingFaKe <lingfake@huawei.com>
+Linus Heckemann <lheckemann@twig-world.com>
+Liran Tal <liran.tal@gmail.com>
+Liron Levin <liron@twistlock.com>
+Liu Bo <bo.li.liu@oracle.com>
+Liu Hua <sdu.liu@huawei.com>
+liwenqi <vikilwq@zju.edu.cn>
+lixiaobing10051267 <li.xiaobing1@zte.com.cn>
+Liz Zhang <lizzha@microsoft.com>
+LIZAO LI <lzlarryli@gmail.com>
+Lizzie Dixon <_@lizzie.io>
+Lloyd Dewolf <foolswisdom@gmail.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
+longliqiang88 <394564827@qq.com>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
+Lorenzo Fontana <fontanalorenzo@me.com>
+Louis Opter <kalessin@kalessin.fr>
+Luca Favatella <lucafavatella@users.noreply.github.com>
+Luca Marturana <lucamarturana@gmail.com>
+Luca Orlandi <luca.orlandi@gmail.com>
+Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
+Lucas Chan <lucas-github@lucaschan.com>
+Lucas Chi <lucas@teacherspayteachers.com>
+Luciano Mores <leslau@gmail.com>
+Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
+Luiz Svoboda <luizek@gmail.com>
+Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
+lukaspustina <lukas.pustina@centerdevice.com>
+Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
+lukemarsden <luke@digital-crocus.com>
+Lyn <energylyn@zju.edu.cn>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+Lénaïc Huard <lhuard@amadeus.com>
+Ma Müller <mueller-ma@users.noreply.github.com>
+Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
+Mabin <bin.ma@huawei.com>
+Madhav Puri <madhav.puri@gmail.com>
+Madhu Venugopal <madhu@socketplane.io>
+Mageee <21521230.zju.edu.cn>
+Mahesh Tiyyagura <tmahesh@gmail.com>
+malnick <malnick@gmail..com>
+Malte Janduda <mail@janduda.net>
+manchoz <giampaolo@trampolineup.com>
+Manfred Touron <m@42.am>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
+Mansi Nahar <mmn4185@rit.edu>
+mansinahar <mansinahar@users.noreply.github.com>
+Manuel Meurer <manuel@krautcomputing.com>
+Manuel Woelker <github@manuel.woelker.org>
+mapk0y <mapk0y@gmail.com>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
+Marcelo Salazar <chelosalazar@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
+Marcus Cobden <mcobden@cisco.com>
+Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Linke <marcus.linke@gmx.de>
+Marcus Ramberg <marcus@nordaaker.com>
+Marek Goldmann <marek.goldmann@gmail.com>
+Marian Marinov <mm@yuhu.biz>
+Marianna Tessel <mtesselh@gmail.com>
+Mario Loriedo <mario.loriedo@gmail.com>
+Marius Gundersen <me@mariusgundersen.net>
+Marius Sturm <marius@graylog.com>
+Marius Voila <marius.voila@gmail.com>
+Mark Allen <mrallen1@yahoo.com>
+Mark McGranaghan <mmcgrana@gmail.com>
+Mark McKinstry <mmckinst@umich.edu>
+Mark Milstein <mark@epiloque.com>
+Mark Parker <godefroi@users.noreply.github.com>
+Mark West <markewest@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
+Markus Fix <lispmeister@gmail.com>
+Martijn Dwars <ikben@martijndwars.nl>
+Martijn van Oosterhout <kleptog@svana.org>
+Martin Honermeyer <maze@strahlungsfrei.de>
+Martin Kelly <martin@surround.io>
+Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
+Martin Redmond <redmond.martin@gmail.com>
+Mary Anthony <mary.anthony@docker.com>
+Masahito Zembutsu <zembutsu@users.noreply.github.com>
+Masayuki Morita <minamijoyo@gmail.com>
+Mason Malone <mason.malone@gmail.com>
+Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
+Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
+Mathieu Parent <math.parent@gmail.com>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matt Bentley <matt.bentley@docker.com>
+Matt Haggard <haggardii@gmail.com>
+Matt Hoyle <matt@deployable.co>
+Matt McCormick <matt.mccormick@kitware.com>
+Matt Moore <mattmoor@google.com>
+Matt Richardson <matt@redgumtech.com.au>
+Matt Robenolt <matt@ydekproductions.com>
+Matthew Heon <mheon@redhat.com>
+Matthew Lapworth <matthewl@bit-shift.net>
+Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
+Matthias Klumpp <matthias@tenstral.net>
+Matthias Kühnle <git.nivoc@neverbox.com>
+Matthias Rampke <mr@soundcloud.com>
+Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
+mattymo <raytrac3r@gmail.com>
+mattyw <mattyw@me.com>
+Mauricio Garavaglia <mauricio@medallia.com>
+mauriyouth <mauriyouth@gmail.com>
+Max Shytikov <mshytikov@gmail.com>
+Maxim Fedchyshyn <sevmax@gmail.com>
+Maxim Ivanov <ivanov.maxim@gmail.com>
+Maxim Kulkin <mkulkin@mirantis.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
+Meaglith Ma <genedna@gmail.com>
+meejah <meejah@meejah.ca>
+Megan Kostick <mkostick@us.ibm.com>
+Mehul Kar <mehul.kar@gmail.com>
+Mei ChunTao <mei.chuntao@zte.com.cn>
+Mengdi Gao <usrgdd@gmail.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
+mgniu <mgniu@dataman-inc.com>
+Micah Zoltu <micah@newrelic.com>
+Michael A. Smith <michael@smith-li.com>
+Michael Bridgen <mikeb@squaremobius.net>
+Michael Brown <michael@netdirect.ca>
+Michael Chiang <mchiang@docker.com>
+Michael Crosby <michael@docker.com>
+Michael Currie <mcurrie@bruceforceresearch.com>
+Michael Friis <friism@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Michael Grauer <michael.grauer@kitware.com>
+Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Michael Hudson-Doyle <michael.hudson@canonical.com>
+Michael Huettermann <michael@huettermann.net>
+Michael Irwin <mikesir87@gmail.com>
+Michael Käufl <docker@c.michael-kaeufl.de>
+Michael Neale <michael.neale@gmail.com>
+Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
+Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michael West <mwest@mdsol.com>
+Michal Fojtik <mfojtik@redhat.com>
+Michal Gebauer <mishak@mishak.net>
+Michal Jemala <michal.jemala@gmail.com>
+Michal Minář <miminar@redhat.com>
+Michal Wieczorek <wieczorek-michal@wp.pl>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michał Czeraszkiewicz <czerasz@gmail.com>
+Michiel@unhosted <michiel@unhosted.org>
+Mickaël FORTUNATO <morsi.morsicus@gmail.com>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Miguel Morales <mimoralea@gmail.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
+Mike Brown <brownwm@us.ibm.com>
+Mike Chelen <michael.chelen@gmail.com>
+Mike Danese <mikedanese@google.com>
+Mike Dillon <mike@embody.org>
+Mike Dougherty <mike.dougherty@docker.com>
+Mike Gaffney <mike@uberu.com>
+Mike Goelzer <mike.goelzer@docker.com>
+Mike Leone <mleone896@gmail.com>
+Mike MacCana <mike.maccana@gmail.com>
+Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
+mikelinjie <294893458@qq.com>
+Mikhail Sobolev <mss@mawhrin.net>
+Milind Chawre <milindchawre@gmail.com>
+Miloslav Trmač <mitr@redhat.com>
+mingqing <limingqing@cyou-inc.com>
+Mingzhen Feng <fmzhen@zju.edu.cn>
+Misty Stanley-Jones <misty@docker.com>
+Mitch Capper <mitch.capper@gmail.com>
+mlarcher <github@ringabell.org>
+Mohammad Banikazemi <mb@us.ibm.com>
+Mohammed Aaqib Ansari <maaquib@gmail.com>
+Mohit Soni <mosoni@ebay.com>
+Morgan Bauer <mbauer@us.ibm.com>
+Morgante Pell <morgante.pell@morgante.net>
+Morgy93 <thomas@ulfertsprygoda.de>
+Morten Siebuhr <sbhr@sbhr.dk>
+Morton Fox <github@qslw.com>
+Moysés Borges <moysesb@gmail.com>
+mqliang <mqliang.zju@gmail.com>
+Mrunal Patel <mrunalp@gmail.com>
+msabansal <sabansal@microsoft.com>
+mschurenko <matt.schurenko@gmail.com>
+Muayyad Alsadi <alsadi@gmail.com>
+muge <stevezhang2014@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
+Muthukumar R <muthur@gmail.com>
+Máximo Cuadros <mcuadros@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nahum Shalman <nshalman@omniti.com>
+Nakul Pathak <nakulpathak3@hotmail.com>
+Nalin Dahyabhai <nalin@redhat.com>
+Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
+Natalie Parker <nparker@omnifone.com>
+Natanael Copa <natanael.copa@docker.com>
+Nate Brennand <nate.brennand@clever.com>
+Nate Eagleson <nate@nateeag.com>
+Nate Jones <nate@endot.org>
+Nathan Hsieh <hsieh.nathan@gmail.com>
+Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
+Nathan McCauley <nathan.mccauley@docker.com>
+Nathan Williams <nathan@teamtreehouse.com>
+Neal McBurnett <neal@mcburnett.org>
+Neil Peterson <neilpeterson@outlook.com>
+Nelson Chen <crazysim@gmail.com>
+Neyazul Haque <nuhaque@gmail.com>
+Nghia Tran <nghia@google.com>
+Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
+nick <nicholasjamesrusso@gmail.com>
+Nick DeCoursin <n.decoursin@foodpanda.com>
+Nick Irvine <nfirvine@nfirvine.com>
+Nick Parker <nikaios@gmail.com>
+Nick Payne <nick@kurai.co.uk>
+Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
+Nick Stinemates <nick@stinemates.org>
+NickrenREN <yuquan.ren@easystack.cn>
+Nicola Kabar <nicolaka@gmail.com>
+Nicolas Borboën <ponsfrilus@users.noreply.github.com>
+Nicolas De loof <nicolas.deloof@gmail.com>
+Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
+Nicolas Kaiser <nikai@nikai.net>
+Nicolás Hock Isaza <nhocki@gmail.com>
+Nigel Poulton <nigelpoulton@hotmail.com>
+NikolaMandic <mn080202@gmail.com>
+nikolas <nnyby@columbia.edu>
+Nikolay Milovanov <nmil@itransformers.net>
+Nirmal Mehta <nirmalkmehta@gmail.com>
+Nishant Totla <nishanttotla@gmail.com>
+NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
+Noah Treuhaft <noah.treuhaft@docker.com>
+noducks <onemannoducks@gmail.com>
+Nolan Darilek <nolan@thewordnerd.info>
+nponeccop <andy.melnikov@gmail.com>
+Nuutti Kotivuori <naked@iki.fi>
+nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
+objectified <objectified@gmail.com>
+OddBloke <daniel@daniel-watkins.co.uk>
+odk- <github@odkurzacz.org>
+Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
+Ohad Schneider <ohadschn@users.noreply.github.com>
+ohmystack <jun.jiang02@ele.me>
+Ole Reifschneider <mail@ole-reifschneider.de>
+Oliver Neal <ItsVeryWindy@users.noreply.github.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
+Olle Jonsson <olle.jonsson@gmail.com>
+Oriol Francès <oriolfa@gmail.com>
+orkaa <orkica@gmail.com>
+Oskar Niburski <oskarniburski@gmail.com>
+Otto Kekäläinen <otto@seravo.fi>
+Ovidio Mallo <ovidio.mallo@gmail.com>
+oyld <oyld0210@163.com>
+ozlerhakan <hakan.ozler@kodcu.com>
+paetling <paetling@gmail.com>
+pandrew <letters@paulnotcom.se>
+panticz <mail@konczalski.de>
+Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
+Patrick Böänziger <patrick.baenziger@bsi-software.com>
+Patrick Devine <patrick.devine@docker.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Stapleton <github@gdi2290.com>
+pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
+paul <paul@inkling.com>
+Paul Annesley <paul@annesley.cc>
+Paul Bellamy <paul.a.bellamy@gmail.com>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
+Paul Furtado <pfurtado@hubspot.com>
+Paul Hammond <paul@paulhammond.org>
+Paul Jimenez <pj@place.org>
+Paul Kehrer <paul.l.kehrer@gmail.com>
+Paul Lietar <paul@lietar.net>
+Paul Liljenberg <liljenberg.paul@gmail.com>
+Paul Morie <pmorie@gmail.com>
+Paul Nasrat <pnasrat@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Paulo Ribeiro <paigr.io@gmail.com>
+Pavel Lobashov <ShockwaveNN@gmail.com>
+Pavel Pospisil <pospispa@gmail.com>
+Pavel Sutyrin <pavel.sutyrin@gmail.com>
+Pavel Tikhomirov <ptikhomirov@parallels.com>
+Pavlos Ratis <dastergon@gentoo.org>
+Pavol Vargovcik <pallly.vargovcik@gmail.com>
+Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
+Peggy Li <peggyli.224@gmail.com>
+Pei Su <sillyousu@gmail.com>
+Penghan Wang <ph.wang@daocloud.io>
+perhapszzy@sina.com <perhapszzy@sina.com>
+pestophagous <pestophagous@users.noreply.github.com>
+Peter Bourgon <peter@bourgon.org>
+Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Choi <reikani@Peters-MacBook-Pro.local>
+Peter Dave Hello <PeterDaveHello@users.noreply.github.com>
+Peter Edge <peter.edge@gmail.com>
+Peter Ericson <pdericson@gmail.com>
+Peter Esbensen <pkesbensen@gmail.com>
+Peter Malmgren <ptmalmgren@gmail.com>
+Peter Salvatore <peter@psftw.com>
+Peter Volpe <petervo@redhat.com>
+Peter Waller <p@pwaller.net>
+Petr Švihlík <svihlik.petr@gmail.com>
+Phil <underscorephil@gmail.com>
+Phil Estes <estesp@linux.vnet.ibm.com>
+Phil Spitler <pspitler@gmail.com>
+Philip Monroe <phil@philmonroe.com>
+Philipp Wahala <philipp.wahala@gmail.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
+Phillip Alexander <git@phillipalexander.io>
+pidster <pid@pidster.com>
+Piergiuliano Bossi <pgbossi@gmail.com>
+Pierre <py@poujade.org>
+Pierre Carrier <pierre@meteor.com>
+Pierre Dal-Pra <dalpra.pierre@gmail.com>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
+Piotr Bogdan <ppbogdan@gmail.com>
+pixelistik <pixelistik@users.noreply.github.com>
+Porjo <porjo38@yahoo.com.au>
+Poul Kjeldager Sørensen <pks@s-innovations.net>
+Pradeep Chhetri <pradeep@indix.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Prayag Verma <prayag.verma@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
+pysqz <randomq@126.com>
+qhuang <h.huangqiang@huawei.com>
+Qiang Huang <h.huangqiang@huawei.com>
+Qinglan Peng <qinglanpeng@zju.edu.cn>
+qudongfang <qudongfang@gmail.com>
+Quentin Brossard <qbrossard@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
+Quentin Tayssier <qtayssier@gmail.com>
+r0n22 <cameron.regan@gmail.com>
+Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
+Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Raghuram Devarakonda <draghuram@gmail.com>
+Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
+Ralf Sippl <ralf.sippl@gmail.com>
+Ralle <spam@rasmusa.net>
+Ralph Bean <rbean@redhat.com>
+Ramkumar Ramachandra <artagnon@gmail.com>
+Ramon Brooker <rbrooker@aetherealmind.com>
+Ramon van Alteren <ramon@vanalteren.nl>
+Ray Tsang <saturnism@users.noreply.github.com>
+ReadmeCritic <frankensteinbot@gmail.com>
+Recursive Madman <recursive.madman@gmx.de>
+Reficul <xuzhenglun@gmail.com>
+Regan McCooey <rmccooey27@aol.com>
+Remi Rampin <remirampin@gmail.com>
+Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
+resouer <resouer@163.com>
+rgstephens <greg@udon.org>
+Rhys Hiltner <rhys@twitch.tv>
+Rich Moyse <rich@moyse.us>
+Rich Seymour <rseymour@gmail.com>
+Richard <richard.scothern@gmail.com>
+Richard Burnison <rburnison@ebay.com>
+Richard Harvey <richard@squarecows.com>
+Richard Mathie <richard.mathie@amey.co.uk>
+Richard Metzler <richard@paadee.com>
+Richard Scothern <richard.scothern@gmail.com>
+Richo Healey <richo@psych0tik.net>
+Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Rick Wieman <git@rickw.nl>
+Rik Nijessen <rik@keefo.nl>
+Riku Voipio <riku.voipio@linaro.org>
+Riley Guerin <rileytg.dev@gmail.com>
+Ritesh H Shukla <sritesh@vmware.com>
+Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
+Rob Vesse <rvesse@dotnetrdf.org>
+Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
+Robert Obryk <robryk@gmail.com>
+Robert Stern <lexandro2000@gmail.com>
+Robert Terhaar <robbyt@users.noreply.github.com>
+Robert Wallis <smilingrob@gmail.com>
+Roberto G. Hashioka <roberto.hashioka@docker.com>
+Roberto Muñoz Fernández <robertomf@gmail.com>
+Robin Naundorf <r.naundorf@fh-muenster.de>
+Robin Schneider <ypid@riseup.net>
+Robin Speekenbrink <robin@kingsquare.nl>
+robpc <rpcann@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Rodrigo Vaz <rodrigo.vaz@gmail.com>
+Roel Van Nyen <roel.vannyen@gmail.com>
+Roger Peppe <rogpeppe@gmail.com>
+Rohit Jnagal <jnagal@google.com>
+Rohit Kadam <rohit.d.kadam@gmail.com>
+Rojin George <rojingeorge@huawei.com>
+Roland Huß <roland@jolokia.org>
+Roland Kammerer <roland.kammerer@linbit.com>
+Roland Moriz <rmoriz@users.noreply.github.com>
+Roma Sokolov <sokolov.r.v@gmail.com>
+Roman Strashkin <roman.strashkin@gmail.com>
+Ron Smits <ron.smits@gmail.com>
+Ron Williams <ron.a.williams@gmail.com>
+root <docker-dummy@example.com>
+root <root@localhost>
+root <root@lxdebmas.marist.edu>
+root <root@ubuntu-14.04-amd64-vbox>
+root <root@webm215.cluster016.ha.ovh.net>
+Rory Hunter <roryhunter2@gmail.com>
+Rory McCune <raesene@gmail.com>
+Ross Boucher <rboucher@gmail.com>
+Rovanion Luckey <rovanion.luckey@gmail.com>
+Rozhnov Alexandr <nox73@ya.ru>
+rsmoorthy <rsmoorthy@users.noreply.github.com>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Rui Lopes <rgl@ruilopes.com>
+Runshen Zhu <runshen.zhu@gmail.com>
+Ryan Abrams <rdabrams@gmail.com>
+Ryan Anderson <anderson.ryanc@gmail.com>
+Ryan Aslett <github@mixologic.com>
+Ryan Belgrave <rmb1993@gmail.com>
+Ryan Detzel <ryan.detzel@gmail.com>
+Ryan Fowler <rwfowler@gmail.com>
+Ryan McLaughlin <rmclaughlin@insidesales.com>
+Ryan O'Donnell <odonnellryanc@gmail.com>
+Ryan Seto <ryanseto@yak.net>
+Ryan Thomas <rthomas@atlassian.com>
+Ryan Trauntvein <rtrauntvein@novacoast.com>
+Ryan Wallner <ryan.wallner@clusterhq.com>
+Ryan Zhang <ryan.zhang@docker.com>
+RyanDeng <sheldon.d1018@gmail.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
+s. rannou <mxs@sbrk.org>
+s00318865 <sunyuan3@huawei.com>
+Sabin Basyal <sabin.basyal@gmail.com>
+Sachin Joshi <sachin_jayant_joshi@hotmail.com>
+Sagar Hani <sagarhani33@gmail.com>
+Sainath Grandhi <sainath.grandhi@intel.com>
+sakeven <jc5930@sina.cn>
+Sally O'Malley <somalley@redhat.com>
+Sam Abed <sam.abed@gmail.com>
+Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
+Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Neirinck <sam@samneirinck.com>
+Sam Reis <sreis@atlassian.com>
+Sam Rijs <srijs@airpost.net>
+Sambuddha Basu <sambuddhabasu1@gmail.com>
+Sami Wagiaalla <swagiaal@redhat.com>
+Samuel Andaya <samuel@andaya.net>
+Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
+Samuel Karp <skarp@amazon.com>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
+Sandeep Bansal <msabansal@microsoft.com>
+Sankar சங்கர் <sankar.curiosity@gmail.com>
+Sanket Saurav <sanketsaurav@gmail.com>
+Santhosh Manohar <santhosh@docker.com>
+sapphiredev <se.imas.kr@gmail.com>
+Satnam Singh <satnam@raintown.org>
+satoru <satorulogic@gmail.com>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
+Satoshi Tagomori <tagomoris@gmail.com>
+scaleoutsean <scaleoutsean@users.noreply.github.com>
+Scott Bessler <scottbessler@gmail.com>
+Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Stamp <scottstamp851@gmail.com>
+Scott Walls <sawalls@umich.edu>
+sdreyesg <sdreyesg@gmail.com>
+Sean Christopherson <sean.j.christopherson@intel.com>
+Sean Cronin <seancron@gmail.com>
+Sean McIntyre <s.mcintyre@xverba.ca>
+Sean OMeara <sean@chef.io>
+Sean P. Kane <skane@newrelic.com>
+Sean Rodman <srodman7689@gmail.com>
+Sebastiaan van Steenis <mail@superseb.nl>
+Sebastiaan van Stijn <github@gone.nl>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+Senthil Kumaran <senthil@uthcode.com>
+SeongJae Park <sj38.park@gmail.com>
+Seongyeol Lim <seongyeol37@gmail.com>
+Serge Hallyn <serge.hallyn@ubuntu.com>
+Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
+Sergey Evstifeev <sergey.evstifeev@gmail.com>
+Sergii Kabashniuk <skabashnyuk@codenvy.com>
+Serhat Gülçiçek <serhat25@gmail.com>
+Sevki Hasirci <s@sevki.org>
+Shane Canon <scanon@lbl.gov>
+Shane da Silva <shane@dasilva.io>
+shaunol <shaunol@gmail.com>
+Shawn Landden <shawn@churchofgit.com>
+Shawn Siefkas <shawn.siefkas@meredith.com>
+shawnhe <shawnhe@shawnhedeMacBook-Pro.local>
+Shayne Wang <shaynexwang@gmail.com>
+Shekhar Gulati <shekhargulati84@gmail.com>
+Sheng Yang <sheng@yasker.org>
+Shengbo Song <thomassong@tencent.com>
+Shev Yan <yandong_8212@163.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Shijiang Wei <mountkin@gmail.com>
+Shishir Mahajan <shishir.mahajan@redhat.com>
+Shoubhik Bose <sbose78@gmail.com>
+Shourya Sarcar <shourya.sarcar@gmail.com>
+shuai-z <zs.broccoli@gmail.com>
+Shukui Yang <yangshukui@huawei.com>
+Shuwei Hao <haosw@cn.ibm.com>
+Sian Lerk Lau <kiawin@gmail.com>
+sidharthamani <sid@rancher.com>
+Silas Sewell <silas@sewell.org>
+Silvan Jegen <s.jegen@gmail.com>
+Simei He <hesimei@zju.edu.cn>
+Simon Eskildsen <sirup@sirupsen.com>
+Simon Leinen <simon.leinen@gmail.com>
+Simon Taranto <simon.taranto@gmail.com>
+Sindhu S <sindhus@live.in>
+Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+skaasten <shaunk@gmail.com>
+Solganik Alexander <solganik@gmail.com>
+Solomon Hykes <solomon@docker.com>
+Song Gao <song@gao.io>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+Soulou <leo@unbekandt.eu>
+Spencer Brown <spencer@spencerbrown.org>
+Spencer Smith <robertspencersmith@gmail.com>
+Sridatta Thatipamala <sthatipamala@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
+srinsriv <srinsriv@users.noreply.github.com>
+Steeve Morin <steeve.morin@gmail.com>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+Stefan J. Wernli <swernli@microsoft.com>
+Stefan Praszalowicz <stefan@greplin.com>
+Stefan S. <tronicum@user.github.com>
+Stefan Scherer <scherer_stefan@icloud.com>
+Stefan Staudenmeyer <doerte@instana.com>
+Stefan Weil <sw@weilnetz.de>
+Stephen Crosby <stevecrozz@gmail.com>
+Stephen Day <stephen.day@docker.com>
+Stephen Drake <stephen@xenolith.net>
+Stephen Rust <srust@blockbridge.com>
+Steve Dougherty <steve@asksteved.com>
+Steve Durrheimer <s.durrheimer@gmail.com>
+Steve Francia <steve.francia@gmail.com>
+Steve Koch <stevekochscience@gmail.com>
+Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Erenst <stevenerenst@gmail.com>
+Steven Hartland <steven.hartland@multiplay.co.uk>
+Steven Iveson <sjiveson@outlook.com>
+Steven Merrill <steven.merrill@gmail.com>
+Steven Richards <steven@axiomzen.co>
+Steven Taylor <steven.taylor@me.com>
+Subhajit Ghosh <isubuz.g@gmail.com>
+Sujith Haridasan <sujith.h@gmail.com>
+Sun Gengze <690388648@qq.com>
+Suryakumar Sudar <surya.trunks@gmail.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Swapnil Daingade <swapnil.daingade@gmail.com>
+Sylvain Baubeau <sbaubeau@redhat.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
+Tadej Janež <tadej.j@nez.si>
+TAGOMORI Satoshi <tagomoris@gmail.com>
+tang0th <tang0th@gmx.com>
+Tangi COLIN <tangicolin@gmail.com>
+Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
+Taylor Jones <monitorjbl@gmail.com>
+tbonza <tylers.pile@gmail.com>
+Ted M. Young <tedyoung@gmail.com>
+Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
+terryding77 <550147740@qq.com>
+tgic <farmer1992@gmail.com>
+Thatcher Peskens <thatcher@docker.com>
+theadactyl <thea.lamkin@gmail.com>
+Thell 'Bo' Fowler <thell@tbfowler.name>
+Thermionix <bond711@gmail.com>
+Thijs Terlouw <thijsterlouw@gmail.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
+Thomas Gazagnaire <thomas@gazagnaire.org>
+Thomas Grainger <tagrain@gmail.com>
+Thomas Hansen <thomas.hansen@gmail.com>
+Thomas Leonard <thomas.leonard@docker.com>
+Thomas LEVEIL <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
+Thomas Riccardi <riccardi@systran.fr>
+Thomas Schroeter <thomas@cliqz.com>
+Thomas Sjögren <konstruktoid@users.noreply.github.com>
+Thomas Swift <tgs242@gmail.com>
+Thomas Tanaka <thomas.tanaka@oracle.com>
+Thomas Texier <sharkone@en-mousse.org>
+Tianon Gravi <admwiggin@gmail.com>
+Tianyi Wang <capkurmagati@gmail.com>
+Tibor Vass <teabee89@gmail.com>
+Tiffany Jernigan <tiffany.f.j@gmail.com>
+Tiffany Low <tiffany@box.com>
+Tim Bosse <taim@bosboot.org>
+Tim Dettrick <t.dettrick@uq.edu.au>
+Tim Düsterhus <tim@bastelstu.be>
+Tim Hockin <thockin@google.com>
+Tim Ruffles <oi@truffles.me.uk>
+Tim Smith <timbot@google.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Tim Wang <timwangdev@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
+Tim Zju <21651152@zju.edu.cn>
+timfeirg <kkcocogogo@gmail.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
+Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Bradtke <webwurst@gmail.com>
+Tobias Gesellchen <tobias@gesellix.de>
+Tobias Klauser <tklauser@distanz.ch>
+Tobias Munk <schmunk@usrbin.de>
+Tobias Schmidt <ts@soundcloud.com>
+Tobias Schwab <tobias.schwab@dynport.de>
+Todd Crane <todd@toddcrane.com>
+Todd Lunter <tlunter@gmail.com>
+Todd Whiteman <todd.whiteman@joyent.com>
+Toli Kuznets <toli@docker.com>
+Tom Barlow <tomwbarlow@gmail.com>
+Tom Booth <tombooth@gmail.com>
+Tom Denham <tom@tomdee.co.uk>
+Tom Fotherby <tom+github@peopleperhour.com>
+Tom Howe <tom.howe@enstratius.com>
+Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom Wilkie <tom.wilkie@gmail.com>
+Tom X. Tobin <tomxtobin@tomxtobin.com>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tomáš Hrčka <thrcka@redhat.com>
+Tonny Xu <tonny.xu@gmail.com>
+Tony Abboud <tdabboud@hotmail.com>
+Tony Daws <tony@daws.ca>
+Tony Miller <mcfiredrill@gmail.com>
+toogley <toogley@mailbox.org>
+Torstein Husebø <torstein@huseboe.net>
+Tõnis Tiigi <tonistiigi@gmail.com>
+tpng <benny.tpng@gmail.com>
+tracylihui <793912329@qq.com>
+Trapier Marshall <trapier.marshall@docker.com>
+Travis Cline <travis.cline@gmail.com>
+Travis Thieman <travis.thieman@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
+Trevor <trevinwoodstock@gmail.com>
+Trevor Pounds <trevor.pounds@gmail.com>
+Trevor Sullivan <pcgeek86@gmail.com>
+trishnaguha <trishnaguha17@gmail.com>
+Tristan Carel <tristan@cogniteev.com>
+Troy Denton <trdenton@gmail.com>
+Tyler Brock <tyler.brock@gmail.com>
+Tzu-Jung Lee <roylee17@gmail.com>
+Ulysse Carion <ulyssecarion@gmail.com>
+unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
+vagrant <vagrant@ubuntu-14.04-amd64-vbox>
+Vaidas Jablonskis <jablonskis@gmail.com>
+Veres Lajos <vlajos@gmail.com>
+vgeta <gopikannan.venugopalsamy@gmail.com>
+Victor Algaze <valgaze@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Costan <costan@gmail.com>
+Victor I. Wood <viw@t2am.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Victor Marmol <vmarmol@google.com>
+Victor Palma <palma.victor@gmail.com>
+Victor Vieux <victor.vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
+Vijaya Kumar K <vijayak@caviumnetworks.com>
+Viktor Stanchev <me@viktorstanchev.com>
+Viktor Vojnovski <viktor.vojnovski@amadeus.com>
+VinayRaghavanKS <raghavan.vinay@gmail.com>
+Vincent Batts <vbatts@redhat.com>
+Vincent Bernat <bernat@luffy.cx>
+Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Demeester <vincent@sbr.pm>
+Vincent Giersch <vincent.giersch@ovh.net>
+Vincent Mayers <vincent.mayers@inbloom.org>
+Vincent Woo <me@vincentwoo.com>
+Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
+Vishnu Kannan <vishnuk@google.com>
+Vitor Monteiro <vmrmonteiro@gmail.com>
+Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
+Vladimir Bulyga <xx@ccxx.cc>
+Vladimir Kirillov <proger@wilab.org.ua>
+Vladimir Pouzanov <farcaller@google.com>
+Vladimir Rutsky <altsysrq@gmail.com>
+Vladimir Varankin <nek.narqo+git@gmail.com>
+VladimirAus <v_roudakov@yahoo.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
+waitingkuo <waitingkuo0527@gmail.com>
+Walter Leibbrandt <github@wrl.co.za>
+Walter Stanish <walter@pratyeka.org>
+WANG Chao <wcwxyz@gmail.com>
+Wang Long <long.wanglong@huawei.com>
+Wang Ping <present.wp@icloud.com>
+Wang Xing <hzwangxing@corp.netease.com>
+Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Ward Vandewege <ward@jhvc.com>
+WarheadsSE <max@warheads.net>
+Wayne Chang <wayne@neverfear.org>
+Wei Wu <wuwei4455@gmail.com>
+Wei-Ting Kuo <waitingkuo0527@gmail.com>
+weiyan <weiyan3@huawei.com>
+Weiyang Zhu <cnresonant@gmail.com>
+Wen Cheng Ma <wenchma@cn.ibm.com>
+Wendel Fleming <wfleming@usc.edu>
+Wenkai Yin <yinw@vmware.com>
+Wentao Zhang <zhangwentao234@huawei.com>
+Wenxuan Zhao <viz@linux.com>
+Wenyu You <21551128@zju.edu.cn>
+Wenzhi Liang <wenzhi.liang@gmail.com>
+Wes Morgan <cap10morgan@gmail.com>
+Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
+Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
+willhf <willhf@gmail.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Hubbs <w.d.hubbs@gmail.com>
+William Martin <wmartin@pivotal.io>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+WiseTrem <shepelyov.g@gmail.com>
+wlan0 <sidharthamn@gmail.com>
+Wolfgang Powisch <powo@powo.priv.at>
+wonderflow <wonderflow.sun@gmail.com>
+Wonjun Kim <wonjun.kim@navercorp.com>
+xamyzhao <x.amy.zhao@gmail.com>
+Xianglin Gao <xlgao@zju.edu.cn>
+Xianlu Bird <xianlubird@gmail.com>
+XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
+xiekeyang <xiekeyang@huawei.com>
+Xinbo Weng <xihuanbo_0521@zju.edu.cn>
+Xinzi Zhou <imdreamrunner@gmail.com>
+Xiuming Chen <cc@cxm.cc>
+xlgao-zju <xlgao@zju.edu.cn>
+xuzhaokui <cynicholas@gmail.com>
+Yahya <ya7yaz@gmail.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yan Feng <yanfeng2@huawei.com>
+Yang Bai <hamo.by@gmail.com>
+yangshukui <yangshukui@huawei.com>
+Yanqiang Miao <miao.yanqiang@zte.com.cn>
+Yasunori Mahata <nori@mahata.net>
+Yestin Sun <sunyi0804@gmail.com>
+Yi EungJun <eungjun.yi@navercorp.com>
+Yibai Zhang <xm1994@gmail.com>
+Yihang Ho <hoyihang5@gmail.com>
+Ying Li <ying.li@docker.com>
+Yohei Ueda <yohei@jp.ibm.com>
+Yong Tang <yong.tang.github@outlook.com>
+Yongzhi Pan <panyongzhi@gmail.com>
+yorkie <yorkiefixer@gmail.com>
+You-Sheng Yang (楊有勝) <vicamo@gmail.com>
+Youcef YEKHLEF <yyekhlef@gmail.com>
+Yu Peng <yu.peng36@zte.com.cn>
+Yuan Sun <sunyuan3@huawei.com>
+yuchangchun <yuchangchun1@huawei.com>
+yuchengxia <yuchengxia@huawei.com>
+Yunxiang Huang <hyxqshk@vip.qq.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+yuzou <zouyu7@huawei.com>
+Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
+Zachary Jaffee <zij@case.edu>
+Zain Memon <zain@inzain.net>
+Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
+Zefan Li <lizefan@huawei.com>
+Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
+Zhang Kun <zkazure@gmail.com>
+Zhang Wei <zhangwei555@huawei.com>
+Zhang Wentao <zhangwentao234@huawei.com>
+zhangxianwei <xianwei.zw@alibaba-inc.com>
+Zhenan Ye <21551168@zju.edu.cn>
+zhenghenghuo <zhenghenghuo@zju.edu.cn>
+Zhenkun Bi <bi.zhenkun@zte.com.cn>
+zhouhao <zhouhao@cn.fujitsu.com>
+Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
+Zhu Kunjia <zhu.kunjia@zte.com.cn>
+Zhuoyun Wei <wzyboy@wzyboy.org>
+Zilin Du <zilin.du@gmail.com>
+zimbatm <zimbatm@zimbatm.com>
+Ziming Dong <bnudzm@foxmail.com>
+ZJUshuaizhou <21551191@zju.edu.cn>
+zmarouf <zeid.marouf@gmail.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
+zqh <zqhxuyuan@gmail.com>
+Zuhayr Elahi <elahi.zuhayr@gmail.com>
+Zunayed Ali <zunayed@gmail.com>
+Álex González <agonzalezro@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+Átila Camurça Alves <camurca.home@gmail.com>
+尹吉峰 <jifeng.yin@gmail.com>
+搏通 <yufeng.pyf@alibaba-inc.com>
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 215920cc3..dc2d323d4 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -707,7 +707,7 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
exporting: exporting,
squash: options.Squash,
emptyLayer: options.EmptyLayer,
- tarPath: b.tarPath(),
+ tarPath: b.tarPath(&b.IDMappingOptions),
parent: parent,
blobDirectory: options.BlobDirectory,
preEmptyLayers: b.PrependedEmptyLayers,
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index b8b9db0f3..20d6715f5 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -27,7 +27,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/cyphar/filepath-securejoin"
+ securejoin "github.com/cyphar/filepath-securejoin"
docker "github.com/fsouza/go-dockerclient"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -210,7 +210,6 @@ type Executor struct {
annotations []string
onbuild []string
layers bool
- topLayers []string
useCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
@@ -487,6 +486,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// Check the file and see if part of it is a symlink.
// Convert it to the target if so. To be ultrasafe
// do the same for the mountpoint.
+ hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
if err != nil {
@@ -496,6 +496,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
}
copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
+ if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
+ if hadFinalPathSeparator {
+ copy.Dest += string(os.PathSeparator)
+ }
+ }
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
@@ -507,29 +512,61 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
}
sources := []string{}
for _, src := range copy.Src {
+ contextDir := s.executor.contextDir
+ copyExcludes := excludes
+ var idMappingOptions *buildah.IDMappingOptions
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
sources = append(sources, src)
} else if len(copy.From) > 0 {
+ var srcRoot string
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
- sources = append(sources, filepath.Join(other.mountPoint, src))
+ srcRoot = other.mountPoint
+ contextDir = other.mountPoint
+ idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
- sources = append(sources, filepath.Join(builder.MountPoint, src))
+ srcRoot = builder.MountPoint
+ contextDir = builder.MountPoint
+ idMappingOptions = &builder.IDMappingOptions
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
+ srcSecure, err := securejoin.SecureJoin(srcRoot, src)
+ if err != nil {
+ return err
+ }
+ // If destination is a folder, we need to take extra care to
+ // ensure that files are copied with correct names (since
+ // resolving a symlink may result in a different name).
+ if hadFinalPathSeparator {
+ _, srcName := filepath.Split(src)
+ _, srcNameSecure := filepath.Split(srcSecure)
+ if srcName != srcNameSecure {
+ options := buildah.AddAndCopyOptions{
+ Chown: copy.Chown,
+ ContextDir: contextDir,
+ Excludes: copyExcludes,
+ }
+ if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
+ return err
+ }
+ continue
+ }
+ }
+ sources = append(sources, srcSecure)
+
} else {
sources = append(sources, filepath.Join(s.executor.contextDir, src))
+ copyExcludes = append(s.executor.excludes, excludes...)
+ }
+ options := buildah.AddAndCopyOptions{
+ Chown: copy.Chown,
+ ContextDir: contextDir,
+ Excludes: copyExcludes,
+ IDMappingOptions: idMappingOptions,
+ }
+ if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
+ return err
}
- }
-
- options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
- ContextDir: s.executor.contextDir,
- Excludes: s.executor.excludes,
- }
-
- if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
- return err
}
}
return nil
@@ -590,7 +627,11 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
args := run.Args
if run.Shell {
- args = append([]string{"/bin/sh", "-c"}, args...)
+ if len(config.Shell) > 0 && s.builder.Format == buildah.Dockerv2ImageManifest {
+ args = append(config.Shell, args...)
+ } else {
+ args = append([]string{"/bin/sh", "-c"}, args...)
+ }
}
if err := s.volumeCacheSave(); err != nil {
return err
@@ -847,9 +888,6 @@ func (s *StageExecutor) prepare(ctx context.Context, stage imagebuilder.Stage, f
// Make this our "current" working container.
s.mountPoint = mountPoint
s.builder = builder
- // Add the top layer of this image to b.topLayers so we can
- // keep track of them when building with cached images.
- s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer)
}
logrus.Debugln("Container ID:", builder.ContainerID)
return builder, nil
@@ -954,7 +992,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
}
logImageID := func(imgID string) {
if s.executor.iidfile == "" {
- fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
+ fmt.Fprintf(s.executor.out, "%s\n", imgID)
}
}
@@ -972,7 +1010,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// We don't need to squash the base image, so just
// reuse the base image.
logCommit(s.output, -1)
- if imgID, ref, err = s.copyExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
+ if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
return "", nil, err
}
}
@@ -1097,7 +1135,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
imgID = cacheID
if commitName != "" {
logCommit(commitName, i)
- if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
+ if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err
}
logImageID(imgID)
@@ -1166,8 +1204,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
return imgID, ref, nil
}
-// copyExistingImage creates a copy of an image already in the store
-func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
+// tagExistingImage adds names to an image already in the store
+func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
// If we don't need to attach a name to the image, just return the cache ID.
if output == "" {
return cacheID, nil, nil
@@ -1234,11 +1272,11 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node,
return "", errors.Wrapf(err, "error getting top layer info")
}
}
- // If the parent of the top layer of an image is equal to the last entry in b.topLayers
+ // If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- if imageTopLayer == nil || imageTopLayer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || imageTopLayer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] {
+ if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) {
history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
@@ -1327,26 +1365,8 @@ func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node,
return false
}
}
- instruction := child.Original
- switch strings.ToUpper(child.Value) {
- case "RUN":
- instruction = instruction[4:]
- buildArgs := b.getBuildArgs()
- // If a previous image was built with some build-args but the new build process doesn't have any build-args
- // specified, the command might be expanded differently, so compare the lengths of the old instruction with
- // the current one. 11 is the length of "/bin/sh -c " that is used to run the run commands.
- if buildArgs == "" && len(history[len(baseHistory)].CreatedBy) > len(instruction)+11 {
- return false
- }
- // There are build-args, so check if anything with the build-args has changed
- if buildArgs != "" && !strings.Contains(history[len(baseHistory)].CreatedBy, buildArgs) {
- return false
- }
- fallthrough
- default:
- if !strings.Contains(history[len(baseHistory)].CreatedBy, instruction) {
- return false
- }
+ if history[len(baseHistory)].CreatedBy != b.getCreatedBy(child) {
+ return false
}
return true
}
@@ -1360,6 +1380,7 @@ func (b *Executor) getBuildArgs() string {
buildArgs = append(buildArgs, k+"="+v)
}
}
+ sort.Strings(buildArgs)
return strings.Join(buildArgs, " ")
}
@@ -1532,7 +1553,6 @@ func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, cr
options := buildah.CommitOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
- AdditionalTags: s.executor.additionalTags,
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
@@ -1718,6 +1738,24 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
}
+ if len(b.additionalTags) > 0 {
+ if dest, err := b.resolveNameToImageRef(b.output); err == nil {
+ switch dest.Transport().Name() {
+ case is.Transport.Name():
+ img, err := is.Transport.GetStoreImage(b.store, dest)
+ if err != nil {
+ return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+ }
+ if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
+ return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
+ }
+ logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
+ default:
+ logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
+ }
+ }
+ }
+
if err := cleanup(); err != nil {
return "", nil, err
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index f982fcebf..3962d1a9d 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -17,7 +17,7 @@ import (
)
func cloneToDirectory(url, dir string) error {
- if !strings.HasPrefix(url, "git://") {
+ if !strings.HasPrefix(url, "git://") && !strings.HasSuffix(url, ".git") {
url = "git://" + url
}
logrus.Debugf("cloning %q to %q", url, dir)
@@ -72,7 +72,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
if err != nil {
return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
}
- if strings.HasPrefix(url, "git://") {
+ if strings.HasPrefix(url, "git://") || strings.HasSuffix(url, ".git") {
err = cloneToDirectory(url, name)
if err != nil {
if err2 := os.Remove(name); err2 != nil {
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
new file mode 100644
index 000000000..931a45e88
--- /dev/null
+++ b/vendor/github.com/containers/buildah/install.md
@@ -0,0 +1,385 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# Installation Instructions
+
+## Installing packaged versions of buildah
+
+### [Arch Linux](https://www.archlinux.org)
+
+```bash
+sudo pacman -S buildah
+```
+
+### [Fedora](https://www.fedoraproject.org), [CentOS](https://www.centos.org)
+
+```bash
+sudo yum -y install buildah
+```
+
+### [Fedora SilverBlue](https://silverblue.fedoraproject.org)
+
+Installed by default
+
+### [Fedora CoreOS](https://coreos.fedoraproject.org)
+
+Not available. It must be installed via package layering:
+
+```bash
+rpm-ostree install buildah
+```
+
+Note: `podman build` (from [podman](https://podman.io)) is available by default.
+
+### [Gentoo](https://www.gentoo.org)
+
+```bash
+sudo emerge app-emulation/libpod
+```
+
+### [openSUSE](https://www.opensuse.org)
+
+```bash
+sudo zypper install buildah
+```
+
+### [openSUSE Kubic](https://kubic.opensuse.org)
+
+```bash
+transactional-update pkg in buildah
+```
+
+### [RHEL7](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)
+
+Subscribe, then enable Extras channel and install buildah.
+
+```bash
+sudo subscription-manager repos --enable=rhel-7-server-extras-rpms
+sudo yum -y install buildah
+```
+
+### [RHEL8 Beta](https://www.redhat.com/en/blog/powering-its-future-while-preserving-present-introducing-red-hat-enterprise-linux-8-beta?intcmp=701f2000001Cz6OAAS)
+
+```bash
+sudo yum module enable -y container-tools:1.0
+sudo yum module install -y buildah
+```
+
+### [Ubuntu](https://www.ubuntu.com)
+
+```bash
+sudo apt-get update -qq
+sudo apt-get install -qq -y software-properties-common
+sudo add-apt-repository -y ppa:projectatomic/ppa
+sudo apt-get update -qq
+sudo apt-get -qq -y install buildah
+```
+
+# Building from scratch
+
+## System Requirements
+
+### Kernel Version Requirements
+
+To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required.
+On other Linux distributions Buildah requires a kernel version of 4.0 or
+higher in order to support the OverlayFS filesystem. The kernel version can be checked
+with the `uname -a` command.
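+
+For example, the following prints just the kernel release string (a trivial check, included only for convenience):
+
+```
+ uname -r
+```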
+
+### runc Requirement
+
+Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
+encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
+[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases. If Buildah is installed
+via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process.
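+
+A quick sanity check that a runtime is already present (a sketch, assuming runc was pulled in by your package manager):
+
+```
+ command -v runc && runc --version
+```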
+
+### CNI Requirement
+
+When Buildah uses `runc` to run commands, it defaults to running those commands
+in the host's network namespace. If the command is being run in a separate
+user namespace, though, for example when ID mapping is used, then the command
+will also be run in a separate network namespace.
+
+A newly-created network namespace starts with no network interfaces, so
+commands which are run in that namespace are effectively disconnected from the
+network unless additional setup is done. Buildah relies on the CNI
+[library](https://github.com/containernetworking/cni) and
+[plugins](https://github.com/containernetworking/plugins) to set up interfaces
+and routing for network namespaces.
+
+If Buildah is installed via a package manager such as yum, dnf or apt-get, a
+package containing CNI plugins may be available (in Fedora, the package is
+named `containernetworking-cni`). If not, they will need to be installed,
+for example using:
+```
+ git clone https://github.com/containernetworking/plugins
+ ( cd ./plugins; ./build.sh )
+ mkdir -p /opt/cni/bin
+ install -v ./plugins/bin/* /opt/cni/bin
+```
+
+The CNI library needs to be configured so that it will know which plugins to
+call to set up namespaces. Usually, this configuration takes the form of one
+or more configuration files in the `/etc/cni/net.d` directory. A set of example
+configuration files is included in the
+[`docs/cni-examples`](https://github.com/containers/buildah/tree/master/docs/cni-examples)
+directory of this source tree.
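+
+One way to put those example configurations in place from a checkout of this repository (a sketch; review the files first and adjust the destination if your CNI library is configured to look elsewhere):
+
+```
+ git clone https://github.com/containers/buildah
+ sudo mkdir -p /etc/cni/net.d
+ sudo cp -v ./buildah/docs/cni-examples/* /etc/cni/net.d/
+```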
+
+## Package Installation
+
+Buildah is available on several software repositories and can be installed via a package manager such
+as yum, dnf or apt-get on a number of Linux distributions.
+
+## Installation from GitHub
+
+Prior to installing Buildah, install the following packages on your Linux distro:
+* make
+* golang (Requires version 1.10 or higher.)
+* bats
+* btrfs-progs-devel
+* bzip2
+* device-mapper-devel
+* git
+* go-md2man
+* gpgme-devel
+* glib2-devel
+* libassuan-devel
+* libseccomp-devel
+* ostree-devel
+* runc (Requires version 1.0 RC4 or higher.)
+* containers-common
+
+### Fedora
+
+In Fedora, you can use this command:
+
+```
+ dnf -y install \
+ make \
+ golang \
+ bats \
+ btrfs-progs-devel \
+ device-mapper-devel \
+ glib2-devel \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
+ ostree-devel \
+ git \
+ bzip2 \
+ go-md2man \
+ runc \
+ containers-common
+```
+
+Then to install Buildah on Fedora follow the steps in this example:
+
+```
+ mkdir ~/buildah
+ cd ~/buildah
+ export GOPATH=`pwd`
+ git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
+ cd ./src/github.com/containers/buildah
+ make
+ sudo make install
+ buildah --help
+```
+
+### RHEL, CentOS
+
+In RHEL and CentOS 7, ensure that you are subscribed to the `rhel-7-server-rpms`,
+`rhel-7-server-extras-rpms`, `rhel-7-server-optional-rpms` and `EPEL` repositories, then
+run this command:
+
+```
+ yum -y install \
+ make \
+ golang \
+ bats \
+ btrfs-progs-devel \
+ device-mapper-devel \
+ glib2-devel \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
+ ostree-devel \
+ git \
+ bzip2 \
+ go-md2man \
+ runc \
+ skopeo-containers
+```
+
+The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above.
+
+*NOTE:* Running Buildah as a non-root user is not supported on RHEL or CentOS 7.* because
+these systems do not ship newuidmap or newgidmap. It is possible to pull the
+shadow-utils source RPM from Fedora 29, then build and install it, in order to
+run Buildah as non-root on these systems.
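+
+To check whether a host can support rootless use at all (a sketch; both helpers are normally provided by shadow-utils):
+
+```
+ for tool in newuidmap newgidmap; do
+   command -v "$tool" || echo "$tool not found; run Buildah as root on this host"
+ done
+```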
+
+### openSUSE
+
+On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
+
+```
+ zypper in make \
+ git \
+ golang \
+ runc \
+ bzip2 \
+ libgpgme-devel \
+ libseccomp-devel \
+ device-mapper-devel \
+ libbtrfs-devel \
+ go-md2man
+```
+
+The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.
+
+
+### Ubuntu
+
+In Ubuntu zesty and xenial, you can use these commands:
+
+```
+ apt-get -y install software-properties-common
+ add-apt-repository -y ppa:alexlarsson/flatpak
+ add-apt-repository -y ppa:gophers/archive
+ apt-add-repository -y ppa:projectatomic/ppa
+ apt-get -y -qq update
+ apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
+ apt-get -y install golang-1.10
+```
+Then to install Buildah on Ubuntu follow the steps in this example:
+
+```
+ mkdir ~/buildah
+ cd ~/buildah
+ export GOPATH=`pwd`
+ git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
+ cd ./src/github.com/containers/buildah
+ PATH=/usr/lib/go-1.10/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
+ sudo make install install.runc
+ buildah --help
+```
+
+### Debian
+
+To install the required dependencies, you can use the following commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
+
+```
+gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
+gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
+echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
+apt update
+apt -y install -t stretch-backports libostree-dev golang
+apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
+```
+
+The build steps on Debian are otherwise the same as Ubuntu, above.
+
+## Vendoring - Dependency Management
+
+This project uses [vndr](https://github.com/LK4D4/vndr) to manage dependencies, which is otherwise a tedious and error-prone task. Doing it manually is likely to cause inconsistencies between the `./vendor` directory (i.e., the downloaded dependencies), the source code that imports those dependencies, and the `vendor.conf` configuration file that describes which version of each package (e.g., a release or git commit) is a dependency.
+
+To ease updating dependencies, we provide the `make vendor` target, which fetches all dependencies mentioned in `vendor.conf`. `make vendor` whitelists certain packages to prevent the `vndr` tool from removing packages that the test suite (see `./test`) imports.
+
+The CI of this project makes sure that each pull request leaves a clean vendor state behind by first running the aforementioned `make vendor` and then running `./hack/tree_status.sh`, which checks whether any file in the git tree has changed.
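+
+The same check can be reproduced locally before opening a pull request (a sketch using only the targets and script named above):
+
+```
+make vendor
+./hack/tree_status.sh   # reports whether `make vendor` left any changes behind in the git tree
+```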
+
+### Vendor Troubleshooting
+
+If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. Make sure to run `make vendor` and add all the changes to the commit. Also make sure that your local git tree does not include files not under version control that may reference other Go packages. If some dependencies are removed that should not be, for instance because the CI needs them, then whitelist those dependencies in the `make vendor` target of the Makefile. Whitelisting a package instructs `vndr` not to remove it during its cleanup phase.
+
+## Configuration files
+
+The following configuration files are required in order for Buildah to run appropriately. The
+majority of these files are commonly contained in the `containers-common` package.
+
+### [registries.conf](https://github.com/containers/buildah/blob/master/docs/samples/registries.conf)
+
+#### Man Page: [registries.conf.5](https://github.com/containers/image/blob/master/docs/containers-registries.conf.5.md)
+
+`/etc/containers/registries.conf`
+
+registries.conf is the configuration file that specifies which container registries should be consulted when completing image names that do not include a registry or domain portion.
+
+#### Example from the Fedora `containers-common` package
+
+```
+cat /etc/containers/registries.conf
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com', 'registry.centos.org']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registries fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = []
+```
+
+### [mounts.conf](https://src.fedoraproject.org/rpms/skopeo/blob/master/f/mounts.conf)
+
+`/usr/share/containers/mounts.conf` and optionally `/etc/containers/mounts.conf`
+
+The mounts.conf files specify volume mount directories that are automatically mounted inside containers when executing the `buildah run` or `buildah build-using-dockerfile` commands. Container processes can then use this content. The volume mount content does not get committed to the final image. This file is usually provided by the `containers-common` package.
+
+Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories.
+
+For example, given a mounts.conf with the line "`/usr/share/rhel/secrets:/run/secrets`", the content of the `/usr/share/rhel/secrets` directory is mounted on `/run/secrets` inside the container. This mountpoint allows Red Hat Enterprise Linux subscriptions from the host to be used within the container.
+
+Note that this is not a volume mount: the content of these directories is copied into container storage rather than bind mounted directly from the host.
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /usr/share/containers/mounts.conf
+/usr/share/rhel/secrets:/run/secrets
+```
+
+### [seccomp.json](https://src.fedoraproject.org/rpms/skopeo/blob/master/f/seccomp.json)
+
+`/usr/share/containers/seccomp.json`
+
+seccomp.json contains the whitelist of seccomp rules to be allowed inside of
+containers. This file is usually provided by the containers-common package.
+
+The link above takes you to the seccomp.json file.
+
+### [policy.json](https://github.com/containers/skopeo/blob/master/default-policy.json)
+
+`/etc/containers/policy.json`
+
+#### Man Page: [policy.json.5](https://github.com/containers/image/blob/master/docs/policy.json.md)
+
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /etc/containers/policy.json
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ],
+ "transports":
+ {
+ "docker-daemon":
+ {
+ "": [{"type":"insecureAcceptAnything"}]
+ }
+ }
+}
+```
diff --git a/vendor/github.com/containers/buildah/libdm_tag.sh b/vendor/github.com/containers/buildah/libdm_tag.sh
new file mode 100644
index 000000000..d1f83ba10
--- /dev/null
+++ b/vendor/github.com/containers/buildah/libdm_tag.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+tmpdir="$PWD/tmp.$RANDOM"
+mkdir -p "$tmpdir"
+trap 'rm -fr "$tmpdir"' EXIT
+cc -o "$tmpdir"/libdm_tag -ldevmapper -x c - > /dev/null 2> /dev/null << EOF
+#include <libdevmapper.h>
+int main() {
+ struct dm_task *task;
+ dm_task_deferred_remove(task);
+ return 0;
+}
+EOF
+if test $? -ne 0 ; then
+ echo libdm_no_deferred_remove
+fi
diff --git a/vendor/github.com/containers/buildah/ostree_tag.sh b/vendor/github.com/containers/buildah/ostree_tag.sh
new file mode 100644
index 000000000..bae9d5108
--- /dev/null
+++ b/vendor/github.com/containers/buildah/ostree_tag.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+if pkg-config ostree-1 2> /dev/null ; then
+ echo ostree
+else
+ echo containers_image_ostree_stub
+fi
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
index c83dcc230..26a67c35a 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
@@ -18,7 +18,7 @@ var (
// it will use the /etc/passwd and /etc/group files inside of the rootdir
// to return this information.
// userspec format [user | user:group | uid | uid:gid | user:gid | uid:group ]
-func GetUser(rootdir, userspec string) (uint32, uint32, error) {
+func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
var gid64 uint64
var gerr error = user.UnknownGroupError("error looking up group")
@@ -26,7 +26,7 @@ func GetUser(rootdir, userspec string) (uint32, uint32, error) {
userspec = spec[0]
groupspec := ""
if userspec == "" {
- return 0, 0, nil
+ return 0, 0, "/", nil
}
if len(spec) > 1 {
groupspec = spec[1]
@@ -65,15 +65,21 @@ func GetUser(rootdir, userspec string) (uint32, uint32, error) {
}
}
+ homedir, err := lookupHomedirInContainer(rootdir, uid64)
+ if err != nil {
+ homedir = "/"
+ }
+
if uerr == nil && gerr == nil {
- return uint32(uid64), uint32(gid64), nil
+ return uint32(uid64), uint32(gid64), homedir, nil
}
- err := errors.Wrapf(uerr, "error determining run uid")
+ err = errors.Wrapf(uerr, "error determining run uid")
if uerr == nil {
err = errors.Wrapf(gerr, "error determining run gid")
}
- return 0, 0, err
+
+ return 0, 0, homedir, err
}
// GetGroup returns the gid by looking it up in the /etc/group file
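A usage sketch of the changed `GetUser` signature, assuming a populated container root filesystem; the rootdir path and userspec below are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/chrootuser"
)

func main() {
	// GetUser now also returns the home directory read from the
	// container's /etc/passwd, falling back to "/" when it is unknown.
	uid, gid, home, err := chrootuser.GetUser("/var/lib/containers/rootfs", "1000:1000")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", uid, gid, home)
}
```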
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
index 79b0b24b5..6c997c4c9 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
@@ -25,3 +25,7 @@ func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid
func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
return "", 0, errors.New("UID lookup not supported")
}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ return "", errors.New("Home directory lookup not supported")
+}
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
index 583eca569..ea20fca80 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
@@ -84,6 +84,7 @@ type lookupPasswdEntry struct {
name string
uid uint64
gid uint64
+ home string
}
type lookupGroupEntry struct {
name string
@@ -135,6 +136,7 @@ func parseNextPasswd(rc *bufio.Reader) *lookupPasswdEntry {
name: fields[0],
uid: uid,
gid: gid,
+ home: fields[5],
}
}
@@ -291,3 +293,29 @@ func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up uid %q", uid))
}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewReader(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != uid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.home, nil
+ }
+
+ return "", user.UnknownUserError(fmt.Sprintf("error looking up uid %q for homedir", uid))
+}
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index 31f0c2cec..14d29a25b 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -2,6 +2,7 @@ package overlay
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -15,13 +16,27 @@ import (
// MountTemp creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
-func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (specs.Mount, string, error) {
- mount := specs.Mount{}
+func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (mount specs.Mount, contentDir string, Err error) {
- contentDir, err := store.ContainerDirectory(containerId)
+ containerDir, err := store.ContainerDirectory(containerId)
if err != nil {
return mount, "", err
}
+ contentDir = filepath.Join(containerDir, "overlay")
+ if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
+ return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
+ }
+
+ contentDir, err = ioutil.TempDir(contentDir, "")
+ if err != nil {
+ return mount, "", errors.Wrapf(err, "failed to create TempDir in the overlay %s directory", contentDir)
+ }
+ defer func() {
+ if Err != nil {
+ os.RemoveAll(contentDir)
+ }
+ }()
+
upperDir := filepath.Join(contentDir, "upper")
workDir := filepath.Join(contentDir, "work")
if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
@@ -44,3 +59,13 @@ func MountTemp(store storage.Store, containerId, source, dest string, rootUID, r
func RemoveTemp(contentDir string) error {
return os.RemoveAll(contentDir)
}
+
+// CleanupContent removes all temporary mountpoint and all content from
+// directory
+func CleanupContent(containerDir string) (Err error) {
+ contentDir := filepath.Join(containerDir, "overlay")
+ if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
+ }
+ return nil
+}
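A hedged sketch of how a caller might pair these helpers; the function and variable names are illustrative, not buildah API:

```go
package example

import (
	"github.com/containers/buildah/pkg/overlay"
	"github.com/containers/storage"
)

// withTempOverlay sketches the intended pairing: MountTemp creates a per-use
// overlay under the container's directory, RemoveTemp drops that single
// temporary directory, and CleanupContent wipes the whole "overlay" subtree
// (e.g. before the next run).
func withTempOverlay(store storage.Store, containerID, containerDir, src, dest string, rootUID, rootGID int) error {
	m, contentDir, err := overlay.MountTemp(store, containerID, src, dest, rootUID, rootGID)
	if err != nil {
		return err
	}
	_ = m // in real use, the returned specs.Mount is appended to the runtime spec
	if err := overlay.RemoveTemp(contentDir); err != nil {
		return err
	}
	return overlay.CleanupContent(containerDir)
}
```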
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 6c58f1194..61e70cdd3 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -37,6 +37,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
var (
memoryLimit int64
memorySwap int64
+ noDNS bool
err error
)
@@ -67,9 +68,26 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
}
}
+ noDNS = false
dnsServers, _ := c.Flags().GetStringSlice("dns")
+ for _, server := range dnsServers {
+ if strings.ToLower(server) == "none" {
+ noDNS = true
+ }
+ }
+ if noDNS && len(dnsServers) > 1 {
+ return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options")
+ }
+
dnsSearch, _ := c.Flags().GetStringSlice("dns-search")
+ if noDNS && len(dnsSearch) > 0 {
+ return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none")
+ }
+
dnsOptions, _ := c.Flags().GetStringSlice("dns-option")
+ if noDNS && len(dnsOptions) > 0 {
+ return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
+ }
if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
return nil, errors.Wrapf(err, "invalid --shm-size")
@@ -80,7 +98,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
}
cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
- cpuShares, _ := c.Flags().GetUint64("cpu-shared")
+ cpuShares, _ := c.Flags().GetUint64("cpu-shares")
httpProxy, _ := c.Flags().GetBool("http-proxy")
ulimit, _ := c.Flags().GetStringSlice("ulimit")
commonOpts := &buildah.CommonBuildOptions{
diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
index 97b681125..70bd6a4b7 100644
--- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
+++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
@@ -117,7 +117,12 @@ func getMounts(filePath string) []string {
}
var mounts []string
for scanner.Scan() {
- mounts = append(mounts, scanner.Text())
+ if strings.HasPrefix(strings.TrimSpace(scanner.Text()), "/") {
+ mounts = append(mounts, scanner.Text())
+ } else {
+ logrus.Debugf("skipping unrecognized mount in %v: %q",
+ filePath, scanner.Text())
+ }
}
return mounts
}
@@ -190,58 +195,79 @@ func addSecretsFromMountsFile(filePath, mountLabel, containerWorkingDir, mountPr
var mounts []rspec.Mount
defaultMountsPaths := getMounts(filePath)
for _, path := range defaultMountsPaths {
- hostDir, ctrDir, err := getMountsMap(path)
+ hostDirOrFile, ctrDirOrFile, err := getMountsMap(path)
if err != nil {
return nil, err
}
- // skip if the hostDir path doesn't exist
- if _, err = os.Stat(hostDir); err != nil {
+ // skip if the hostDirOrFile path doesn't exist
+ fileInfo, err := os.Stat(hostDirOrFile)
+ if err != nil {
if os.IsNotExist(err) {
- logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDir, filePath)
+ logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath)
continue
}
- return nil, errors.Wrapf(err, "failed to stat %q", hostDir)
+ return nil, errors.Wrapf(err, "failed to stat %q", hostDirOrFile)
}
- ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
+ ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile)
- // In the event of a restart, don't want to copy secrets over again as they already would exist in ctrDirOnHost
- _, err = os.Stat(ctrDirOnHost)
+ // In the event of a restart, don't want to copy secrets over again as they already would exist in ctrDirOrFileOnHost
+ _, err = os.Stat(ctrDirOrFileOnHost)
if os.IsNotExist(err) {
- if err = os.MkdirAll(ctrDirOnHost, 0755); err != nil {
- return nil, errors.Wrapf(err, "making container directory %q failed", ctrDirOnHost)
- }
- hostDir, err = resolveSymbolicLink(hostDir)
+
+ hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile)
if err != nil {
return nil, err
}
- data, err := getHostSecretData(hostDir)
- if err != nil {
- return nil, errors.Wrapf(err, "getting host secret data failed")
- }
- for _, s := range data {
- if err := s.saveTo(ctrDirOnHost); err != nil {
- return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOnHost)
+ switch mode := fileInfo.Mode(); {
+ case mode.IsDir():
+ if err = os.MkdirAll(ctrDirOrFileOnHost, 0755); err != nil {
+ return nil, errors.Wrapf(err, "making container directory %q failed", ctrDirOrFileOnHost)
+ }
+ data, err := getHostSecretData(hostDirOrFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "getting host secret data failed")
+ }
+ for _, s := range data {
+ if err := s.saveTo(ctrDirOrFileOnHost); err != nil {
+ return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost)
+ }
+ }
+ case mode.IsRegular():
+ data, err := readFile("", hostDirOrFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading file %q", hostDirOrFile)
+
+ }
+ for _, s := range data {
+ if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), 0700); err != nil {
+ return nil, err
+ }
+ if err := ioutil.WriteFile(ctrDirOrFileOnHost, s.data, 0700); err != nil {
+ return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost)
+ }
}
+ default:
+ return nil, errors.Errorf("unsupported file type for: %q", hostDirOrFile)
}
- err = label.Relabel(ctrDirOnHost, mountLabel, false)
+ err = label.Relabel(ctrDirOrFileOnHost, mountLabel, false)
if err != nil {
return nil, errors.Wrap(err, "error applying correct labels")
}
if uid != 0 || gid != 0 {
- if err := rchown(ctrDirOnHost, uid, gid); err != nil {
+ if err := rchown(ctrDirOrFileOnHost, uid, gid); err != nil {
return nil, err
}
}
} else if err != nil {
- return nil, errors.Wrapf(err, "error getting status of %q", ctrDirOnHost)
+ return nil, errors.Wrapf(err, "error getting status of %q", ctrDirOrFileOnHost)
}
m := rspec.Mount{
- Source: filepath.Join(mountPrefix, ctrDir),
- Destination: ctrDir,
+ Source: filepath.Join(mountPrefix, ctrDirOrFile),
+ Destination: ctrDirOrFile,
Type: "bind",
Options: []string{"bind", "rprivate"},
}
diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.c b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
index 67a3e0e4d..fd0d48d43 100644
--- a/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
@@ -3,7 +3,7 @@
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
-#include <linux/memfd.h>
+#include <sys/mman.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
@@ -14,6 +14,17 @@
#include <errno.h>
#include <unistd.h>
+/* Open source projects like conda-forge want to package podman and are based
+   on centos:6, which has minimal libc requirements and lacks
+   the memfd.h header, so we use mman.h instead.
+*/
+#ifndef MFD_ALLOW_SEALING
+#define MFD_ALLOW_SEALING 2U
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 1U
+#endif
+
#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
index 33232740e..21b102cf5 100644
--- a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
@@ -64,6 +64,7 @@ func (c *Cmd) Start() error {
if os.Geteuid() != 0 {
c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
+ c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", os.Getegid()))
}
// Create the pipe for reading the child's PID.
@@ -183,6 +184,7 @@ func (c *Cmd) Start() error {
for _, m := range c.GidMappings {
fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
+ gidmapSet := false
// Set the GID map.
if c.UseNewgidmap {
cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
@@ -190,11 +192,28 @@ func (c *Cmd) Start() error {
cmd.Stdout = g
cmd.Stderr = g
err := cmd.Run()
- if err != nil {
- fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String())
- return errors.Wrapf(err, "error running newgidmap: %s", g.String())
+ if err == nil {
+ gidmapSet = true
+ } else {
+ logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
+ logrus.Warnf("falling back to single mapping")
+ g.Reset()
+ g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
+ }
+ }
+ if !gidmapSet {
+ if c.UseNewgidmap {
+ setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+ if err != nil {
+ fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
+ return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ }
+ defer setgroups.Close()
+ if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
+ fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
+ return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+ }
}
- } else {
gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
@@ -214,6 +233,7 @@ func (c *Cmd) Start() error {
for _, m := range c.UidMappings {
fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
+ uidmapSet := false
// Set the GID map.
if c.UseNewuidmap {
cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
@@ -221,11 +241,16 @@ func (c *Cmd) Start() error {
cmd.Stdout = u
cmd.Stderr = u
err := cmd.Run()
- if err != nil {
- fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String())
- return errors.Wrapf(err, "error running newuidmap: %s", u.String())
+ if err == nil {
+ uidmapSet = true
+ } else {
+ logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
+ logrus.Warnf("falling back to single mapping")
+ u.Reset()
+ u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
}
- } else {
+ }
+ if !uidmapSet {
uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
@@ -354,7 +379,9 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// range in /etc/subuid and /etc/subgid file is a starting host
// ID and a range size.
uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
- bailOnError(err, "error reading allowed ID mappings")
+ if err != nil {
+ logrus.Warnf("error reading allowed ID mappings: %v", err)
+ }
if len(uidmap) == 0 {
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
}
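What "falling back to single mapping" amounts to, as a stripped-down standalone sketch (no newgidmap, reduced error handling); the helper name is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// writeSingleGIDMapping denies setgroups for the child and then maps only
// the invoking user's GID to 0, mirroring the fallback used when running
// newgidmap fails.
func writeSingleGIDMapping(pid int) error {
	if err := ioutil.WriteFile(fmt.Sprintf("/proc/%d/setgroups", pid), []byte("deny"), 0); err != nil {
		return err
	}
	mapping := fmt.Sprintf("0 %d 1\n", os.Getegid())
	return ioutil.WriteFile(fmt.Sprintf("/proc/%d/gid_map", pid), []byte(mapping), 0)
}

func main() {
	// Intended to be called by the parent on a child started in a fresh
	// user namespace; calling it on an arbitrary process will fail.
	if err := writeSingleGIDMapping(os.Getpid()); err != nil {
		fmt.Println("mapping not written:", err)
	}
}
```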
diff --git a/vendor/github.com/containers/buildah/release.sh b/vendor/github.com/containers/buildah/release.sh
new file mode 100644
index 000000000..5c09941ba
--- /dev/null
+++ b/vendor/github.com/containers/buildah/release.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+#
+# Cut a buildah release. Usage:
+#
+# $ hack/release.sh <version> <next-version>
+#
+# For example:
+#
+# $ hack/release.sh 1.2.3 1.3.0
+#
+# for "I'm cutting 1.2.3, and want to use 1.3.0-dev for future work".
+
+VERSION="$1"
+NEXT_VERSION="$2"
+DATE=$(date '+%Y-%m-%d')
+LAST_TAG=$(git describe --tags --abbrev=0)
+
+write_go_version()
+{
+ LOCAL_VERSION="$1"
+ sed -i "s/^\(.*Version = \"\).*/\1${LOCAL_VERSION}\"/" buildah.go
+}
+
+write_spec_version()
+{
+ LOCAL_VERSION="$1"
+ sed -i "s/^\(Version: *\).*/\1${LOCAL_VERSION}/" contrib/rpm/buildah.spec
+}
+
+write_makefile_epoch()
+{
+ LOCAL_EPOCH="$1"
+ sed -i "s/^\(EPOCH_TEST_COMMIT ?= \).*/\1${LOCAL_EPOCH}/" Makefile
+}
+
+write_changelog()
+{
+ echo "- Changelog for v${VERSION} (${DATE})" >.changelog.txt &&
+ git log --no-merges --format=' * %s' "${LAST_TAG}..HEAD" >>.changelog.txt &&
+ echo >>.changelog.txt &&
+ cat changelog.txt >>.changelog.txt &&
+ mv -f .changelog.txt changelog.txt
+}
+
+release_commit()
+{
+ write_go_version "${VERSION}" &&
+ write_spec_version "${VERSION}" &&
+ write_changelog &&
+ git commit -asm "Bump to v${VERSION}"
+}
+
+dev_version_commit()
+{
+ write_go_version "${NEXT_VERSION}-dev" &&
+ write_spec_version "${NEXT_VERSION}" &&
+ git commit -asm "Bump to v${NEXT_VERSION}-dev"
+}
+
+epoch_commit()
+{
+ LOCAL_EPOCH="$1"
+ write_makefile_epoch "${LOCAL_EPOCH}" &&
+ git commit -asm 'Bump gitvalidation epoch'
+}
+
+git fetch origin &&
+git checkout -b "bump-${VERSION}" origin/master &&
+EPOCH=$(git rev-parse HEAD) &&
+release_commit &&
+git tag -s -m "version ${VERSION}" "v${VERSION}" &&
+dev_version_commit &&
+epoch_commit "${EPOCH}"
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 81ce2b944..0bf37da59 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -131,7 +131,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return err
}
- if err := b.configureUIDGID(g, mountPoint, options); err != nil {
+ homeDir, err := b.configureUIDGID(g, mountPoint, options)
+ if err != nil {
return err
}
@@ -173,7 +174,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
bindFiles["/etc/hosts"] = hostFile
}
- if !contains(volumes, "/etc/resolv.conf") {
+ if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) {
resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions)
if err != nil {
return err
@@ -210,7 +211,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
- err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
+ err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
moreCreateArgs := []string{"--no-new-keyring"}
if options.NoPivot {
@@ -433,7 +434,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
- copyWithTar := b.copyWithTar(nil, nil)
+ copyWithTar := b.copyWithTar(nil, nil, nil)
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
@@ -1048,6 +1049,18 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
return teardown, nil
}
+func setNonblock(fd int, description string, nonblocking bool) error {
+ err := unix.SetNonblock(fd, nonblocking)
+ if err != nil {
+ if nonblocking {
+ logrus.Errorf("error setting %s to nonblocking: %v", description, err)
+ } else {
+ logrus.Errorf("error setting descriptor %s blocking: %v", description, err)
+ }
+ }
+ return err
+}
+
func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) {
defer func() {
unix.Close(finishCopy[0])
@@ -1115,14 +1128,22 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
}
// Set our reading descriptors to non-blocking.
for rfd, wfd := range relayMap {
- if err := unix.SetNonblock(rfd, true); err != nil {
- logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err)
+ if err := setNonblock(rfd, readDesc[rfd], true); err != nil {
return
}
- if err := unix.SetNonblock(wfd, false); err != nil {
- logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err)
- }
+ setNonblock(wfd, writeDesc[wfd], false)
}
+
+ if copyPipes {
+ setNonblock(stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true)
+ }
+
+ runCopyStdioPassData(stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec, relayMap, relayBuffer, readDesc, writeDesc)
+}
+
+func runCopyStdioPassData(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) {
+ closeStdin := false
+
// Pass data back and forth.
pollTimeout := -1
for len(relayMap) > 0 {
@@ -1154,12 +1175,6 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
}
// If the POLLIN flag isn't set, then there's no data to be read from this descriptor.
if pollFd.Revents&unix.POLLIN == 0 {
- // If we're using pipes and it's our stdin and it's closed, close the writing
- // end of the corresponding pipe.
- if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 {
- unix.Close(stdioPipe[unix.Stdin][1])
- stdioPipe[unix.Stdin][1] = -1
- }
continue
}
// Read whatever there is to be read.
@@ -1174,10 +1189,8 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
// using pipes, it's an EOF, so close the stdin
// pipe's writing end.
if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin {
- unix.Close(stdioPipe[unix.Stdin][1])
- stdioPipe[unix.Stdin][1] = -1
- }
- if n > 0 {
+ removes[int(pollFd.Fd)] = struct{}{}
+ } else if n > 0 {
// Buffer the data in case we get blocked on where they need to go.
nwritten, err := relayBuffer[writeFD].Write(buf[:n])
if err != nil {
@@ -1221,6 +1234,11 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
if n > 0 {
relayBuffer[writeFD].Next(n)
}
+ if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+ logrus.Debugf("closing stdin")
+ unix.Close(stdioPipe[unix.Stdin][1])
+ stdioPipe[unix.Stdin][1] = -1
+ }
}
if relayBuffer[writeFD].Len() > 0 {
pollTimeout = 100
@@ -1228,6 +1246,14 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
}
// Remove any descriptors which we don't need to poll any more from the poll descriptor list.
for remove := range removes {
+ if copyPipes && remove == unix.Stdin {
+ closeStdin = true
+ if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+ logrus.Debugf("closing stdin")
+ unix.Close(stdioPipe[unix.Stdin][1])
+ stdioPipe[unix.Stdin][1] = -1
+ }
+ }
delete(relayMap, remove)
}
// If the we-can-return pipe had anything for us, we're done.
@@ -1452,9 +1478,20 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i
}
}
}
- if configureNetwork {
+ if configureNetwork && !unshare.IsRootless() {
for name, val := range util.DefaultNetworkSysctl {
- g.AddLinuxSysctl(name, val)
+ // Check that the sysctl we are adding is actually supported
+ // by the kernel
+ p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
+ _, err := os.Stat(p)
+ if err != nil && !os.IsNotExist(err) {
+ return false, nil, false, errors.Wrapf(err, "cannot stat %s", p)
+ }
+ if err == nil {
+ g.AddLinuxSysctl(name, val)
+ } else {
+ logrus.Warnf("ignoring sysctl %s since %s doesn't exist", name, p)
+ }
}
}
return configureNetwork, configureNetworks, configureUTS, nil
@@ -1552,6 +1589,15 @@ func (b *Builder) cleanupTempVolumes() {
func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID int) (mounts []specs.Mount, Err error) {
+ // Make sure the overlay directory is clean before running
+ containerDir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up container directory for %s", b.ContainerID)
+ }
+ if err := overlay.CleanupContent(containerDir); err != nil {
+ return nil, errors.Wrapf(err, "error cleaning up overlay content for %s", b.ContainerID)
+ }
+
parseMount := func(host, container string, options []string) (specs.Mount, error) {
var foundrw, foundro, foundz, foundZ, foundO bool
var rootProp string
@@ -1775,14 +1821,14 @@ func getDNSIP(dnsServers []string) (dns []net.IP, err error) {
return dns, nil
}
-func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) error {
+func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) {
// Set the user UID/GID/supplemental group list/capabilities lists.
- user, err := b.user(mountPoint, options.User)
+ user, homeDir, err := b.user(mountPoint, options.User)
if err != nil {
- return err
+ return "", err
}
if err := setupCapabilities(g, b.AddCapabilities, b.DropCapabilities, options.AddCapabilities, options.DropCapabilities); err != nil {
- return err
+ return "", err
}
g.SetProcessUID(user.UID)
g.SetProcessGID(user.GID)
@@ -1797,7 +1843,7 @@ func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, opti
g.Config.Process.Capabilities.Bounding = bounding
}
- return nil
+ return homeDir, nil
}
func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) {
diff --git a/vendor/github.com/containers/buildah/selinux_tag.sh b/vendor/github.com/containers/buildah/selinux_tag.sh
new file mode 100644
index 000000000..ff80fda04
--- /dev/null
+++ b/vendor/github.com/containers/buildah/selinux_tag.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+if pkg-config libselinux 2> /dev/null ; then
+ echo selinux
+fi
diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md
new file mode 100644
index 000000000..4b13bf317
--- /dev/null
+++ b/vendor/github.com/containers/buildah/troubleshooting.md
@@ -0,0 +1,110 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# Troubleshooting
+
+## A list of common issues and solutions for Buildah
+
+---
+### 1) No such image
+
+When doing a `buildah pull` or `buildah bud` command and a "common" image cannot be pulled,
+it is likely that the `/etc/containers/registries.conf` file is either not installed or possibly
+misconfigured. This issue might also indicate that other required files, as listed in the
+[Configuration Files](https://github.com/containers/buildah/blob/master/install.md#configuration-files)
+section of the Installation Instructions, are not installed.
+
+#### Symptom
+```console
+$ sudo buildah bud -f Dockerfile .
+STEP 1: FROM alpine
+error creating build container: 2 errors occurred:
+
+* Error determining manifest MIME type for docker://localhost/alpine:latest: pinging docker registry returned: Get https://localhost/v2/: dial tcp [::1]:443: connect: connection refused
+* Error determining manifest MIME type for docker://registry.access.redhat.com/alpine:latest: Error reading manifest latest in registry.access.redhat.com/alpine: unknown: Not Found
+error building: error creating build container: no such image "alpine" in registry: image not known
+```
+
+#### Solution
+
+ * Verify that the `/etc/containers/registries.conf` file exists. If not, verify that the containers-common package is installed.
+ * Verify that the entries in the `[registries.search]` section of the `/etc/containers/registries.conf` file are valid and reachable.
+ * Verify that the image you requested is either fully qualified, or that it exists on one of your search registries.
+ * Verify that the image is public or that you have logged in to at least one search registry which contains the private image.
+ * Verify that the other required [Configuration Files](https://github.com/containers/buildah/blob/master/install.md#configuration-files) are installed.
+
+---
+### 2) http: server gave HTTP response to HTTPS client
+
+When doing a Buildah command such as `bud`, `commit`, `from`, or `push` to a registry,
+TLS verification is turned on by default. If the registry only supports HTTP, or does not
+present a valid certificate, this error can occur.
+
+#### Symptom
+```console
+# buildah push alpine docker://localhost:5000/myalpine:latest
+Getting image source signatures
+Get https://localhost:5000/v2/: http: server gave HTTP response to HTTPS client
+```
+
+#### Solution
+
+By default, TLS verification is turned on when communicating with registries from
+Buildah. If the registry does not support TLS, Buildah commands
+such as `bud`, `commit`, `from` and `pull` will fail unless TLS verification is turned
+off using the `--tls-verify=false` option. **NOTE:** Communicating with a registry
+without TLS verification is not recommended.
+
+ * Turn off TLS verification by passing the `--tls-verify=false` option.
+ * E.g. `buildah push --tls-verify=false alpine docker://localhost:5000/myalpine:latest`
+
+---
+### 3) `buildah run` command fails with pipe or output redirection
+
+When doing a `buildah run` command while using a pipe ('|') or output redirection ('>>'),
+the command will fail, often with a `command not found` type of error.
+
+#### Symptom
+When executing a `buildah run` command with a pipe or output redirection such as the
+following commands:
+
+```console
+# buildah run $whalecontainer /usr/games/fortune -a | cowsay
+# buildah run $newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf
+# buildah run $newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html
+```
+the `buildah run` command will not complete and an error will be raised.
+
+#### Solution
+There are two solutions to this problem. The
+[`podman run`](https://github.com/containers/libpod/blob/master/docs/podman-run.1.md)
+command can be used in place of `buildah run`. To still use `buildah run`, surround
+the command with single quotes and use `bash -c`. The previous examples would be
+changed to:
+
+```console
+# buildah run $whalecontainer -- bash -c '/usr/games/fortune -a | cowsay'
+# buildah run $newcontainer -- bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+# buildah run $newcontainer -- bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html'
+```
+
+---
+### 4) `buildah push alpine oci:~/myalpine:latest` fails with lstat error
+
+When doing a `buildah push` command and the target image has a tilde (`~`) character
+in it, an lstat error will be raised stating there is no such file or directory.
+This is expected behavior for shell expansion of the tilde character as it is only
+expanded at the start of a word. This behavior is documented
+[here](https://www.gnu.org/software/libc/manual/html_node/Tilde-Expansion.html).
+
+#### Symptom
+```console
+$ sudo buildah pull alpine
+$ sudo buildah push alpine oci:~/myalpine:latest
+lstat /home/myusername/~: no such file or directory
+```
+
+#### Solution
+
+ * Replace `~` with `$HOME` or the fully specified directory `/home/myusername`.
+ * `$ sudo buildah push alpine oci:${HOME}/myalpine:latest`
+---
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 08fb99706..ce21d2651 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,9 +1,12 @@
package buildah
import (
+ "archive/tar"
"io"
"os"
+ "path/filepath"
+ "github.com/containers/buildah/util"
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/sysregistries"
"github.com/containers/image/pkg/sysregistriesv2"
@@ -12,7 +15,9 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/system"
"github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
@@ -105,19 +110,108 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
}
// copyFileWithTar returns a function which copies a single file from outside
-// of any container into our working container, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- return chrootarchive.CopyFileWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
+// of any container, or another container, into our working container, mapping
+// read permissions using the passed-in ID maps, writing using the container's
+// ID mappings, possibly overridden using the passed-in chownOpts
+func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
+ if tarIDMappingOptions == nil {
+ tarIDMappingOptions = &IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+ }
+ return func(src, dest string) error {
+ logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
+ f, err := os.Open(src)
+ if err != nil {
+ return errors.Wrapf(err, "error opening %q to copy its contents", src)
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ }()
+
+ sysfi, err := system.Lstat(src)
+ if err != nil {
+ return errors.Wrapf(err, "error reading attributes of %q", src)
+ }
+
+ hostUID := sysfi.UID()
+ hostGID := sysfi.GID()
+ containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID)
+ if err != nil {
+ return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
+ }
+
+ fi, err := os.Lstat(src)
+ if err != nil {
+ return errors.Wrapf(err, "error reading attributes of %q", src)
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
+ if err != nil {
+ return errors.Wrapf(err, "error generating tar header for: %q", src)
+ }
+ hdr.Name = filepath.Base(dest)
+ hdr.Uid = int(containerUID)
+ hdr.Gid = int(containerGID)
+
+ pipeReader, pipeWriter := io.Pipe()
+ writer := tar.NewWriter(pipeWriter)
+ var copyErr error
+ go func(srcFile *os.File) {
+ err := writer.WriteHeader(hdr)
+ if err != nil {
+ logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
+ copyErr = err
+ }
+ n, err := pools.Copy(writer, srcFile)
+ if n != hdr.Size {
+ logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
+ }
+ if err != nil {
+ logrus.Debugf("error reading %s: %v", srcFile.Name(), err)
+ copyErr = err
+ }
+ if err = writer.Close(); err != nil {
+ logrus.Debugf("error closing write pipe for %s: %v", srcFile.Name(), err)
+ }
+ if err = srcFile.Close(); err != nil {
+ logrus.Debugf("error closing %s: %v", srcFile.Name(), err)
+ }
+ pipeWriter.Close()
+ pipeWriter = nil
+ return
+ }(f)
+
+ untar := b.untar(chownOpts, hasher)
+ err = untar(pipeReader, filepath.Dir(dest))
+ if err == nil {
+ err = copyErr
+ }
+ f = nil
+ if pipeWriter != nil {
+ pipeWriter.Close()
+ }
+ return err
+ }
}
// copyWithTar returns a function which copies a directory tree from outside of
-// any container into our working container, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
- convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- return chrootarchive.CopyWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
+// our container or from another container, into our working container, mapping
+// permissions at read-time using the container's ID maps, with ownership at
+// write-time possibly overridden using the passed-in chownOpts
+func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
+ tar := b.tarPath(tarIDMappingOptions)
+ untar := b.untar(chownOpts, hasher)
+ return func(src, dest string) error {
+ rc, err := tar(src)
+ if err != nil {
+ return errors.Wrapf(err, "error archiving %q for copy", src)
+ }
+ return untar(rc, dest)
+ }
}
// untarPath returns a function which extracts an archive in a specified
@@ -128,12 +222,58 @@ func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(sr
return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
-// tarPath returns a function which creates an archive of a specified
+// tarPath returns a function which creates an archive of a specified location,
+// which is often somewhere in the container's filesystem, mapping permissions
+// using the container's ID maps, or the passed-in maps if specified
+func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) {
+ var uidmap, gidmap []idtools.IDMap
+ if idMappingOptions == nil {
+ idMappingOptions = &IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+ }
+ convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap)
+ tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
+ uidmap = tarMappings.UIDs()
+ gidmap = tarMappings.GIDs()
+ options := &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ UIDMaps: uidmap,
+ GIDMaps: gidmap,
+ }
+ return func(path string) (io.ReadCloser, error) {
+ return archive.TarWithOptions(path, options)
+ }
+}
+
+// untar returns a function which extracts an archive stream to a specified
// location in the container's filesystem, mapping permissions using the
-// container's ID maps
-func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArchive io.ReadCloser, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- return archive.TarPath(convertedUIDMap, convertedGIDMap)
+ untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
+ options := &archive.TarOptions{
+ UIDMaps: untarMappings.UIDs(),
+ GIDMaps: untarMappings.GIDs(),
+ ChownOpts: chownOpts,
+ }
+ untar := chrootarchive.Untar
+ if hasher != nil {
+ originalUntar := untar
+ untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return func(tarArchive io.ReadCloser, dest string) error {
+ err := untar(tarArchive, dest, options)
+ if err2 := tarArchive.Close(); err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ }
+ return err
+ }
}
// isRegistryBlocked checks if the named registry is marked as blocked
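The single-file path above is built on a small tar-over-pipe pattern. Here is a stripped-down, standalone sketch of that pattern using only the standard library (no ID mapping, no chroot), for orientation only:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// copyFileViaTar streams a single file through an in-memory tar stream,
// which is roughly the shape of copyFileWithTar with the ID-mapping and
// chroot pieces stripped out.
func copyFileViaTar(src, dest string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	fi, err := os.Lstat(src)
	if err != nil {
		return err
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return err
	}
	hdr.Name = filepath.Base(dest)

	pr, pw := io.Pipe()
	go func() {
		tw := tar.NewWriter(pw)
		werr := tw.WriteHeader(hdr)
		if werr == nil {
			_, werr = io.Copy(tw, f)
		}
		tw.Close()
		pw.CloseWithError(werr)
	}()

	// A real implementation hands pr to an untar helper that applies the
	// container's ID mappings; here we just unpack the stream directly.
	tr := tar.NewReader(pr)
	if _, err := tr.Next(); err != nil {
		return err
	}
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, tr)
	return err
}

func main() {
	if err := copyFileViaTar("/etc/hostname", filepath.Join(os.TempDir(), "hostname-copy")); err != nil {
		fmt.Println("copy failed:", err)
	}
}
```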
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 30afe8313..4736d7b77 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -106,13 +106,19 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
// Figure out the list of registries.
var registries []string
- searchRegistries, err := sysregistriesv2.FindUnqualifiedSearchRegistries(sc)
+ searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc)
if err != nil {
logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
+ searchRegistries = nil
}
for _, registry := range searchRegistries {
- if !registry.Blocked {
- registries = append(registries, registry.Location)
+ reg, err := sysregistriesv2.FindRegistry(sc, registry)
+ if err != nil {
+ logrus.Debugf("unable to read registry configuraitno for %#v: %v", registry, err)
+ continue
+ }
+ if reg == nil || !reg.Blocked {
+ registries = append(registries, registry)
}
}
searchRegistriesAreEmpty := len(registries) == 0
@@ -257,6 +263,36 @@ func StringInSlice(s string, slice []string) bool {
return false
}
+// GetContainerIDs uses ID mappings to compute the container-level IDs that will
+// correspond to a UID/GID pair on the host.
+func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
+ uidMapped := true
+ for _, m := range uidmap {
+ uidMapped = false
+ if uid >= m.HostID && uid < m.HostID+m.Size {
+ uid = (uid - m.HostID) + m.ContainerID
+ uidMapped = true
+ break
+ }
+ }
+ if !uidMapped {
+ return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+ }
+ gidMapped := true
+ for _, m := range gidmap {
+ gidMapped = false
+ if gid >= m.HostID && gid < m.HostID+m.Size {
+ gid = (gid - m.HostID) + m.ContainerID
+ gidMapped = true
+ break
+ }
+ }
+ if !gidMapped {
+ return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+ }
+ return uid, gid, nil
+}
+
// GetHostIDs uses ID mappings to compute the host-level IDs that will
// correspond to a UID/GID pair in the container.
func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
@@ -270,7 +306,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
}
}
if !uidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map UID %d", uid)
+ return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
@@ -282,7 +318,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
}
}
if !gidMapped {
- return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map GID %d", gid)
+ return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
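A usage sketch of the new helper with a single made-up mapping, showing the direction of the conversion (host ID in, container ID out):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/util"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// One illustrative mapping: host IDs 100000-165535 map to container IDs 0-65535.
	uidmap := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidmap := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Host UID/GID 100123 falls inside the mapped range, so both become 123.
	uid, gid, err := util.GetContainerIDs(uidmap, gidmap, 100123, 100123)
	if err != nil {
		fmt.Println("unmapped:", err)
		return
	}
	fmt.Println(uid, gid) // 123 123
}
```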
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index 0c982626a..88148947a 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,12 +3,12 @@ github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
-github.com/containers/image 9467ac9cfd92c545aa389f22f27e552de053c0f2
+github.com/containers/image v2.0.0
github.com/cyphar/filepath-securejoin v0.2.1
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
-github.com/containers/storage v1.12.7
+github.com/containers/storage v1.12.10
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md
deleted file mode 100644
index 571e8342e..000000000
--- a/vendor/github.com/containers/image/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://travis-ci.org/containers/image.svg?branch=master)](https://travis-ci.org/containers/image)
-=
-
-`image` is a set of Go libraries aimed at working in various way with
-containers' images and container image registries.
-
-The containers/image library allows application to pull and push images from
-container image registries, like the upstream docker registry. It also
-implements "simple image signing".
-
-The containers/image library also allows you to inspect a repository on a
-container registry without pulling down the image. This means it fetches the
-repository's manifest and it is able to show you a `docker inspect`-like json
-output about a whole repository or a tag. This library, in contrast to `docker
-inspect`, helps you gather useful information about a repository or a tag
-without requiring you to run `docker pull`.
-
-The containers/image library also allows you to translate from one image format
-to another, for example docker container images to OCI images. It also allows
-you to copy container images between various registries, possibly converting
-them as necessary, and to sign and verify images.
-
-## Command-line usage
-
-The containers/image project is only a library with no user interface;
-you can either incorporate it into your Go programs, or use the `skopeo` tool:
-
-The [skopeo](https://github.com/containers/skopeo) tool uses the
-containers/image library and takes advantage of many of its features,
-e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
-
-## Dependencies
-
-This library does not ship a committed version of its dependencies in a `vendor`
-subdirectory. This is so you can make well-informed decisions about which
-libraries you should use with this package in your own projects, and because
-types defined in the `vendor` directory would be impossible to use from your projects.
-
-What this project tests against dependencies-wise is located
-[in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf).
-
-## Building
-
-If you want to see what the library can do, or an example of how it is called,
-consider starting with the [skopeo](https://github.com/containers/skopeo) tool
-instead.
-
-To integrate this library into your project, put it into `$GOPATH` or use
-your preferred vendoring tool to include a copy in your project.
-Ensure that the dependencies documented [in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf)
-are also available
-(using those exact versions or different versions of your choosing).
-
-This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
-```sh
-Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
-macOS$ brew install gpgme
-```
-or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
-
-### Supported build tags
-
-- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
-the primary downside is that creating new signatures with the Golang-only implementation is not supported.
-- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
-and impossible to import when this build tag is in use.
-
-## [Contributing](CONTRIBUTING.md)
-
-Information about contributing to this project.
-
-When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
-
-## License
-
-Apache License 2.0
-
-SPDX-License-Identifier: Apache-2.0
-
-## Contact
-
-- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
-- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 3ed8a2b82..f1b029f97 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -597,15 +597,32 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
prefix = prefix[:maxPrefixLen]
}
- bar := pool.AddBar(info.Size,
- mpb.BarClearOnComplete(),
- mpb.PrependDecorators(
- decor.Name(prefix),
- ),
- mpb.AppendDecorators(
- decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
- ),
- )
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ bar = pool.AddBar(info.Size,
+ mpb.BarClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.Name(prefix),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
+ ),
+ )
+ } else {
+ bar = pool.AddSpinner(info.Size,
+ mpb.SpinnerOnLeft,
+ mpb.BarClearOnComplete(),
+ mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
+ mpb.PrependDecorators(
+ decor.Name(prefix),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.Name(""), " "+onComplete),
+ ),
+ )
+ }
if c.progressOutput == ioutil.Discard {
c.Printf("Copying %s %s\n", kind, info.Digest)
}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index c8fdb407c..6951f31e9 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -29,44 +29,16 @@ type dockerImageSource struct {
cachedManifestMIMEType string // Only valid if cachedManifest != nil
}
-// newImageSource creates a new `ImageSource` for the specified image reference
-// `ref`.
-//
-// The following steps will be done during the instance creation:
-//
-// - Lookup the registry within the configured location in
-// `sys.SystemRegistriesConfPath`. If there is no configured registry available,
-// we fallback to the provided docker reference `ref`.
-//
-// - References which contain a configured prefix will be automatically rewritten
-// to the correct target reference. For example, if the configured
-// `prefix = "example.com/foo"`, `location = "example.com"` and the image will be
-// pulled from the ref `example.com/foo/image`, then the resulting pull will
-// effectively point to `example.com/image`.
-//
-// - If the rewritten reference succeeds, it will be used as the `dockerRef`
-// in the client. If the rewrite fails, the function immediately returns an error.
-//
-// - Each mirror will be used (in the configured order) to test the
-// availability of the image manifest on the remote location. For example,
-// if the manifest is not reachable due to connectivity issues, then the next
-// mirror will be tested instead. If no mirror is configured or contains the
-// target manifest, then the initial `ref` will be tested as fallback. The
-// creation of the new `dockerImageSource` only succeeds if a remote
-// location with the available manifest was found.
-//
-// A cleanup call to `.Close()` is needed if the caller is done using the returned
-// `ImageSource`.
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
return nil, errors.Wrapf(err, "error loading registries configuration")
}
-
if registry == nil {
- // No configuration was found for the provided reference, so we create
- // a fallback registry by hand to make the client creation below work
- // as intended.
+ // No configuration was found for the provided reference, so use the
+ // equivalent of a default configuration.
registry = &sysregistriesv2.Registry{
Endpoint: sysregistriesv2.Endpoint{
Location: ref.ref.String(),
@@ -76,18 +48,19 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
primaryDomain := reference.Domain(ref.ref)
- // Found the registry within the sysregistriesv2 configuration. Now we test
- // all endpoints for the manifest availability. If a working image source
- // was found, it will be used for all future pull actions.
+ // Check all endpoints for the manifest availability. If we find one that does
+ // contain the image, it will be used for all future pull actions. Always try the
+ // non-mirror original location last; this both transparently handles the case
+ // of no mirrors configured, and ensures we return the error encountered when
+ // acessing the upstream location if all endpoints fail.
manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
- for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
- logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location)
-
- newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix)
- if err != nil {
- return nil, err
- }
- dockerRef, err := newReference(newRef)
+ pullSources, err := registry.PullSourcesFromReference(ref.ref)
+ if err != nil {
+ return nil, err
+ }
+ for _, pullSource := range pullSources {
+ logrus.Debugf("Trying to pull %q", pullSource.Reference)
+ dockerRef, err := newReference(pullSource.Reference)
if err != nil {
return nil, err
}
@@ -104,7 +77,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
if err != nil {
return nil, err
}
- client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure
+ client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
testImageSource := &dockerImageSource{
ref: dockerRef,
@@ -165,8 +138,9 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
- headers := make(map[string][]string)
- headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
if err != nil {
return nil, "", err
@@ -408,11 +382,9 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
- // When retrieving the digest from a registry >= 2.3 use the following header:
- // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
- headers := make(map[string][]string)
- headers["Accept"] = []string{manifest.DockerV2Schema2MediaType}
-
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
refTail, err := ref.tagOrDigest()
if err != nil {
return err
diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md
index 53a88de82..3c4d74eb4 100644
--- a/vendor/github.com/containers/image/docker/reference/README.md
+++ b/vendor/github.com/containers/image/docker/reference/README.md
@@ -1,2 +1,2 @@
-This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8,
+This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8,
except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file
diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go
index fcc436a39..6a86ec64f 100644
--- a/vendor/github.com/containers/image/docker/reference/normalize.go
+++ b/vendor/github.com/containers/image/docker/reference/normalize.go
@@ -55,6 +55,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
return named, nil
}
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The returned reference can only be either tagged or digested. If the reference contains both a
+// tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := named.(NamedTagged); ok {
+ if canonical, ok := named.(Canonical); ok {
+ // The reference is both tagged and digested, only
+ // return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ newCanonical, err := WithDigest(newNamed, canonical.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return newCanonical, nil
+ }
+ }
+ return TagNameOnly(named), nil
+}
+
// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
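A small usage sketch for the ParseDockerRef helper added above (a standalone example, not from the vendored code); the image names are illustrative only.

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// A reference with both a tag and a digest: the digest wins, the tag is dropped.
	ref, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:7cc4b5ae...

	// A bare name is normalized and gets the default :latest tag.
	ref, err = reference.ParseDockerRef("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox:latest
}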
diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go
index fd3510e9e..8c0c23b2f 100644
--- a/vendor/github.com/containers/image/docker/reference/reference.go
+++ b/vendor/github.com/containers/image/docker/reference/reference.go
@@ -15,7 +15,7 @@
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
-// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
@@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) {
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
- if nameMatch != nil && len(nameMatch) == 3 {
+ if len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go
index 405e995db..786034932 100644
--- a/vendor/github.com/containers/image/docker/reference/regexp.go
+++ b/vendor/github.com/containers/image/docker/reference/regexp.go
@@ -20,15 +20,15 @@ var (
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
- // repository name to start with a component as defined by domainRegexp
+ // repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
- // domainRegexp defines the structure of potential domain components
+ // DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
- domainRegexp = expression(
+ DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
@@ -51,14 +51,14 @@ var (
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
- optional(domainRegexp, literal(`/`)),
+ optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
- optional(capture(domainRegexp), literal(`/`)),
+ optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 03735f8a4..dd5d78fe8 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -15,24 +15,24 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/types"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
tarPath string
- removeTarPathOnClose bool // Remove temp file on close if true
- cacheDataLock sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once
+ removeTarPathOnClose bool // Remove temp file on close if true
// The following data is only available after ensureCachedDataIsPresent() succeeds
- cacheDataResult error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
orderedDiffIDList []digest.Digest
knownLayers map[digest.Digest]*layerInfo
// Other state
- generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+ generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+ cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
+ cacheDataResult error // Private state for ensureCachedDataIsPresent
}
type layerInfo struct {
@@ -201,47 +201,50 @@ func (s *Source) readTarComponent(path string) ([]byte, error) {
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
func (s *Source) ensureCachedDataIsPresent() error {
s.cacheDataLock.Do(func() {
- // Read and parse manifest.json
- tarManifest, err := s.loadTarManifest()
- if err != nil {
- s.cacheDataResult = err
- return
- }
+ s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+ })
+ return s.cacheDataResult
+}
- // Check to make sure length is 1
- if len(tarManifest) != 1 {
- s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
- return
- }
+// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
+// Call ensureCachedDataIsPresent instead.
+func (s *Source) ensureCachedDataIsPresentPrivate() error {
+ // Read and parse manifest.json
+ tarManifest, err := s.loadTarManifest()
+ if err != nil {
+ return err
+ }
- // Read and parse config.
- configBytes, err := s.readTarComponent(tarManifest[0].Config)
- if err != nil {
- s.cacheDataResult = err
- return
- }
- var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
- if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
- return
- }
+ // Check to make sure length is 1
+ if len(tarManifest) != 1 {
+ return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
+ }
- knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
- if err != nil {
- s.cacheDataResult = err
- return
- }
+ // Read and parse config.
+ configBytes, err := s.readTarComponent(tarManifest[0].Config)
+ if err != nil {
+ return err
+ }
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
+ }
- // Success; commit.
- s.tarManifest = &tarManifest[0]
- s.configBytes = configBytes
- s.configDigest = digest.FromBytes(configBytes)
- s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
- s.knownLayers = knownLayers
- })
- return s.cacheDataResult
+ knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Success; commit.
+ s.tarManifest = &tarManifest[0]
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ return nil
}
// loadTarManifest loads and decodes the manifest.json.
diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go
index 1ba65d5be..b76286b8f 100644
--- a/vendor/github.com/containers/image/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/manifest/docker_schema1.go
@@ -226,6 +226,7 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
}
if s1.Config != nil {
i.Labels = s1.Config.Labels
+ i.Env = s1.Config.Env
}
return i, nil
}
diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go
index 0671aed9f..76a80e5a6 100644
--- a/vendor/github.com/containers/image/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/manifest/docker_schema2.go
@@ -241,6 +241,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
}
if s2.Config != nil {
i.Labels = s2.Config.Labels
+ i.Env = s2.Config.Env
}
return i, nil
}
diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go
index 91705045b..dd65e0ba2 100644
--- a/vendor/github.com/containers/image/manifest/oci.go
+++ b/vendor/github.com/containers/image/manifest/oci.go
@@ -116,6 +116,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
Architecture: v1.Architecture,
Os: v1.OS,
Layers: layerInfosToStrings(m.LayerInfos()),
+ Env: d1.Config.Env,
}
return i, nil
}
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index d69f4fa33..06a905aed 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -1,4 +1,4 @@
-// +build !containers_image_ostree_stub
+// +build containers_image_ostree
package ostree
@@ -218,7 +218,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
- return errors.Wrapf(err, "cannot setfilecon_raw %s", fullpath)
+ return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
}
}
}
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 35d852139..dc52ccb6e 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -1,4 +1,4 @@
-// +build !containers_image_ostree_stub
+// +build containers_image_ostree
package ostree
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go
index c9856530b..2e86623ac 100644
--- a/vendor/github.com/containers/image/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/ostree/ostree_transport.go
@@ -1,4 +1,4 @@
-// +build !containers_image_ostree_stub
+// +build containers_image_ostree
package ostree
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go
index 57b548e26..2e6bb378f 100644
--- a/vendor/github.com/containers/image/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/pkg/docker/config/config.go
@@ -56,6 +56,7 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
// If an entry is not found empty strings are returned for the username and password
func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
if sys != nil && sys.DockerAuthConfig != nil {
+ logrus.Debug("Returning credentials from DockerAuthConfig")
return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
}
@@ -76,12 +77,15 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
legacyFormat := path == dockerLegacyPath
username, password, err := findAuthentication(registry, path, legacyFormat)
if err != nil {
+ logrus.Debugf("Credentials not found")
return "", "", err
}
if username != "" && password != "" {
+ logrus.Debugf("Returning credentials from %s", path)
return username, password, nil
}
}
+ logrus.Debugf("Credentials not found")
return "", "", nil
}
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index 99ae65774..0c13913ed 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "regexp"
"strings"
"sync"
@@ -29,16 +30,16 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location.
- Location string `toml:"location"`
+ Location string `toml:"location,omitempty"`
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
- Insecure bool `toml:"insecure"`
+ Insecure bool `toml:"insecure,omitempty"`
}
-// RewriteReference will substitute the provided reference `prefix` to the
+// rewriteReference will substitute the provided reference `prefix` to the
// endpoints `location` from the `ref` and creates a new named reference from it.
// The function errors if the newly created reference is not parsable.
-func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
refString := ref.String()
if !refMatchesPrefix(refString, prefix) {
return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
@@ -55,20 +56,57 @@ func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (referen
// Registry represents a registry.
type Registry struct {
- // A registry is an Endpoint too
- Endpoint
- // The registry's mirrors.
- Mirrors []Endpoint `toml:"mirror"`
- // If true, pulling from the registry will be blocked.
- Blocked bool `toml:"blocked"`
- // If true, the registry can be used when pulling an unqualified image.
- Search bool `toml:"unqualified-search"`
// Prefix is used for matching images, and to translate one namespace to
// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
// and we pull from "example.com/bar/myimage:latest", the image will
// effectively be pulled from "example.com/foo/bar/myimage:latest".
// If no Prefix is specified, it defaults to the specified location.
Prefix string `toml:"prefix"`
+ // A registry is an Endpoint too
+ Endpoint
+ // The registry's mirrors.
+ Mirrors []Endpoint `toml:"mirror,omitempty"`
+ // If true, pulling from the registry will be blocked.
+ Blocked bool `toml:"blocked,omitempty"`
+ // If true, mirrors will only be used for digest pulls. Pulling images by
+ // tag can potentially yield different images, depending on which endpoint
+ // we pull from. Forcing digest-pulls for mirrors avoids that issue.
+ MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
+}
+
+// PullSource consists of an Endpoint and a Reference. Note that the reference is
+// rewritten according to the registry's prefix and the Endpoint's location.
+type PullSource struct {
+ Endpoint Endpoint
+ Reference reference.Named
+}
+
+// PullSourcesFromReference returns a slice of PullSources based on the passed
+// reference.
+func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
+ var endpoints []Endpoint
+
+ if r.MirrorByDigestOnly {
+ // Only use mirrors when the reference is a digest one.
+ if _, isDigested := ref.(reference.Canonical); isDigested {
+ endpoints = append(r.Mirrors, r.Endpoint)
+ } else {
+ endpoints = []Endpoint{r.Endpoint}
+ }
+ } else {
+ endpoints = append(r.Mirrors, r.Endpoint)
+ }
+
+ sources := []PullSource{}
+ for _, ep := range endpoints {
+ rewritten, err := ep.rewriteReference(ref, r.Prefix)
+ if err != nil {
+ return nil, err
+ }
+ sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+ }
+
+ return sources, nil
}
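A hedged sketch of how PullSourcesFromReference behaves for a registry with one mirror and mirror-by-digest-only set; all registry names and locations here are made-up example values, not from the vendored code or any real configuration.

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	reg := sysregistriesv2.Registry{
		Prefix:             "example.com/foo",
		Endpoint:           sysregistriesv2.Endpoint{Location: "example.com/foo"},
		Mirrors:            []sysregistriesv2.Endpoint{{Location: "mirror.example.com/foo"}},
		MirrorByDigestOnly: true,
	}

	// Tagged pull: a tag could resolve differently on each endpoint, so with
	// MirrorByDigestOnly set the mirror is skipped and only the upstream location is used.
	tagged, err := reference.ParseNormalizedNamed("example.com/foo/app:v1")
	if err != nil {
		panic(err)
	}
	sources, err := reg.PullSourcesFromReference(tagged)
	if err != nil {
		panic(err)
	}
	for _, s := range sources {
		// Each reference is rewritten from the registry prefix to the endpoint location.
		fmt.Println(s.Endpoint.Location, "->", s.Reference.String())
	}
}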
// V1TOMLregistries is for backwards compatibility to sysregistries v1
@@ -83,11 +121,35 @@ type V1TOMLConfig struct {
Block V1TOMLregistries `toml:"block"`
}
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+ V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V1RegistriesConf) Nonempty() bool {
+ return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
+ len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
+ len(config.V1TOMLConfig.Block.Registries) != 0)
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+ Registries []Registry `toml:"registry"`
+ // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+ UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+ return (len(config.Registries) != 0 ||
+ len(config.UnqualifiedSearchRegistries) != 0)
+}
+
// tomlConfig is the data type used to unmarshal the toml config.
type tomlConfig struct {
- Registries []Registry `toml:"registry"`
- // backwards compatability to sysregistries v1
- V1TOMLConfig `toml:"registries"`
+ V2RegistriesConf
+ V1RegistriesConf // for backwards compatibility with sysregistries v1
}
// InvalidRegistries represents an invalid registry configurations. An example
@@ -120,12 +182,10 @@ func parseLocation(input string) (string, error) {
return trimmed, nil
}
-// getV1Registries transforms v1 registries in the config into an array of v2
-// registries of type Registry.
-func getV1Registries(config *tomlConfig) ([]Registry, error) {
+// ConvertToV2 returns a v2 config corresponding to a v1 one.
+func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
regMap := make(map[string]*Registry)
- // We must preserve the order of config.V1Registries.Search.Registries at least. The order of the
- // other registries is not really important, but make it deterministic (the same for the same config file)
+ // The order of the registries is not really important, but make it deterministic (the same for the same config file)
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
registryOrder := []string{}
@@ -148,15 +208,6 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
return reg, nil
}
- // Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
- // if one of the search registries is also in one of the other lists.
- for _, search := range config.V1TOMLConfig.Search.Registries {
- reg, err := getRegistry(search)
- if err != nil {
- return nil, err
- }
- reg.Search = true
- }
for _, blocked := range config.V1TOMLConfig.Block.Registries {
reg, err := getRegistry(blocked)
if err != nil {
@@ -172,28 +223,31 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
reg.Insecure = true
}
- registries := []Registry{}
+ res := &V2RegistriesConf{
+ UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,
+ }
for _, location := range registryOrder {
reg := regMap[location]
- registries = append(registries, *reg)
+ res.Registries = append(res.Registries, *reg)
}
- return registries, nil
+ return res, nil
}
-// postProcessRegistries checks the consistency of all registries (e.g., set
-// the Prefix to Location if not set) and applies conflict checks. It returns an
-// array of cleaned registries and error in case of conflicts.
-func postProcessRegistries(regs []Registry) ([]Registry, error) {
- var registries []Registry
- regMap := make(map[string][]Registry)
+// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
+var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
- for _, reg := range regs {
- var err error
+// postProcess checks the consistency of all the configuration, looks for conflicts,
+// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
+func (config *V2RegistriesConf) postProcess() error {
+ regMap := make(map[string][]*Registry)
+ for i := range config.Registries {
+ reg := &config.Registries[i]
// make sure Location and Prefix are valid
+ var err error
reg.Location, err = parseLocation(reg.Location)
if err != nil {
- return nil, err
+ return err
}
if reg.Prefix == "" {
@@ -201,7 +255,7 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
} else {
reg.Prefix, err = parseLocation(reg.Prefix)
if err != nil {
- return nil, err
+ return err
}
}
@@ -209,10 +263,9 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
for _, mir := range reg.Mirrors {
mir.Location, err = parseLocation(mir.Location)
if err != nil {
- return nil, err
+ return err
}
}
- registries = append(registries, reg)
regMap[reg.Location] = append(regMap[reg.Location], reg)
}
@@ -222,22 +275,32 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
//
// Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps.
- for _, reg := range registries {
+ for _, reg := range config.Registries {
others, _ := regMap[reg.Location]
for _, other := range others {
if reg.Insecure != other.Insecure {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
-
- return nil, &InvalidRegistries{s: msg}
+ return &InvalidRegistries{s: msg}
}
if reg.Blocked != other.Blocked {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
- return nil, &InvalidRegistries{s: msg}
+ return &InvalidRegistries{s: msg}
}
}
}
- return registries, nil
+ for i := range config.UnqualifiedSearchRegistries {
+ registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+ if err != nil {
+ return err
+ }
+ if !anchoredDomainRegexp.MatchString(registry) {
+ return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+ }
+ config.UnqualifiedSearchRegistries[i] = registry
+ }
+
+ return nil
}
// getConfigPath returns the system-registries config path if specified.
@@ -260,7 +323,7 @@ var configMutex = sync.Mutex{}
// configCache caches already loaded configs with config paths as keys and is
// used to avoid redudantly parsing configs. Concurrent accesses to the cache
// are synchronized via configMutex.
-var configCache = make(map[string][]Registry)
+var configCache = make(map[string]*V2RegistriesConf)
// InvalidateCache invalidates the registry cache. This function is meant to be
// used for long-running processes that need to reload potential changes made to
@@ -268,20 +331,18 @@ var configCache = make(map[string][]Registry)
func InvalidateCache() {
configMutex.Lock()
defer configMutex.Unlock()
- configCache = make(map[string][]Registry)
+ configCache = make(map[string]*V2RegistriesConf)
}
-// GetRegistries loads and returns the registries specified in the config.
-// Note the parsed content of registry config files is cached. For reloading,
-// use `InvalidateCache` and re-call `GetRegistries`.
-func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
+func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
configPath := getConfigPath(ctx)
configMutex.Lock()
defer configMutex.Unlock()
// if the config has already been loaded, return the cached registries
- if registries, inCache := configCache[configPath]; inCache {
- return registries, nil
+ if config, inCache := configCache[configPath]; inCache {
+ return config, nil
}
// load the config
@@ -292,51 +353,53 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
// isn't set. Note: if ctx.SystemRegistriesConfPath points to
// the default config, we will still return an error.
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
- return []Registry{}, nil
+ return &V2RegistriesConf{Registries: []Registry{}}, nil
}
return nil, err
}
- registries := config.Registries
+ v2Config := &config.V2RegistriesConf
// backwards compatibility for v1 configs
- v1Registries, err := getV1Registries(config)
- if err != nil {
- return nil, err
- }
- if len(v1Registries) > 0 {
- if len(registries) > 0 {
+ if config.V1RegistriesConf.Nonempty() {
+ if config.V2RegistriesConf.Nonempty() {
return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
}
- registries = v1Registries
+ v2, err := config.V1RegistriesConf.ConvertToV2()
+ if err != nil {
+ return nil, err
+ }
+ v2Config = v2
}
- registries, err = postProcessRegistries(registries)
- if err != nil {
+ if err := v2Config.postProcess(); err != nil {
return nil, err
}
// populate the cache
- configCache[configPath] = registries
-
- return registries, err
+ configCache[configPath] = v2Config
+ return v2Config, nil
}
-// FindUnqualifiedSearchRegistries returns all registries that are configured
-// for unqualified image search (i.e., with Registry.Search == true).
-func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) {
- registries, err := GetRegistries(ctx)
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+ config, err := getConfig(ctx)
if err != nil {
return nil, err
}
+ return config.Registries, nil
+}
- unqualified := []Registry{}
- for _, reg := range registries {
- if reg.Search {
- unqualified = append(unqualified, reg)
- }
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
}
- return unqualified, nil
+ return config.UnqualifiedSearchRegistries, nil
}
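A brief sketch of resolving an unqualified short name with the new UnqualifiedSearchRegistries API; the short name "myimage" is an assumption for the example, and a nil SystemContext is used so the default registries.conf locations apply.

package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	// nil SystemContext: fall back to the built-in registries.conf search paths.
	var sys *types.SystemContext
	hosts, err := sysregistriesv2.UnqualifiedSearchRegistries(sys)
	if err != nil {
		panic(err)
	}
	// Candidate fully-qualified names for the short name "myimage",
	// in the order they appear in unqualified-search-registries.
	for _, host := range hosts {
		fmt.Printf("%s/myimage\n", host)
	}
}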
// refMatchesPrefix returns true iff ref,
@@ -371,14 +434,14 @@ func refMatchesPrefix(ref, prefix string) bool {
// — note that this requires the name to start with an explicit hostname!).
// If no Registry prefixes the image, nil is returned.
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
- registries, err := GetRegistries(ctx)
+ config, err := getConfig(ctx)
if err != nil {
return nil, err
}
reg := Registry{}
prefixLen := 0
- for _, r := range registries {
+ for _, r := range config.Registries {
if refMatchesPrefix(ref, r.Prefix) {
length := len(r.Prefix)
if length > prefixLen {
@@ -393,21 +456,12 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
return nil, nil
}
-// Reads the global registry file from the filesystem. Returns a byte array.
-func readRegistryConf(configPath string) ([]byte, error) {
- configBytes, err := ioutil.ReadFile(configPath)
- return configBytes, err
-}
-
-// Used in unittests to parse custom configs without a types.SystemContext.
-var readConf = readRegistryConf
-
// Loads the registry configuration file from the filesystem and then unmarshals
// it. Returns the unmarshalled object.
func loadRegistryConf(configPath string) (*tomlConfig, error) {
config := &tomlConfig{}
- configBytes, err := readConf(configPath)
+ configBytes, err := ioutil.ReadFile(configPath)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index b39d2bcc0..946a85f7b 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -491,14 +491,21 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
// Does the blob correspond to a known DiffID which we already have available?
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
- // uncompressed layer, and that can happen only if canSubstitute.
- if canSubstitute {
+ // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
+ if canSubstitute || blobinfo.Size != -1 {
if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
}
if len(layers) > 0 {
+ if blobinfo.Size != -1 {
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, blobinfo, nil
+ }
+ if !canSubstitute {
+ return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo)
+ }
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: uncompressedDigest,
@@ -627,7 +634,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
- layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go
index 4a3b29a00..cc4d69fe8 100644
--- a/vendor/github.com/containers/image/transports/alltransports/ostree.go
+++ b/vendor/github.com/containers/image/transports/alltransports/ostree.go
@@ -1,4 +1,4 @@
-// +build !containers_image_ostree_stub,linux
+// +build containers_image_ostree,linux
package alltransports
diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go
index 48fcaa58d..fb5b96e54 100644
--- a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go
+++ b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go
@@ -1,4 +1,4 @@
-// +build containers_image_ostree_stub !linux
+// +build !containers_image_ostree !linux
package alltransports
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index 789504348..08b4241e0 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -398,6 +398,7 @@ type ImageInspectInfo struct {
Architecture string
Os string
Layers []string
+ Env []string
}
// DockerAuthConfig contains authorization information for connecting to a registry.
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
deleted file mode 100644
index 438cab17a..000000000
--- a/vendor/github.com/containers/image/vendor.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-github.com/containers/image
-
-github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage v1.12.2
-github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
-github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
-github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
-github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
-github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
-github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
-github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
-github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
-github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
-github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
-github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
-github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
-github.com/opencontainers/image-spec v1.0.0
-github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
-github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
-github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
-github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/vbatts/tar-split v0.10.2
-golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
-golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
-golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
-golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
-gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
-k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-github.com/tchap/go-patricia v2.2.6
-github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
-github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
-github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
-github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
-github.com/pquerna/ffjson master
-github.com/syndtr/gocapability master
-github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
-github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
-github.com/ulikunitz/xz v0.5.4
-github.com/etcd-io/bbolt v1.3.2
-github.com/klauspost/pgzip v1.2.1
-github.com/klauspost/compress v1.4.1
-github.com/klauspost/cpuid v1.2.0
-github.com/vbauerster/mpb v3.3.4
-github.com/mattn/go-isatty v0.0.4
-github.com/VividCortex/ewma v1.1.1
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
index 184274736..807daf7a2 100644
--- a/vendor/github.com/containers/image/version/version.go
+++ b/vendor/github.com/containers/image/version/version.go
@@ -4,14 +4,14 @@ import "fmt"
const (
// VersionMajor is for an API incompatible changes
- VersionMajor = 1
+ VersionMajor = 2
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 7
+ VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = "-dev"
+ VersionDev = ""
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/containers/psgo/.gitignore b/vendor/github.com/containers/psgo/.gitignore
new file mode 100644
index 000000000..fc8fbcad3
--- /dev/null
+++ b/vendor/github.com/containers/psgo/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# everything in build dir
+bin/*
diff --git a/vendor/github.com/containers/psgo/.travis.yml b/vendor/github.com/containers/psgo/.travis.yml
new file mode 100644
index 000000000..1399d24d3
--- /dev/null
+++ b/vendor/github.com/containers/psgo/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+sudo: required
+
+services:
+ - docker
+
+go:
+ - tip
+
+before_install:
+ - sudo add-apt-repository ppa:duggan/bats --yes
+ - sudo apt-get update -qq
+ - sudo apt-get install -qq bats
+
+script:
+ - make validate || travis_terminate 1
+ - make build || travis_terminate 1
+ - make test || travis_terminate 1
diff --git a/vendor/github.com/containers/psgo/Makefile b/vendor/github.com/containers/psgo/Makefile
new file mode 100644
index 000000000..6050b9d5b
--- /dev/null
+++ b/vendor/github.com/containers/psgo/Makefile
@@ -0,0 +1,60 @@
+export GO111MODULE=off
+
+SHELL= /bin/bash
+GO ?= go
+BUILD_DIR := ./bin
+BIN_DIR := /usr/local/bin
+NAME := psgo
+PROJECT := github.com/containers/psgo
+BATS_TESTS := *.bats
+GO_SRC=$(shell find . -name \*.go)
+
+all: validate build
+
+.PHONY: build
+build: $(GO_SRC)
+ $(GO) build -buildmode=pie -o $(BUILD_DIR)/$(NAME) $(PROJECT)/sample
+
+.PHONY: clean
+clean:
+ rm -rf $(BUILD_DIR)
+
+.PHONY: vendor
+vendor:
+ GO111MODULE=on go mod tidy
+ GO111MODULE=on go mod vendor
+ GO111MODULE=on go mod verify
+
+.PHONY: validate
+validate: .install.lint
+ @which gofmt >/dev/null 2>/dev/null || (echo "ERROR: gofmt not found." && false)
+ test -z "$$(gofmt -s -l . | grep -vE 'vendor/' | tee /dev/stderr)"
+ @which golangci-lint >/dev/null 2>/dev/null|| (echo "ERROR: golangci-lint not found." && false)
+ test -z "$$(golangci-lint run)"
+ @go doc cmd/vet >/dev/null 2>/dev/null|| (echo "ERROR: go vet not found." && false)
+ test -z "$$($(GO) vet $$($(GO) list $(PROJECT)/...) 2>&1 | tee /dev/stderr)"
+
+.PHONY: test
+test: test-unit test-integration
+
+.PHONY: test-integration
+test-integration:
+ bats test/$(BATS_TESTS)
+
+.PHONY: test-unit
+test-unit:
+ go test -v $(PROJECT)
+ go test -v $(PROJECT)/internal/...
+
+.PHONY: install
+install:
+ sudo install -D -m755 $(BUILD_DIR)/$(NAME) $(BIN_DIR)
+
+.PHONY: .install.lint
+.install.lint:
+ # Workaround for https://github.com/golangci/golangci-lint/issues/523
+ go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
+
+.PHONY: uninstall
+uninstall:
+ sudo rm $(BIN_DIR)/$(NAME)
diff --git a/vendor/github.com/containers/psgo/go.mod b/vendor/github.com/containers/psgo/go.mod
index dd671bbb0..a194ec196 100644
--- a/vendor/github.com/containers/psgo/go.mod
+++ b/vendor/github.com/containers/psgo/go.mod
@@ -6,6 +6,6 @@ require (
github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4
github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9
github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe
- github.com/stretchr/testify v1.2.2
+ github.com/stretchr/testify v1.3.0
golang.org/x/sys v0.0.0-20190425145619-16072639606e
)
diff --git a/vendor/github.com/containers/psgo/go.sum b/vendor/github.com/containers/psgo/go.sum
new file mode 100644
index 000000000..da6c750db
--- /dev/null
+++ b/vendor/github.com/containers/psgo/go.sum
@@ -0,0 +1,23 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4 h1:y5PMn9UW9cD5xvjoMs9pJ77zs5DgGkiDv0BI/Qew51o=
+github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9 h1:dIsTcVF0w9viTLHXUEkDI7cXITMe+M/MRRM2MwisVow=
+github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe h1:PBQLA9wc7FrXiUBnlfs/diNlg3ZdrP21tzcgL3OlVhU=
+github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190425145619-16072639606e h1:4ktJgTV34+N3qOZUc5fAaG3Pb11qzMm3PkAoTAgUZ2I=
+golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/containers/psgo/internal/host/host.go b/vendor/github.com/containers/psgo/internal/host/host.go
index 4b145ecfb..33ad67a11 100644
--- a/vendor/github.com/containers/psgo/internal/host/host.go
+++ b/vendor/github.com/containers/psgo/internal/host/host.go
@@ -24,26 +24,6 @@ import (
"strings"
)
-/*
-#include <unistd.h>
-*/
-import "C"
-
-var (
- // cache host queries to redundant calculations
- clockTicks *int64
- bootTime *int64
-)
-
-// ClockTicks returns sysconf(SC_CLK_TCK).
-func ClockTicks() int64 {
- if clockTicks == nil {
- ticks := int64(C.sysconf(C._SC_CLK_TCK))
- clockTicks = &ticks
- }
- return *clockTicks
-}
-
// BootTime parses /proc/uptime returns the boot time in seconds since the
// Epoch, 1970-01-01 00:00:00 +0000 (UTC).
func BootTime() (int64, error) {
diff --git a/vendor/github.com/containers/psgo/internal/host/host_cgo.go b/vendor/github.com/containers/psgo/internal/host/host_cgo.go
new file mode 100644
index 000000000..eac9fe5ce
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/host/host_cgo.go
@@ -0,0 +1,37 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package host extracts data from the host, such as the system's boot time or
+// the tick rate of the system clock.
+package host
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+var (
+ // cache host queries to avoid redundant calculations
+ clockTicks *int64
+ bootTime *int64
+)
+
+// ClockTicks returns sysconf(SC_CLK_TCK).
+func ClockTicks() (int64, error) {
+ if clockTicks == nil {
+ ticks := int64(C.sysconf(C._SC_CLK_TCK))
+ clockTicks = &ticks
+ }
+ return *clockTicks, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/host/host_nocgo.go b/vendor/github.com/containers/psgo/internal/host/host_nocgo.go
new file mode 100644
index 000000000..6ff337415
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/host/host_nocgo.go
@@ -0,0 +1,84 @@
+// +build !cgo
+
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package host extracts data from the host, such as the system's boot time or
+// the tick rate of the system clock.
+package host
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "unsafe"
+)
+
+var (
+ // cache host queries to avoid redundant calculations
+ clockTicks *int64
+ bootTime *int64
+)
+
+func getNativeEndianness() binary.ByteOrder {
+ var i int32 = 0x00000001
+ u := unsafe.Pointer(&i)
+ if *((*byte)(u)) == 0x01 {
+ return binary.LittleEndian
+ }
+ return binary.BigEndian
+}
+
+const (
+ atClktck = 17
+)
+
+func getFromAuxv(what uint, whatName string) (uint, error) {
+ dataLen := int(unsafe.Sizeof(int(0)))
+ p, err := ioutil.ReadFile("/proc/self/auxv")
+ if err != nil {
+ return 0, err
+ }
+ native := getNativeEndianness()
+ for i := 0; i < len(p); {
+ var k, v uint
+
+ switch dataLen {
+ case 4:
+ k = uint(native.Uint32(p[i : i+dataLen]))
+ v = uint(native.Uint32(p[i+dataLen : i+dataLen*2]))
+ case 8:
+ k = uint(native.Uint64(p[i : i+dataLen]))
+ v = uint(native.Uint64(p[i+dataLen : i+dataLen*2]))
+ }
+ i += dataLen * 2
+ if k == what {
+ return v, nil
+ }
+ }
+ return 0, fmt.Errorf("cannot find %s in auxv", whatName)
+}
+
+// ClockTicks returns sysconf(SC_CLK_TCK).
+func ClockTicks() (int64, error) {
+ if clockTicks == nil {
+ ret, err := getFromAuxv(atClktck, "AT_CLKTCK")
+ if err != nil {
+ return -1, err
+ }
+ ticks := int64(ret)
+ clockTicks = &ticks
+ }
+ return *clockTicks, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/process/process.go b/vendor/github.com/containers/psgo/internal/process/process.go
index 68241264e..20e40163f 100644
--- a/vendor/github.com/containers/psgo/internal/process/process.go
+++ b/vendor/github.com/containers/psgo/internal/process/process.go
@@ -192,8 +192,12 @@ func (p *Process) ElapsedTime() (time.Duration, error) {
if err != nil {
return 0, err
}
+ clockTicks, err := host.ClockTicks()
+ if err != nil {
+ return 0, err
+ }
- sinceBoot = sinceBoot / host.ClockTicks()
+ sinceBoot = sinceBoot / clockTicks
bootTime, err := host.BootTime()
if err != nil {
@@ -213,7 +217,11 @@ func (p *Process) CPUTime() (time.Duration, error) {
if err != nil {
return 0, err
}
- secs := (user + system) / host.ClockTicks()
+ clockTicks, err := host.ClockTicks()
+ if err != nil {
+ return 0, err
+ }
+ secs := (user + system) / clockTicks
cpu := time.Unix(secs, 0)
return cpu.Sub(time.Unix(0, 0)), nil
}
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
new file mode 100644
index 000000000..e10bc5dba
--- /dev/null
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -0,0 +1,96 @@
+---
+
+# Main collection of env. vars to set for all tasks and scripts.
+env:
+ ####
+ #### Global variables used for all tasks
+ ####
+ # Overrides default location (/tmp/cirrus) for repo clone
+ CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/storage"
+ # Shell used to execute all script commands
+ CIRRUS_SHELL: "/bin/bash"
+ # Automation script path relative to $CIRRUS_WORKING_DIR
+ SCRIPT_BASE: "./contrib/cirrus"
+ # No need to go crazy, but grab enough to cover most PRs
+ CIRRUS_CLONE_DEPTH: 50
+
+ ####
+ #### Cache-image names to test with
+ ####
+ # GCE project where images live
+ IMAGE_PROJECT: "libpod-218412"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1556821664"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1541789245"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514"
+
+ ####
+ #### Command variables to help avoid duplication
+ ####
+ # Command to prefix every output line with a timestamp
+ # (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
+ _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
+ _DFCMD: 'df -lhTx tmpfs'
+ _RAUDITCMD: 'cat /var/log/audit/audit.log'
+ _UAUDITCMD: 'cat /var/log/kern.log'
+ _JOURNALCMD: 'journalctl -b'
+
+gcp_credentials: ENCRYPTED[c87717f04fb15499d19a3b3fa0ad2cdedecc047e82967785d101e9bc418e93219f755e662feac8390088a2df1a4d8464]
+
+# Default timeout for each task
+timeout_in: 120m
+
+# Default VM to use unless set or modified by task
+gce_instance:
+ image_project: "${IMAGE_PROJECT}"
+ zone: "us-central1-b" # Required by Cirrus for the time being
+ cpu: 2
+ memory: "4Gb"
+ disk: 200
+ image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+
+testing_task:
+ gce_instance: # Only need to specify differences from defaults (above)
+ matrix: # Duplicate this task for each matrix product.
+ image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+ image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+
+ # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
+
+ # Log collection when job was successful
+ df_script: '${_DFCMD} || true'
+ rh_audit_log_script: '${_RAUDITCMD} || true'
+ ubuntu_audit_log_script: '${_UAUDITCMD} || true'
+ journal_log_script: '${_JOURNALCMD} || true'
+
+ on_failure: # Script names must be different from above
+ failure_df_script: '${_DFCMD} || true'
+ failure_rh_audit_log_script: '${_RAUDITCMD} || true'
+ failure_ubuntu_audit_log_script: '${_UAUDITCMD} || true'
+ failure_journal_log_script: '${_JOURNALCMD} || true'
+
+
+# Update metadata on VM images referenced by this repository state
+meta_task:
+
+ container:
+ image: "quay.io/libpod/imgts:latest" # see contrib/imgts
+ cpu: 1
+ memory: 1
+
+ env:
+ # Space-separated list of images used by this repository state
+ IMGNAMES: |-
+ ${FEDORA_CACHE_IMAGE_NAME}
+ ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
+ ${UBUNTU_CACHE_IMAGE_NAME}
+ BUILDID: "${CIRRUS_BUILD_ID}"
+ REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
+ GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
+ GCPNAME: ENCRYPTED[91cf7aa421858b26b67835978d224b4a5c46afcf52a0f1ec1b69a99b248715dc8e92a1b56fde18e092acf256fa80ae9c]
+ GCPPROJECT: ENCRYPTED[79b0f7eb5958e25bc7095d5d368fa8d94447a43ffacb9c693de438186e2f767b7efe9563d6954297ae4730220e10aa9c]
+ CIRRUS_CLONE_DEPTH: 1 # source not used
+
+ script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}'
diff --git a/vendor/github.com/containers/storage/.dockerignore b/vendor/github.com/containers/storage/.dockerignore
new file mode 100644
index 000000000..9bd2c0219
--- /dev/null
+++ b/vendor/github.com/containers/storage/.dockerignore
@@ -0,0 +1,3 @@
+bundles
+.gopath
+vendor/pkg
diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore
new file mode 100644
index 000000000..4bb64e3f3
--- /dev/null
+++ b/vendor/github.com/containers/storage/.gitignore
@@ -0,0 +1,29 @@
+# containers/storage project generated files to ignore
+# if you want to ignore files created by your editor/tools,
+# please consider a global .gitignore https://help.github.com/articles/ignoring-files
+*.1
+*.exe
+*~
+*.orig
+*.test
+.*.swp
+.DS_Store
+# a .bashrc may be added to customize the build environment
+.bashrc
+.gopath/
+docs/AWS_S3_BUCKET
+docs/GITCOMMIT
+docs/GIT_BRANCH
+docs/VERSION
+docs/_build
+docs/_static
+docs/_templates
+docs/changed-files
+# generated by man/md2man-all.sh
+man/man1
+man/man5
+man/man8
+vendor/pkg/
+.vagrant
+/containers-storage
+/containers-storage.*
diff --git a/vendor/github.com/containers/storage/.mailmap b/vendor/github.com/containers/storage/.mailmap
new file mode 100644
index 000000000..0527b6d84
--- /dev/null
+++ b/vendor/github.com/containers/storage/.mailmap
@@ -0,0 +1,254 @@
+# Generate AUTHORS: hack/generate-authors.sh
+
+# Tip for finding duplicates (besides scanning the output of AUTHORS for name
+# duplicates that aren't also email duplicates): scan the output of:
+# git log --format='%aE - %aN' | sort -uf
+#
+# For explanation on this file format: man git-shortlog
+
+Patrick Stapleton <github@gdi2290.com>
+Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
+Erwin van der Koogh <info@erronis.nl>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
+Cristian Staretu <cristian.staretu@gmail.com>
+Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
+Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
+Marcus Linke <marcus.linke@gmx.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Christopher Latham <sudosurootdev@gmail.com>
+Hu Keping <hukeping@huawei.com>
+Wayne Chang <wayne@neverfear.org>
+Chen Chao <cc272309126@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
+<jt@yadutaf.fr> <admin@jtlebi.fr>
+<jeff@docker.com> <jefferya@programmerq.net>
+<charles.hooper@dotcloud.com> <chooper@plumata.com>
+<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
+<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
+Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
+<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
+<guillaume.charmes@docker.com> <guillaume@docker.com>
+<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
+<guillaume.charmes@docker.com> <guillaume@charmes.net>
+<kencochrane@gmail.com> <KenCochrane@gmail.com>
+Thatcher Peskens <thatcher@docker.com>
+Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
+Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
+Joffrey F <joffrey@docker.com>
+Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
+Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Andy Smith <github@anarkystic.com>
+<kalessin@kalessin.fr> <louis@dotcloud.com>
+<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
+<victor.vieux@docker.com> <victor@dotcloud.com>
+<victor.vieux@docker.com> <dev@vvieux.com>
+<victor.vieux@docker.com> <victor@docker.com>
+<victor.vieux@docker.com> <vieux@docker.com>
+<victor.vieux@docker.com> <victorvieux@gmail.com>
+<dominik@honnef.co> <dominikh@fork-bomb.org>
+<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
+Walter Stanish <walter@pratyeka.org>
+<daniel@gasienica.ch> <dgasienica@zynga.com>
+Roberto Hashioka <roberto_hashioka@hotmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+David Sissitka <me@dsissitka.com>
+Nolan Darilek <nolan@thewordnerd.info>
+<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Jordan Arentsen <blissdev@gmail.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Faiz Khan <faizkhan00@gmail.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+<mosoni@ebay.com> <mohitsoni1989@gmail.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+<proppy@google.com> <proppy@aminche.com>
+<michael@docker.com> <michael@crosbymichael.com>
+<michael@docker.com> <crosby.michael@gmail.com>
+<michael@docker.com> <crosbymichael@gmail.com>
+<github@developersupport.net> <github@metaliveblog.com>
+<brandon@ifup.org> <brandon@ifup.co>
+<dano@spotify.com> <daniel.norberg@gmail.com>
+<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
+<gurjeet@singh.im> <singh.gurjeet@gmail.com>
+<shawn@churchofgit.com> <shawnlandden@gmail.com>
+<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
+<solomon@docker.com> <solomon.hykes@dotcloud.com>
+<solomon@docker.com> <solomon@dotcloud.com>
+<solomon@docker.com> <s@docker.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
+Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
+Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
+Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
+Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
+Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
+<alexl@redhat.com> <alexander.larsson@gmail.com>
+Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
+Alexander Morozov <lk4d4@docker.com>
+<git.nivoc@neverbox.com> <kuehnle@online.de>
+O.S. Tezer <ostezer@gmail.com>
+<ostezer@gmail.com> <ostezer@users.noreply.github.com>
+Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
+<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
+<taim@bosboot.org> <maztaim@users.noreply.github.com>
+<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
+<vbatts@redhat.com> <vbatts@hashbangbash.com>
+<altsysrq@gmail.com> <iamironbob@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
+Liang-Chi Hsieh <viirya@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
+Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
+Will Weaver <monkey@buildingbananas.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
+<github@hollensbe.org> <erik+github@hollensbe.org>
+<github@albersweb.de> <albers@users.noreply.github.com>
+<lsm5@fedoraproject.org> <lsm5@redhat.com>
+<marc@marc-abramowitz.com> <msabramo@gmail.com>
+Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
+<bernat@luffy.cx> <vincent@bernat.im>
+<bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
+<p@pwaller.net> <peter@scraperwiki.com>
+<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+<julienbordellier@gmail.com> <git@julienbordellier.com>
+<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
+<arnaud.porterie@docker.com> <icecrime@gmail.com>
+<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
+Brian Goff <cpuguy83@gmail.com>
+<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
+<eric@windisch.us> <ewindisch@docker.com>
+<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
+Hollie Teal <hollie@docker.com>
+<hollie@docker.com> <hollie.teal@docker.com>
+<hollie@docker.com> <hollietealok@users.noreply.github.com>
+<huu@prismskylabs.com> <whoshuu@gmail.com>
+Jessica Frazelle <jess@mesosphere.com>
+Jessica Frazelle <jess@mesosphere.com> <jfrazelle@users.noreply.github.com>
+Jessica Frazelle <jess@mesosphere.com> <acidburn@docker.com>
+Jessica Frazelle <jess@mesosphere.com> <jess@docker.com>
+Jessica Frazelle <jess@mesosphere.com> <princess@docker.com>
+<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
+<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
+<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
+<github@gone.nl> <thaJeztah@users.noreply.github.com>
+Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
+<oi@truffles.me.uk> <timruffles@googlemail.com>
+<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
+Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
+Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
+Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
+Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
+Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
+Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
+Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
+Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
+Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
+Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
+John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
+John Howard (VM) <John.Howard@microsoft.com>
+John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
+John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
+Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
+Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
+Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
+Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
+mattyw <mattyw@me.com> <gh@mattyw.net>
+resouer <resouer@163.com> <resouer@gmail.com>
+AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
+AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
+Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
+Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
+Vincent Bernat <bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
+Yestin Sun <sunyi0804@gmail.com> <yestin.sun@polyera.com>
+bin liu <liubin0329@users.noreply.github.com> <liubin0329@gmail.com>
+John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
+Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
+Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
+Allen Sun <allen.sun@daocloud.io>
+Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
+<aanm90@gmail.com> <martins@noironetworks.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
+Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
+Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
+Chander G <chandergovind@gmail.com>
+Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
+Ying Li <cyli@twistedmatrix.com>
+Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
+<dqminh@cloudflare.com> <dqminh89@gmail.com>
+Daniel, Dao Quang Minh <dqminh@cloudflare.com>
+Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
+Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
+Doug Tangren <d.tangren@gmail.com>
+Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
+Ben Golub <ben.golub@dotcloud.com>
+Harold Cooper <hrldcpr@gmail.com>
+hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
+Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
+Justin Cormack <justin.cormack@docker.com>
+<justin.cormack@docker.com> <justin.cormack@unikernel.com>
+<justin.cormack@docker.com> <justin@specialbusservice.com>
+Kamil Domański <kamil@domanski.co>
+Lei Jitang <leijitang@huawei.com>
+<leijitang@huawei.com> <leijitang@gmail.com>
+Linus Heckemann <lheckemann@twig-world.com>
+<lheckemann@twig-world.com> <anonymouse2048@gmail.com>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+<lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
+Marianna Tessel <mtesselh@gmail.com>
+Michael Huettermann <michael@huettermann.net>
+Moysés Borges <moysesb@gmail.com>
+<moysesb@gmail.com> <moyses.furtado@wplex.com.br>
+Nigel Poulton <nigelpoulton@hotmail.com>
+Qiang Huang <h.huangqiang@huawei.com>
+<h.huangqiang@huawei.com> <qhuang@10.0.2.15>
+Boaz Shuster <ripcurld.github@gmail.com>
+Shuwei Hao <haosw@cn.ibm.com>
+<haosw@cn.ibm.com> <haoshuwei24@gmail.com>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+<soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+<stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
+Stephen Day <stephen.day@docker.com>
+<stephen.day@docker.com> <stevvooe@users.noreply.github.com>
+Toli Kuznets <toli@docker.com>
+Tristan Carel <tristan@cogniteev.com>
+<tristan@cogniteev.com> <tristan.carel@gmail.com>
+Vincent Demeester <vincent@sbr.pm>
+<vincent@sbr.pm> <vincent+github@demeester.fr>
+Vishnu Kannan <vishnuk@google.com>
+xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
+yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
+<zij@case.edu> <zjaffee@us.ibm.com>
+<anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
+<eungjun.yi@navercorp.com> <semtlenori@gmail.com>
+<haosw@cn.ibm.com> <haoshuwei1989@163.com>
+Hao Shu Wei <haosw@cn.ibm.com>
+<matt.bentley@docker.com> <mbentley@mbentley.net>
+<MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
+<redmond.martin@gmail.com> <xgithub@redmond5.com>
+<redmond.martin@gmail.com> <martin@tinychat.com>
+<srbrahma@us.ibm.com> <sbrahma@us.ibm.com>
+<suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
+<thomas@gazagnaire.org> <thomas@gazagnaire.com>
+Shengbo Song <thomassong@tencent.com> mYmNeo <mymneo@163.com>
+Shengbo Song <thomassong@tencent.com>
+<sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+
diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml
new file mode 100644
index 000000000..dc1c61391
--- /dev/null
+++ b/vendor/github.com/containers/storage/.travis.yml
@@ -0,0 +1,71 @@
+---
+
+sudo: required
+
+# N/B: host go env. not actually used, see .run_ci_tests.sh
+language: go
+go:
+ - master
+
+services:
+ - docker
+
+env:
+ # Ubuntu
+ - GO_VERSION="stable"
+ DISTRO="ubuntu"
+
+ - GO_VERSION="1.11"
+ DISTRO="ubuntu"
+
+ - GO_VERSION="1.12"
+ DISTRO="ubuntu"
+
+ # Fedora
+ - GO_VERSION="stable"
+ DISTRO="fedora"
+
+ - GO_VERSION="1.11"
+ DISTRO="fedora"
+
+ - GO_VERSION="1.12"
+ DISTRO="fedora"
+
+ # CentOS
+ - GO_VERSION="stable"
+ DISTRO="centos"
+
+ - GO_VERSION="1.11"
+ DISTRO="centos"
+
+ - GO_VERSION="1.12"
+ DISTRO="centos"
+
+# GO_VERSION="stable" builds successfully, but tests fail on all platforms.
+# Run the tests, but ignore the result (for now)
+matrix:
+ allow_failures:
+ - env: GO_VERSION="stable" DISTRO="ubuntu"
+ - env: GO_VERSION="stable" DISTRO="fedora"
+ - env: GO_VERSION="stable" DISTRO="centos"
+
+before_install:
+ - sudo apt-get -qq update
+ - sudo apt-get -qq install realpath
+
+script:
+ - echo "Travis/host environment:"
+ - export TRAVIS_ENV="-e TRAVIS=$TRAVIS
+ -e CI=$CI
+ -e TRAVIS_COMMIT=$TRAVIS_COMMIT
+ -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE
+ -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
+ -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST
+ -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA
+ -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG
+ -e TRAVIS_BRANCH=$TRAVIS_BRANCH
+ -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID
+ -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR"
+ - env
+ - echo "Running tests in SPC using ./hack/run_ci_tests.sh"
+ - ./hack/run_ci_tests.sh
diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS
new file mode 100644
index 000000000..11cd83d14
--- /dev/null
+++ b/vendor/github.com/containers/storage/AUTHORS
@@ -0,0 +1,1522 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad <aanand.prasad@gmail.com>
+Aaron Davidson <aaron@databricks.com>
+Aaron Feng <aaron.feng@gmail.com>
+Aaron Huslage <huslage@gmail.com>
+Aaron Lehmann <aaron.lehmann@docker.com>
+Aaron Welch <welch@packet.net>
+Abel Muiño <amuino@gmail.com>
+Abhijeet Kasurde <akasurde@redhat.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abhishek Chanda <abhishek.becs@gmail.com>
+Abin Shahab <ashahab@altiscale.com>
+Adam Miller <admiller@redhat.com>
+Adam Singer <financeCoding@gmail.com>
+Aditi Rajagopal <arajagopal@us.ibm.com>
+Aditya <aditya@netroy.in>
+Adria Casas <adriacasas88@gmail.com>
+Adrian Mouat <adrian.mouat@gmail.com>
+Adrian Oprea <adrian@codesi.nz>
+Adrien Folie <folie.adrien@gmail.com>
+Adrien Gallouët <adrien@gallouet.fr>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Feldman <aidan.feldman@gmail.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
+AJ Bowen <aj@gandi.net>
+Ajey Charantimath <ajey.charantimath@gmail.com>
+ajneu <ajneu@users.noreply.github.com>
+Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
+Al Tobey <al@ooyala.com>
+alambike <alambike@gmail.com>
+Alan Scherger <flyinprogrammer@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Albert Callarisa <shark234@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alena Prokharchyk <alena@rancher.com>
+Alessandro Boch <aboch@docker.com>
+Alessio Biancalana <dottorblaster@gmail.com>
+Alex Chan <alex@alexwlchan.net>
+Alex Crawford <alex.crawford@coreos.com>
+Alex Ellis <alexellis2@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Samorukov <samm@os2.kiev.ua>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Artemenko <svetlyak.40wt@gmail.com>
+Alexander Boyd <alex@opengroove.org>
+Alexander Larsson <alexl@redhat.com>
+Alexander Morozov <lk4d4@docker.com>
+Alexander Shopov <ash@kambanaria.org>
+Alexandre Beslic <abronan@docker.com>
+Alexandre González <agonzalezro@gmail.com>
+Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
+Alexey Guskov <lexag@mail.ru>
+Alexey Kotlyarov <alexey@infoxchange.net.au>
+Alexey Shamrin <shamrin@gmail.com>
+Alexis THOMAS <fr.alexisthomas@gmail.com>
+Ali Dehghani <ali.dehghani.g@gmail.com>
+Allen Madsen <blatyo@gmail.com>
+Allen Sun <allen.sun@daocloud.io>
+almoehi <almoehi@users.noreply.github.com>
+Alvin Richards <alvin.richards@docker.com>
+amangoel <amangoel@gmail.com>
+Amen Belayneh <amenbelayneh@gmail.com>
+Amit Bakshi <ambakshi@gmail.com>
+Amit Krishnan <amit.krishnan@oracle.com>
+Amy Lindburg <amy.lindburg@docker.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Anatoly Borodin <anatoly.borodin@gmail.com>
+Anchal Agrawal <aagrawa4@illinois.edu>
+Anders Janmyr <anders@janmyr.com>
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky <robotciti@live.com>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
+Andreas Köhler <andi5.py@gmx.net>
+Andreas Savvides <andreas@editd.com>
+Andreas Tiefenthaler <at@an-ti.eu>
+Andrew C. Bodine <acbodine@us.ibm.com>
+Andrew Clay Shafer <andrewcshafer@gmail.com>
+Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
+Andrew Gerrand <adg@golang.org>
+Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew Kuklewicz <kookster@gmail.com>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
+Andrew Macpherson <hopscotch23@gmail.com>
+Andrew Martin <sublimino@gmail.com>
+Andrew Munsell <andrew@wizardapps.net>
+Andrew Weiss <andrew.weiss@outlook.com>
+Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
+André Martins <aanm90@gmail.com>
+andy <ztao@tibco-support.com>
+Andy Chambers <anchambers@paypal.com>
+andy diller <dillera@gmail.com>
+Andy Goldstein <agoldste@redhat.com>
+Andy Kipp <andy@rstudio.com>
+Andy Rothfusz <github@developersupport.net>
+Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
+Anes Hasicic <anes.hasicic@gmail.com>
+Anil Belur <askb23@gmail.com>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anonmily <michelle@michelleliu.io>
+Anthon van der Neut <anthon@mnt.org>
+Anthony Baire <Anthony.Baire@irisa.fr>
+Anthony Bishopric <git@anthonybishopric.com>
+Anthony Dahanne <anthony.dahanne@gmail.com>
+Anton Löfgren <anton.lofgren@gmail.com>
+Anton Nikitin <anton.k.nikitin@gmail.com>
+Anton Polonskiy <anton.polonskiy@gmail.com>
+Anton Tiurin <noxiouz@yandex.ru>
+Antonio Murdaca <antonio.murdaca@gmail.com>
+Antony Messerli <amesserl@rackspace.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com>
+apocas <petermdias@gmail.com>
+ArikaChen <eaglesora@gmail.com>
+Arnaud Porterie <arnaud.porterie@docker.com>
+Arthur Barr <arthur.barr@uk.ibm.com>
+Arthur Gautier <baloo@gandi.net>
+Artur Meyster <arthurfbi@yahoo.com>
+Arun Gupta <arun.gupta@gmail.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Das <andas222@gmail.com>
+Avi Miller <avi.miller@oracle.com>
+ayoshitake <airandfingers@gmail.com>
+Azat Khuyiyakhmetov <shadow_uz@mail.ru>
+Bardia Keyoumarsi <bkeyouma@ucsc.edu>
+Barnaby Gray <barnaby@pickle.me.uk>
+Barry Allard <barry.allard@gmail.com>
+Bartłomiej Piotrowski <b@bpiotrowski.pl>
+Bastiaan Bakker <bbakker@xebia.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Firshman <ben@firshman.co.uk>
+Ben Golub <ben.golub@dotcloud.com>
+Ben Hall <ben@benhall.me.uk>
+Ben Sargent <ben@brokendigits.com>
+Ben Severson <BenSeverson@users.noreply.github.com>
+Ben Toews <mastahyeti@gmail.com>
+Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bernerd Schaefer <bj.schaefer@gmail.com>
+Bert Goethals <bert@bertg.be>
+Bharath Thiruveedula <bharath_ves@hotmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bill W <SydOps@users.noreply.github.com>
+bin liu <liubin0329@users.noreply.github.com>
+Blake Geno <blakegeno@gmail.com>
+Boaz Shuster <ripcurld.github@gmail.com>
+bobby abbott <ttobbaybbob@gmail.com>
+boucher <rboucher@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
+boynux <boynux@gmail.com>
+Bradley Cicenas <bradley.cicenas@gmail.com>
+Bradley Wright <brad@intranation.com>
+Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon@ifup.org>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brendan Dixon <brendand@microsoft.com>
+Brent Salisbury <brent.salisbury@docker.com>
+Brett Higgins <brhiggins@arbor.net>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
+Brian Bland <brian.bland@docker.com>
+Brian DeHamer <brian@dehamer.com>
+Brian Dorsey <brian@dorseys.org>
+Brian Flad <bflad417@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Brian McCallister <brianm@skife.org>
+Brian Olsen <brian@maven-group.org>
+Brian Shumate <brian@couchbase.com>
+Brian Torres-Gil <brian@dralth.com>
+Brian Trump <btrump@yelp.com>
+Brice Jaglin <bjaglin@teads.tv>
+Briehan Lombaard <briehan.lombaard@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
+Bruno Gazzera <bgazzera@paginar.com>
+Bruno Renié <brutasse@gmail.com>
+Bryan Bess <squarejaw@bsbess.com>
+Bryan Boreham <bjboreham@gmail.com>
+Bryan Matsuo <bryan.matsuo@gmail.com>
+Bryan Murphy <bmurphy1976@gmail.com>
+buddhamagnet <buddhamagnet@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
+Caleb Spare <cespare@gmail.com>
+Calen Pennington <cale@edx.org>
+Cameron Boehmer <cameron.boehmer@gmail.com>
+Cameron Spear <cameronspear@gmail.com>
+Campbell Allen <campbell.allen@gmail.com>
+Candid Dauth <cdauth@cdauth.eu>
+Carl Henrik Lunde <chlunde@ping.uio.no>
+Carl X. Su <bcbcarl@gmail.com>
+Carlos Alexandro Becker <caarlos0@gmail.com>
+Carlos Sanchez <carlos@apache.org>
+Carol Fager-Higgins <carol.fager-higgins@docker.com>
+Cary <caryhartline@users.noreply.github.com>
+Casey Bisson <casey.bisson@joyent.com>
+Cedric Davies <cedricda@microsoft.com>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Chad Swenson <chadswen@gmail.com>
+Chance Zibolski <chance.zibolski@gmail.com>
+Chander G <chandergovind@gmail.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Hooper <charles.hooper@dotcloud.com>
+Charles Law <claw@conduce.com>
+Charles Lindsay <chaz@chazomatic.us>
+Charles Merriam <charles.merriam@gmail.com>
+Charles Sarrazin <charles@sarraz.in>
+Charlie Lewis <charliel@lab41.org>
+Chase Bolt <chase.bolt@gmail.com>
+ChaYoung You <yousbe@gmail.com>
+Chen Chao <cc272309126@gmail.com>
+Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
+cheney90 <cheney-90@hotmail.com>
+Chewey <prosto-chewey@users.noreply.github.com>
+Chia-liang Kao <clkao@clkao.org>
+chli <chli@freewheel.tv>
+Cholerae Hu <choleraehyq@gmail.com>
+Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+Chris Dituri <csdituri@gmail.com>
+Chris Fordham <chris@fordham-nagy.id.au>
+Chris Khoo <chris.khoo@gmail.com>
+Chris McKinnel <chrismckinnel@gmail.com>
+Chris Seto <chriskseto@gmail.com>
+Chris Snow <chsnow123@gmail.com>
+Chris St. Pierre <chris.a.st.pierre@gmail.com>
+Chris Stivers <chris@stivers.us>
+Chris Swan <chris.swan@iee.org>
+Chris Wahl <github@wahlnetwork.com>
+Chris Weyl <cweyl@alumni.drew.edu>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
+Christian Berendt <berendt@b1-systems.de>
+Christian Böhme <developement@boehme3d.de>
+Christian Persson <saser@live.se>
+Christian Rotzoll <ch.rotzoll@gmail.com>
+Christian Simon <simon@swine.de>
+Christian Stefanescu <st.chris@gmail.com>
+ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Mehay <cmehay@online.net>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christopher Currie <codemonkey+github@gmail.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Christopher Latham <sudosurootdev@gmail.com>
+Christopher Rigor <crigor@gmail.com>
+Christy Perez <christy@linux.vnet.ibm.com>
+Chun Chen <ramichen@tencent.com>
+Ciro S. Costa <ciro.costa@usp.br>
+Clayton Coleman <ccoleman@redhat.com>
+Clinton Kitson <clintonskitson@gmail.com>
+Coenraad Loubser <coenraad@wish.org.za>
+Colin Dunklau <colin.dunklau@gmail.com>
+Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
+Collin Guarino <collin.guarino@gmail.com>
+Colm Hally <colmhally@gmail.com>
+companycy <companycy@gmail.com>
+Cory Forsyth <cory.forsyth@gmail.com>
+cressie176 <github@stephen-cresswell.net>
+Cristian Staretu <cristian.staretu@gmail.com>
+cristiano balducci <cristiano.balducci@gmail.com>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+Cyril F <cyrilf7x@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+Dafydd Crosby <dtcrsby@gmail.com>
+dalanlan <dalanlan925@gmail.com>
+Damien Nadé <github@livna.org>
+Damien Nozay <damien.nozay@gmail.com>
+Damjan Georgievski <gdamjan@gmail.com>
+Dan Anolik <dan@anolik.net>
+Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Griffin <dgriffin@peer1.com>
+Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan Levy <dan@danlevy.net>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
+Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Daniel Gasienica <daniel@gasienica.ch>
+Daniel Hiltgen <daniel.hiltgen@docker.com>
+Daniel Menet <membership@sontags.ch>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nephin <dnephin@docker.com>
+Daniel Norberg <dano@spotify.com>
+Daniel Nordberg <dnordberg@gmail.com>
+Daniel Robinson <gottagetmac@gmail.com>
+Daniel S <dan.streby@gmail.com>
+Daniel Von Fange <daniel@leancoder.com>
+Daniel YC Lin <dlin.tw@gmail.com>
+Daniel Zhang <jmzwcn@gmail.com>
+Daniel, Dao Quang Minh <dqminh@cloudflare.com>
+Danny Berger <dpb587@gmail.com>
+Danny Yates <danny@codeaholics.org>
+Darren Coxall <darren@darrencoxall.com>
+Darren Shepherd <darren.s.shepherd@gmail.com>
+Darren Stahl <darst@microsoft.com>
+Dave Barboza <dbarboza@datto.com>
+Dave Henderson <Dave.Henderson@ca.ibm.com>
+Dave MacDonald <mindlapse@gmail.com>
+Dave Tucker <dt@docker.com>
+David Anderson <dave@natulte.net>
+David Calavera <david.calavera@gmail.com>
+David Corking <dmc-source@dcorking.com>
+David Cramer <davcrame@cisco.com>
+David Currie <david_currie@uk.ibm.com>
+David Davis <daviddavis@redhat.com>
+David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
+David Lawrence <david.lawrence@docker.com>
+David Mackey <tdmackey@booleanhaiku.com>
+David Mat <david@davidmat.com>
+David Mcanulty <github@hellspark.com>
+David Pelaez <pelaez89@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
+David Röthlisberger <david@rothlis.net>
+David Sheets <sheets@alum.mit.edu>
+David Sissitka <me@dsissitka.com>
+David Xia <dxia@spotify.com>
+David Young <yangboh@cn.ibm.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
+Dawn Chen <dawnchen@google.com>
+dcylabs <dcylabs@gmail.com>
+decadent <decadent@users.noreply.github.com>
+deed02392 <georgehafiz@gmail.com>
+Deng Guangxing <dengguangxing@huawei.com>
+Deni Bertovic <deni@kset.org>
+Denis Gladkikh <denis@gladkikh.email>
+Denis Ollier <larchunix@users.noreply.github.com>
+Dennis Docter <dennis@d23.nl>
+Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
+Derek Ch <denc716@gmail.com>
+Derek McGowan <derek@mcgstyle.net>
+Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
+devmeyster <arthurfbi@yahoo.com>
+Devvyn Murphy <devvyn@devvyn.com>
+Dharmit Shah <shahdharmit@gmail.com>
+Dieter Reuter <dieter.reuter@me.com>
+Dima Stopel <dima@twistlock.com>
+Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
+Dimitry Andric <d.andric@activevideo.com>
+Dinesh Subhraveti <dineshs@altiscale.com>
+Diogo Monica <diogo@docker.com>
+DiuDiugirl <sophia.wang@pku.edu.cn>
+Djibril Koné <kone.djibril@gmail.com>
+dkumor <daniel@dkumor.com>
+Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
+Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry Gusev <dmitry.gusev@gmail.com>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
+Dmitry Vorobev <dimahabr@gmail.com>
+Dolph Mathews <dolph.mathews@gmail.com>
+Dominik Finkbeiner <finkes93@gmail.com>
+Dominik Honnef <dominik@honnef.co>
+Don Kirkby <donkirkby@users.noreply.github.com>
+Don Kjer <don.kjer@gmail.com>
+Don Spaulding <donspauldingii@gmail.com>
+Donald Huang <don.hcd@gmail.com>
+Dong Chen <dongluo.chen@docker.com>
+Donovan Jones <git@gamma.net.nz>
+Doug Davis <dug@us.ibm.com>
+Doug MacEachern <dougm@vmware.com>
+Doug Tangren <d.tangren@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
+Dražen Lučanin <kermit666@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Ed Costello <epc@epcostello.com>
+Edmund Wagner <edmund-wagner@web.de>
+Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
+Eivind Uggedal <eivind@uggedal.com>
+Elan Ruusamäe <glen@delfi.ee>
+Elias Probst <mail@eliasprobst.eu>
+Elijah Zupancic <elijah@zupancic.name>
+eluck <mail@eluck.me>
+Elvir Kuric <elvirkuric@gmail.com>
+Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
+Emily Rose <emily@contactvibe.com>
+Emir Ozer <emirozer@yandex.com>
+Enguerran <engcolson@gmail.com>
+Eohyung Lee <liquidnuker@gmail.com>
+Eric Hanchrow <ehanchrow@ine.com>
+Eric Lee <thenorthsecedes@gmail.com>
+Eric Myhre <hash@exultant.us>
+Eric Paris <eparis@redhat.com>
+Eric Rafaloff <erafaloff@gmail.com>
+Eric Rosenberg <ehaydenr@users.noreply.github.com>
+Eric Sage <eric.david.sage@gmail.com>
+Eric Windisch <eric@windisch.us>
+Eric Yang <windfarer@gmail.com>
+Eric-Olivier Lamey <eo@lamey.me>
+Erik Bray <erik.m.bray@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
+Erik Weathers <erikdw@gmail.com>
+Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
+Euan <euank@amazon.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
+eugenkrizo <eugen.krizo@gmail.com>
+evalle <shmarnev@gmail.com>
+Evan Allrich <evan@unguku.com>
+Evan Carmi <carmi@users.noreply.github.com>
+Evan Hazlett <ejhazlett@gmail.com>
+Evan Krall <krall@yelp.com>
+Evan Phoenix <evan@fallingsnow.net>
+Evan Wies <evan@neomantra.net>
+Evgeny Vereshchagin <evvers@ya.ru>
+Ewa Czechowska <ewa@ai-traders.com>
+Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
+ezbercih <cem.ezberci@gmail.com>
+Fabiano Rosas <farosas@br.ibm.com>
+Fabio Falci <fabiofalci@gmail.com>
+Fabio Rehm <fgrehm@gmail.com>
+Fabrizio Regini <freegenie@gmail.com>
+Fabrizio Soppelsa <fsoppelsa@mirantis.com>
+Faiz Khan <faizkhan00@gmail.com>
+falmp <chico.lopes@gmail.com>
+Fangyuan Gao <21551127@zju.edu.cn>
+Fareed Dudhia <fareeddudhia@googlemail.com>
+Fathi Boudra <fathi.boudra@linaro.org>
+Federico Gimenez <fgimenez@coit.es>
+Felix Geisendörfer <felix@debuggable.com>
+Felix Hupfeld <quofelix@users.noreply.github.com>
+Felix Rabe <felix@rabe.io>
+Felix Schindler <fschindler@weluse.de>
+Ferenc Szabo <pragmaticfrank@gmail.com>
+Fernando <fermayo@gmail.com>
+Fero Volar <alian@alian.info>
+Filipe Brandenburger <filbranden@google.com>
+Filipe Oliveira <contato@fmoliveira.com.br>
+fl0yd <fl0yd@me.com>
+Flavio Castelli <fcastelli@suse.com>
+FLGMwt <ryan.stelly@live.com>
+Florian <FWirtz@users.noreply.github.com>
+Florian Klein <florian.klein@free.fr>
+Florian Maier <marsmensch@users.noreply.github.com>
+Florian Weingarten <flo@hackvalue.de>
+Florin Asavoaie <florin.asavoaie@gmail.com>
+Francesc Campoy <campoy@google.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+Francisco Souza <f@souza.cc>
+Frank Groeneveld <frank@ivaldi.nl>
+Frank Herrmann <fgh@4gh.tv>
+Frank Macreery <frank@macreery.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
+Fred Lifton <fred.lifton@docker.com>
+Frederick F. Kautz IV <fkautz@redhat.com>
+Frederik Loeffert <frederik@zitrusmedia.de>
+Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
+Freek Kalter <freek@kalteronline.org>
+fy2462 <fy2462@gmail.com>
+Félix Baylac-Jacqué <baylac.felix@gmail.com>
+Félix Cantournet <felix.cantournet@cloudwatt.com>
+Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
+Gabriel Monroy <gabriel@opdemand.com>
+GabrielNicolasAvellaneda <avellaneda.gabriel@gmail.com>
+Galen Sampson <galen.sampson@gmail.com>
+Gareth Rushgrove <gareth@morethanseven.net>
+Garrett Barboza <garrett@garrettbarboza.com>
+Gaurav <gaurav.gosec@gmail.com>
+gautam, prasanna <prasannagautam@gmail.com>
+GennadySpb <lipenkov@gmail.com>
+Geoffrey Bachelet <grosfrais@gmail.com>
+George MacRorie <gmacr31@gmail.com>
+George Xie <georgexsh@gmail.com>
+Georgi Hristozov <georgi@forkbomb.nl>
+Gereon Frey <gereon.frey@dynport.de>
+German DZ <germ@ndz.com.ar>
+Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Gianluca Borello <g.borello@gmail.com>
+Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
+gissehel <public-devgit-dantus@gissehel.org>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
+Glyn Normington <gnormington@gopivotal.com>
+GoBella <caili_welcome@163.com>
+Goffert van Gool <goffert@phusion.nl>
+Gosuke Miyashita <gosukenator@gmail.com>
+Gou Rao <gourao@users.noreply.github.com>
+Govinda Fichtner <govinda.fichtner@googlemail.com>
+Grant Reaber <grant.reaber@gmail.com>
+Graydon Hoare <graydon@pobox.com>
+Greg Fausak <greg@tacodata.com>
+Greg Thornton <xdissent@me.com>
+grossws <grossws@gmail.com>
+grunny <mwgrunny@gmail.com>
+gs11 <gustav.sinder@gmail.com>
+Guilhem Lettron <guilhem+github@lettron.fr>
+Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
+Guillaume J. Charmes <guillaume.charmes@docker.com>
+guoxiuyan <guoxiuyan@huawei.com>
+Gurjeet Singh <gurjeet@singh.im>
+Guruprasad <lgp171188@gmail.com>
+gwx296173 <gaojing3@huawei.com>
+Günter Zöchbauer <guenter@gzoechbauer.com>
+Hans Kristian Flaatten <hans@starefossen.com>
+Hans Rødtang <hansrodtang@gmail.com>
+Hao Shu Wei <haosw@cn.ibm.com>
+Hao Zhang <21521210@zju.edu.cn>
+Harald Albers <github@albersweb.de>
+Harley Laue <losinggeneration@gmail.com>
+Harold Cooper <hrldcpr@gmail.com>
+Harry Zhang <harryzhang@zju.edu.cn>
+He Simei <hesimei@zju.edu.cn>
+heartlock <21521209@zju.edu.cn>
+Hector Castro <hectcastro@gmail.com>
+Henning Sprang <henning.sprang@gmail.com>
+Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie@docker.com>
+Hong Xu <hong@topbug.net>
+hsinko <21551195@zju.edu.cn>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huanzhong Zhang <zhanghuanzhong90@gmail.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Duncan <hugo@hugoduncan.org>
+Hugo Marisco <0x6875676f@gmail.com>
+Hunter Blanks <hunter@twilio.com>
+huqun <huqun@zju.edu.cn>
+Huu Nguyen <huu@prismskylabs.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+hyp3rdino <markus.kortlang@lhsystems.com>
+Hyzhou <1187766782@qq.com>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bishop <ianbishop@pace7.com>
+Ian Bull <irbull@gmail.com>
+Ian Calvert <ianjcalvert@gmail.com>
+Ian Lee <IanLee1521@gmail.com>
+Ian Main <imain@redhat.com>
+Ian Truslove <ian.truslove@gmail.com>
+Iavael <iavaelooeyt@gmail.com>
+Icaro Seara <icaro.seara@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
+Ilkka Laukkanen <ilkka@ilkka.io>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
+Ilya Gusev <mail@igusev.ru>
+ILYA Khlopotov <ilya.khlopotov@gmail.com>
+imre Fitos <imre.fitos+github@gmail.com>
+inglesp <peter.inglesby@gmail.com>
+Ingo Gottwald <in.gottwald@gmail.com>
+Isaac Dupree <antispam@idupree.com>
+Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isao Jonas <isao.jonas@gmail.com>
+Ivan Babrou <ibobrik@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
+Ivan Grcic <igrcic@gmail.com>
+J Bruni <joaohbruni@yahoo.com.br>
+J. Nunn <jbnunn@gmail.com>
+Jack Danger Canty <jackdanger@squareup.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
+Jake Champlin <jake.champlin.27@gmail.com>
+Jake Moshenko <jake@devtable.com>
+jakedt <jake@devtable.com>
+James Allen <jamesallen0108@gmail.com>
+James Carey <jecarey@us.ibm.com>
+James Carr <james.r.carr@gmail.com>
+James DeFelice <james.defelice@ishisystems.com>
+James Harrison Fisher <jameshfisher@gmail.com>
+James Kyburz <james.kyburz@gmail.com>
+James Kyle <james@jameskyle.org>
+James Lal <james@lightsofapollo.com>
+James Mills <prologic@shortcircuit.net.au>
+James Nugent <james@jen20.com>
+James Turnbull <james@lovedthanlost.net>
+Jamie Hannaford <jamie.hannaford@rackspace.com>
+Jamshid Afshar <jafshar@yahoo.com>
+Jan Keromnes <janx@linux.com>
+Jan Koprowski <jan.koprowski@gmail.com>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jan-Gerd Tenberge <janten@gmail.com>
+Jan-Jaap Driessen <janjaapdriessen@gmail.com>
+Jana Radhakrishnan <mrjana@docker.com>
+Januar Wayong <januar@gmail.com>
+Jared Biel <jared.biel@bolderthinking.com>
+Jared Hocutt <jaredh@netapp.com>
+Jaroslaw Zabiello <hipertracker@gmail.com>
+jaseg <jaseg@jaseg.net>
+Jasmine Hegman <jasmine@jhegman.com>
+Jason Divock <jdivock@gmail.com>
+Jason Giedymin <jasong@apache.org>
+Jason Green <Jason.Green@AverInformatics.Com>
+Jason Hall <imjasonh@gmail.com>
+Jason Heiss <jheiss@aput.net>
+Jason Livesay <ithkuil@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jason Plum <jplum@devonit.com>
+Jason Shepherd <jason@jasonshepherd.net>
+Jason Smith <jasonrichardsmith@gmail.com>
+Jason Sommer <jsdirv@gmail.com>
+Jason Stangroome <jason@codeassassin.com>
+jaxgeller <jacksongeller@gmail.com>
+Jay <imjching@hotmail.com>
+Jay <teguhwpurwanto@gmail.com>
+Jay Kamat <github@jgkamat.33mail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeff Anderson <jeff@docker.com>
+Jeff Johnston <jeff.johnston.mn@gmail.com>
+Jeff Lindsay <progrium@gmail.com>
+Jeff Mickey <j@codemac.net>
+Jeff Minard <jeff@creditkarma.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeffrey Morgan <jmorganca@gmail.com>
+Jeffrey van Gogh <jvg@google.com>
+Jenny Gebske <jennifer@gebske.de>
+Jeremy Grosser <jeremy@synack.me>
+Jeremy Price <jprice.rhit@gmail.com>
+Jeremy Qian <vanpire110@163.com>
+Jeremy Unruh <jeremybunruh@gmail.com>
+Jeroen Jacobs <github@jeroenj.be>
+Jesse Dearing <jesse.dearing@gmail.com>
+Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <jess@mesosphere.com>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
+jgeiger <jgeiger@gmail.com>
+Jhon Honce <jhonce@redhat.com>
+Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
+jianbosun <wonderflow.sun@gmail.com>
+Jilles Oldenbeuving <ojilles@gmail.com>
+Jim Alateras <jima@comware.com.au>
+Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jimmy Puckett <jimmy.puckett@spinen.com>
+jimmyxian <jimmyxian2004@yahoo.com.cn>
+Jinsoo Park <cellpjs@gmail.com>
+Jiri Popelka <jpopelka@redhat.com>
+Jiří Župka <jzupka@redhat.com>
+jjy <jiangjinyang@outlook.com>
+jmzwcn <jmzwcn@gmail.com>
+Joe Beda <joe.github@bedafamily.com>
+Joe Doliner <jdoliner@pachyderm.io>
+Joe Ferguson <joe@infosiftr.com>
+Joe Gordon <joe.gordon0@gmail.com>
+Joe Shaw <joe@joeshaw.org>
+Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
+Joel Hansson <joel.hansson@ecraft.com>
+Joel Wurtz <jwurtz@jolicode.com>
+Joey Geiger <jgeiger@users.noreply.github.com>
+Joey Gibson <joey@joeygibson.com>
+Joffrey F <joffrey@docker.com>
+Johan Euphrosine <proppy@google.com>
+Johan Rydberg <johan.rydberg@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
+John Costa <john.costa@gmail.com>
+John Feminella <jxf@jxf.me>
+John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
+John Howard (VM) <John.Howard@microsoft.com>
+John OBrien III <jobrieniii@yahoo.com>
+John Starks <jostarks@microsoft.com>
+John Tims <john.k.tims@gmail.com>
+John Warwick <jwarwick@gmail.com>
+John Willis <john.willis@docker.com>
+Jon Wedaman <jweede@gmail.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
+Jonathan Dowland <jon+github@alcopop.org>
+Jonathan Lebon <jlebon@redhat.com>
+Jonathan McCrohan <jmccrohan@gmail.com>
+Jonathan Mueller <j.mueller@apoveda.ch>
+Jonathan Pares <jonathanpa@users.noreply.github.com>
+Jonathan Rudenberg <jonathan@titanous.com>
+Joost Cassee <joost@cassee.net>
+Jordan <jjn2009@users.noreply.github.com>
+Jordan Arentsen <blissdev@gmail.com>
+Jordan Sissel <jls@semicomplete.com>
+Jose Diaz-Gonzalez <josegonzalez@users.noreply.github.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Joseph Hager <ajhager@gmail.com>
+Joseph Kern <jkern@semafour.net>
+Josh <jokajak@gmail.com>
+Josh Hawn <josh.hawn@docker.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
+Josiah Kiehl <jkiehl@riotgames.com>
+José Tomás Albornoz <jojo@eljojo.net>
+JP <jpellerin@leapfrogonline.com>
+jrabbit <jackjrabbit@gmail.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
+Julien Barbier <write0@gmail.com>
+Julien Bisconti <veggiemonk@users.noreply.github.com>
+Julien Bordellier <julienbordellier@gmail.com>
+Julien Dubois <julien.dubois@gmail.com>
+Julien Pervillé <julien.perville@perfect-memory.com>
+Julio Montes <imc.coder@gmail.com>
+Jun-Ru Chang <jrjang@gmail.com>
+Jussi Nummelin <jussi.nummelin@gmail.com>
+Justas Brazauskas <brazauskasjustas@gmail.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Force <justin.force@gmail.com>
+Justin Plock <jplock@users.noreply.github.com>
+Justin Simonelis <justin.p.simonelis@gmail.com>
+Justin Terry <juterry@microsoft.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+Kai Blin <kai@samba.org>
+Kai Qiang Wu(Kennan) <wkqwu@cn.ibm.com>
+Kamil Domański <kamil@domanski.co>
+kamjar gerami <kami.gerami@gmail.com>
+Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
+Karan Lyons <karan@karanlyons.com>
+Kareem Khazem <karkhaz@karkhaz.com>
+kargakis <kargakis@users.noreply.github.com>
+Karl Grzeszczak <karlgrz@gmail.com>
+Karol Duleba <mr.fuxi@gmail.com>
+Katie McLaughlin <katie@glasnt.com>
+Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
+Katrina Owen <katrina.owen@gmail.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
+kayrus <kay.diam@gmail.com>
+Ke Xu <leonhartx.k@gmail.com>
+Keli Hu <dev@keli.hu>
+Ken Cochrane <kencochrane@gmail.com>
+Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
+Kenjiro Nakayama <nakayamakenjiro@gmail.com>
+Kent Johnson <kentoj@gmail.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
+Kevin Clark <kevin.clark@gmail.com>
+Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Menard <kevin@nirvdrum.com>
+Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
+Kevin Shi <kshi@andrew.cmu.edu>
+Kevin Wallace <kevin@pentabarf.net>
+Kevin Yap <me@kevinyap.ca>
+kevinmeredith <kevin.m.meredith@gmail.com>
+Keyvan Fatehi <keyvanfatehi@gmail.com>
+kies <lleelm@gmail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+Kim Eik <kim@heldig.org>
+Kimbro Staken <kstaken@kstaken.com>
+Kir Kolyshkin <kir@openvz.org>
+Kiran Gangadharan <kiran.daredevil@gmail.com>
+Kirill SIbirev <l0kix2@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
+Koichi Shiraishi <k@zchee.io>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+Krasimir Georgiev <support@vip-consult.co.uk>
+Kristian Haugene <kristian.haugene@capgemini.com>
+Kristina Zabunova <triara.xiii@gmail.com>
+krrg <krrgithub@gmail.com>
+Kun Zhang <zkazure@gmail.com>
+Kunal Kushwaha <kunal.kushwaha@gmail.com>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
+Lai Jiangshan <jiangshanlai@gmail.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
+Lalatendu Mohanty <lmohanty@redhat.com>
+lalyos <lalyos@yahoo.com>
+Lance Chen <cyen0312@gmail.com>
+Lance Kinley <lkinley@loyaltymethods.com>
+Lars Butler <Lars.Butler@gmail.com>
+Lars Kellogg-Stedman <lars@redhat.com>
+Lars R. Damerow <lars@pixar.com>
+Laszlo Meszaros <lacienator@gmail.com>
+Laurent Erignoux <lerignoux@gmail.com>
+Laurie Voss <github@seldo.com>
+Leandro Siqueira <leandro.siqueira@gmail.com>
+Lee, Meng-Han <sunrisedm4@gmail.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Lei Jitang <leijitang@huawei.com>
+Len Weincier <len@cloudafrica.net>
+Lennie <github@consolejunkie.net>
+Leszek Kowalski <github@leszekkowalski.pl>
+Levi Blackstone <levi.blackstone@rackspace.com>
+Levi Gross <levi@levigross.com>
+Lewis Marshall <lewis@lmars.net>
+Lewis Peckover <lew+github@lew.io>
+Liana Lo <liana.lixia@gmail.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
+Liang-Chi Hsieh <viirya@gmail.com>
+liaoqingwei <liaoqingwei@huawei.com>
+limsy <seongyeol37@gmail.com>
+Lin Lu <doraalin@163.com>
+LingFaKe <lingfake@huawei.com>
+Linus Heckemann <lheckemann@twig-world.com>
+Liran Tal <liran.tal@gmail.com>
+Liron Levin <liron@twistlock.com>
+Liu Bo <bo.li.liu@oracle.com>
+Liu Hua <sdu.liu@huawei.com>
+LIZAO LI <lzlarryli@gmail.com>
+Lloyd Dewolf <foolswisdom@gmail.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
+longliqiang88 <394564827@qq.com>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
+Lorenzo Fontana <fontanalorenzo@me.com>
+Louis Opter <kalessin@kalessin.fr>
+Luca Marturana <lucamarturana@gmail.com>
+Luca Orlandi <luca.orlandi@gmail.com>
+Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
+Lucas Chan <lucas-github@lucaschan.com>
+Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
+Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
+lukaspustina <lukas.pustina@centerdevice.com>
+Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
+lukemarsden <luke@digital-crocus.com>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+Lénaïc Huard <lhuard@amadeus.com>
+Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
+Mabin <bin.ma@huawei.com>
+Madhav Puri <madhav.puri@gmail.com>
+Madhu Venugopal <madhu@socketplane.io>
+Mageee <21521230.zju.edu.cn>
+Mahesh Tiyyagura <tmahesh@gmail.com>
+malnick <malnick@gmail..com>
+Malte Janduda <mail@janduda.net>
+manchoz <giampaolo@trampolineup.com>
+Manfred Touron <m@42.am>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
+mansinahar <mansinahar@users.noreply.github.com>
+Manuel Meurer <manuel@krautcomputing.com>
+Manuel Woelker <github@manuel.woelker.org>
+mapk0y <mapk0y@gmail.com>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
+Marcelo Salazar <chelosalazar@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
+Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Linke <marcus.linke@gmx.de>
+Marcus Ramberg <marcus@nordaaker.com>
+Marek Goldmann <marek.goldmann@gmail.com>
+Marian Marinov <mm@yuhu.biz>
+Marianna Tessel <mtesselh@gmail.com>
+Mario Loriedo <mario.loriedo@gmail.com>
+Marius Gundersen <me@mariusgundersen.net>
+Marius Sturm <marius@graylog.com>
+Marius Voila <marius.voila@gmail.com>
+Mark Allen <mrallen1@yahoo.com>
+Mark McGranaghan <mmcgrana@gmail.com>
+Mark McKinstry <mmckinst@umich.edu>
+Mark West <markewest@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
+Markus Fix <lispmeister@gmail.com>
+Martijn Dwars <ikben@martijndwars.nl>
+Martijn van Oosterhout <kleptog@svana.org>
+Martin Honermeyer <maze@strahlungsfrei.de>
+Martin Kelly <martin@surround.io>
+Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
+Martin Redmond <redmond.martin@gmail.com>
+Mary Anthony <mary.anthony@docker.com>
+Masahito Zembutsu <zembutsu@users.noreply.github.com>
+Mason Malone <mason.malone@gmail.com>
+Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
+Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matt Bentley <matt.bentley@docker.com>
+Matt Haggard <haggardii@gmail.com>
+Matt McCormick <matt.mccormick@kitware.com>
+Matt Moore <mattmoor@google.com>
+Matt Robenolt <matt@ydekproductions.com>
+Matthew Heon <mheon@redhat.com>
+Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
+Matthias Klumpp <matthias@tenstral.net>
+Matthias Kühnle <git.nivoc@neverbox.com>
+Matthias Rampke <mr@soundcloud.com>
+Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
+mattymo <raytrac3r@gmail.com>
+mattyw <mattyw@me.com>
+Mauricio Garavaglia <mauriciogaravaglia@gmail.com>
+mauriyouth <mauriyouth@gmail.com>
+Max Shytikov <mshytikov@gmail.com>
+Maxim Ivanov <ivanov.maxim@gmail.com>
+Maxim Kulkin <mkulkin@mirantis.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
+Meaglith Ma <genedna@gmail.com>
+meejah <meejah@meejah.ca>
+Megan Kostick <mkostick@us.ibm.com>
+Mehul Kar <mehul.kar@gmail.com>
+Mengdi Gao <usrgdd@gmail.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
+Micah Zoltu <micah@newrelic.com>
+Michael A. Smith <michael@smith-li.com>
+Michael Bridgen <mikeb@squaremobius.net>
+Michael Brown <michael@netdirect.ca>
+Michael Chiang <mchiang@docker.com>
+Michael Crosby <michael@docker.com>
+Michael Currie <mcurrie@bruceforceresearch.com>
+Michael Friis <friism@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Michael Grauer <michael.grauer@kitware.com>
+Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Michael Hudson-Doyle <michael.hudson@linaro.org>
+Michael Huettermann <michael@huettermann.net>
+Michael Käufl <docker@c.michael-kaeufl.de>
+Michael Neale <michael.neale@gmail.com>
+Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
+Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michael West <mwest@mdsol.com>
+Michal Fojtik <mfojtik@redhat.com>
+Michal Gebauer <mishak@mishak.net>
+Michal Jemala <michal.jemala@gmail.com>
+Michal Minar <miminar@redhat.com>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michał Czeraszkiewicz <czerasz@gmail.com>
+Michiel@unhosted <michiel@unhosted.org>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Miguel Morales <mimoralea@gmail.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
+Mike Brown <brownwm@us.ibm.com>
+Mike Chelen <michael.chelen@gmail.com>
+Mike Danese <mikedanese@google.com>
+Mike Dillon <mike@embody.org>
+Mike Dougherty <mike.dougherty@docker.com>
+Mike Gaffney <mike@uberu.com>
+Mike Goelzer <mgoelzer@docker.com>
+Mike Leone <mleone896@gmail.com>
+Mike MacCana <mike.maccana@gmail.com>
+Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
+mikelinjie <294893458@qq.com>
+Mikhail Sobolev <mss@mawhrin.net>
+Miloslav Trmač <mitr@redhat.com>
+mingqing <limingqing@cyou-inc.com>
+Mingzhen Feng <fmzhen@zju.edu.cn>
+Mitch Capper <mitch.capper@gmail.com>
+mlarcher <github@ringabell.org>
+Mohammad Banikazemi <mb@us.ibm.com>
+Mohammed Aaqib Ansari <maaquib@gmail.com>
+Mohit Soni <mosoni@ebay.com>
+Morgan Bauer <mbauer@us.ibm.com>
+Morgante Pell <morgante.pell@morgante.net>
+Morgy93 <thomas@ulfertsprygoda.de>
+Morten Siebuhr <sbhr@sbhr.dk>
+Morton Fox <github@qslw.com>
+Moysés Borges <moysesb@gmail.com>
+mqliang <mqliang.zju@gmail.com>
+Mrunal Patel <mrunalp@gmail.com>
+msabansal <sabansal@microsoft.com>
+mschurenko <matt.schurenko@gmail.com>
+muge <stevezhang2014@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
+Muthukumar R <muthur@gmail.com>
+Máximo Cuadros <mcuadros@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nahum Shalman <nshalman@omniti.com>
+Nakul Pathak <nakulpathak3@hotmail.com>
+Nalin Dahyabhai <nalin@redhat.com>
+Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
+Natalie Parker <nparker@omnifone.com>
+Natanael Copa <natanael.copa@docker.com>
+Nate Brennand <nate.brennand@clever.com>
+Nate Eagleson <nate@nateeag.com>
+Nate Jones <nate@endot.org>
+Nathan Hsieh <hsieh.nathan@gmail.com>
+Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
+Nathan McCauley <nathan.mccauley@docker.com>
+Nathan Williams <nathan@teamtreehouse.com>
+Neal McBurnett <neal@mcburnett.org>
+Nelson Chen <crazysim@gmail.com>
+Nghia Tran <nghia@google.com>
+Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
+Nick Irvine <nfirvine@nfirvine.com>
+Nick Parker <nikaios@gmail.com>
+Nick Payne <nick@kurai.co.uk>
+Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
+Nick Stinemates <nick@stinemates.org>
+Nicolas Borboën <ponsfrilus@users.noreply.github.com>
+Nicolas De loof <nicolas.deloof@gmail.com>
+Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
+Nicolas Kaiser <nikai@nikai.net>
+Nicolás Hock Isaza <nhocki@gmail.com>
+Nigel Poulton <nigelpoulton@hotmail.com>
+NikolaMandic <mn080202@gmail.com>
+nikolas <nnyby@columbia.edu>
+Nirmal Mehta <nirmalkmehta@gmail.com>
+Nishant Totla <nishanttotla@gmail.com>
+NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
+noducks <onemannoducks@gmail.com>
+Nolan Darilek <nolan@thewordnerd.info>
+nponeccop <andy.melnikov@gmail.com>
+Nuutti Kotivuori <naked@iki.fi>
+nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
+objectified <objectified@gmail.com>
+OddBloke <daniel@daniel-watkins.co.uk>
+odk- <github@odkurzacz.org>
+Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
+Ohad Schneider <ohadschn@users.noreply.github.com>
+Ole Reifschneider <mail@ole-reifschneider.de>
+Oliver Neal <ItsVeryWindy@users.noreply.github.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
+Olle Jonsson <olle.jonsson@gmail.com>
+Oriol Francès <oriolfa@gmail.com>
+Otto Kekäläinen <otto@seravo.fi>
+oyld <oyld0210@163.com>
+ozlerhakan <hakan.ozler@kodcu.com>
+paetling <paetling@gmail.com>
+pandrew <letters@paulnotcom.se>
+panticz <mail@konczalski.de>
+Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
+Patrick Devine <patrick.devine@docker.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Stapleton <github@gdi2290.com>
+pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
+paul <paul@inkling.com>
+Paul Annesley <paul@annesley.cc>
+Paul Bellamy <paul.a.bellamy@gmail.com>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
+Paul Hammond <paul@paulhammond.org>
+Paul Jimenez <pj@place.org>
+Paul Lietar <paul@lietar.net>
+Paul Liljenberg <liljenberg.paul@gmail.com>
+Paul Morie <pmorie@gmail.com>
+Paul Nasrat <pnasrat@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Pavel Lobashov <ShockwaveNN@gmail.com>
+Pavel Pospisil <pospispa@gmail.com>
+Pavel Sutyrin <pavel.sutyrin@gmail.com>
+Pavel Tikhomirov <ptikhomirov@parallels.com>
+Pavlos Ratis <dastergon@gentoo.org>
+Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
+Peggy Li <peggyli.224@gmail.com>
+Pei Su <sillyousu@gmail.com>
+Penghan Wang <ph.wang@daocloud.io>
+perhapszzy@sina.com <perhapszzy@sina.com>
+Peter Bourgon <peter@bourgon.org>
+Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Choi <reikani@Peters-MacBook-Pro.local>
+Peter Dave Hello <PeterDaveHello@users.noreply.github.com>
+Peter Edge <peter.edge@gmail.com>
+Peter Ericson <pdericson@gmail.com>
+Peter Esbensen <pkesbensen@gmail.com>
+Peter Malmgren <ptmalmgren@gmail.com>
+Peter Salvatore <peter@psftw.com>
+Peter Volpe <petervo@redhat.com>
+Peter Waller <p@pwaller.net>
+Phil <underscorephil@gmail.com>
+Phil Estes <estesp@linux.vnet.ibm.com>
+Phil Spitler <pspitler@gmail.com>
+Philip Monroe <phil@philmonroe.com>
+Philipp Wahala <philipp.wahala@gmail.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
+Phillip Alexander <git@phillipalexander.io>
+pidster <pid@pidster.com>
+Piergiuliano Bossi <pgbossi@gmail.com>
+Pierre <py@poujade.org>
+Pierre Carrier <pierre@meteor.com>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
+Piotr Bogdan <ppbogdan@gmail.com>
+pixelistik <pixelistik@users.noreply.github.com>
+Porjo <porjo38@yahoo.com.au>
+Poul Kjeldager Sørensen <pks@s-innovations.net>
+Pradeep Chhetri <pradeep@indix.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Prayag Verma <prayag.verma@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
+pysqz <randomq@126.com>
+qg <1373319223@qq.com>
+qhuang <h.huangqiang@huawei.com>
+Qiang Huang <h.huangqiang@huawei.com>
+qq690388648 <690388648@qq.com>
+Quentin Brossard <qbrossard@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
+Quentin Tayssier <qtayssier@gmail.com>
+r0n22 <cameron.regan@gmail.com>
+Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
+Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Raghuram Devarakonda <draghuram@gmail.com>
+Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
+Ralle <spam@rasmusa.net>
+Ralph Bean <rbean@redhat.com>
+Ramkumar Ramachandra <artagnon@gmail.com>
+Ramon van Alteren <ramon@vanalteren.nl>
+Ray Tsang <saturnism@users.noreply.github.com>
+ReadmeCritic <frankensteinbot@gmail.com>
+Recursive Madman <recursive.madman@gmx.de>
+Regan McCooey <rmccooey27@aol.com>
+Remi Rampin <remirampin@gmail.com>
+Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
+resouer <resouer@163.com>
+rgstephens <greg@udon.org>
+Rhys Hiltner <rhys@twitch.tv>
+Rich Seymour <rseymour@gmail.com>
+Richard <richard.scothern@gmail.com>
+Richard Burnison <rburnison@ebay.com>
+Richard Harvey <richard@squarecows.com>
+Richard Metzler <richard@paadee.com>
+Richard Scothern <richard.scothern@gmail.com>
+Richo Healey <richo@psych0tik.net>
+Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Rick Wieman <git@rickw.nl>
+Rik Nijessen <rik@keefo.nl>
+Riku Voipio <riku.voipio@linaro.org>
+Riley Guerin <rileytg.dev@gmail.com>
+Ritesh H Shukla <sritesh@vmware.com>
+Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
+Rob Vesse <rvesse@dotnetrdf.org>
+Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
+Robert Obryk <robryk@gmail.com>
+Robert Stern <lexandro2000@gmail.com>
+Robert Wallis <smilingrob@gmail.com>
+Roberto G. Hashioka <roberto.hashioka@docker.com>
+Robin Naundorf <r.naundorf@fh-muenster.de>
+Robin Schneider <ypid@riseup.net>
+Robin Speekenbrink <robin@kingsquare.nl>
+robpc <rpcann@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Rodrigo Vaz <rodrigo.vaz@gmail.com>
+Roel Van Nyen <roel.vannyen@gmail.com>
+Roger Peppe <rogpeppe@gmail.com>
+Rohit Jnagal <jnagal@google.com>
+Rohit Kadam <rohit.d.kadam@gmail.com>
+Roland Huß <roland@jolokia.org>
+Roland Kammerer <roland.kammerer@linbit.com>
+Roland Moriz <rmoriz@users.noreply.github.com>
+Roma Sokolov <sokolov.r.v@gmail.com>
+Roman Strashkin <roman.strashkin@gmail.com>
+Ron Smits <ron.smits@gmail.com>
+root <docker-dummy@example.com>
+root <root@localhost>
+root <root@ubuntu-14.04-amd64-vbox>
+root <root@webm215.cluster016.ha.ovh.net>
+Rory Hunter <roryhunter2@gmail.com>
+Rory McCune <raesene@gmail.com>
+Ross Boucher <rboucher@gmail.com>
+Rovanion Luckey <rovanion.luckey@gmail.com>
+Rozhnov Alexandr <nox73@ya.ru>
+rsmoorthy <rsmoorthy@users.noreply.github.com>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Rui Lopes <rgl@ruilopes.com>
+Ryan Anderson <anderson.ryanc@gmail.com>
+Ryan Aslett <github@mixologic.com>
+Ryan Belgrave <rmb1993@gmail.com>
+Ryan Detzel <ryan.detzel@gmail.com>
+Ryan Fowler <rwfowler@gmail.com>
+Ryan McLaughlin <rmclaughlin@insidesales.com>
+Ryan O'Donnell <odonnellryanc@gmail.com>
+Ryan Seto <ryanseto@yak.net>
+Ryan Thomas <rthomas@atlassian.com>
+Ryan Trauntvein <rtrauntvein@novacoast.com>
+Ryan Wallner <ryan.wallner@clusterhq.com>
+RyanDeng <sheldon.d1018@gmail.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
+s. rannou <mxs@sbrk.org>
+s00318865 <sunyuan3@huawei.com>
+Sabin Basyal <sabin.basyal@gmail.com>
+Sachin Joshi <sachin_jayant_joshi@hotmail.com>
+Sagar Hani <sagarhani33@gmail.com>
+Sainath Grandhi <sainath.grandhi@intel.com>
+Sally O'Malley <somalley@redhat.com>
+Sam Abed <sam.abed@gmail.com>
+Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
+Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Neirinck <sam@samneirinck.com>
+Sam Reis <sreis@atlassian.com>
+Sam Rijs <srijs@airpost.net>
+Sambuddha Basu <sambuddhabasu1@gmail.com>
+Sami Wagiaalla <swagiaal@redhat.com>
+Samuel Andaya <samuel@andaya.net>
+Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
+Samuel Karp <skarp@amazon.com>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
+Sankar சங்கர் <sankar.curiosity@gmail.com>
+Sanket Saurav <sanketsaurav@gmail.com>
+Santhosh Manohar <santhosh@docker.com>
+sapphiredev <se.imas.kr@gmail.com>
+Satnam Singh <satnam@raintown.org>
+satoru <satorulogic@gmail.com>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
+scaleoutsean <scaleoutsean@users.noreply.github.com>
+Scott Bessler <scottbessler@gmail.com>
+Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Stamp <scottstamp851@gmail.com>
+Scott Walls <sawalls@umich.edu>
+sdreyesg <sdreyesg@gmail.com>
+Sean Christopherson <sean.j.christopherson@intel.com>
+Sean Cronin <seancron@gmail.com>
+Sean OMeara <sean@chef.io>
+Sean P. Kane <skane@newrelic.com>
+Sebastiaan van Steenis <mail@superseb.nl>
+Sebastiaan van Stijn <github@gone.nl>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+Senthil Kumaran <senthil@uthcode.com>
+SeongJae Park <sj38.park@gmail.com>
+Seongyeol Lim <seongyeol37@gmail.com>
+Serge Hallyn <serge.hallyn@ubuntu.com>
+Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
+Sergey Evstifeev <sergey.evstifeev@gmail.com>
+Sevki Hasirci <s@sevki.org>
+Shane Canon <scanon@lbl.gov>
+Shane da Silva <shane@dasilva.io>
+shaunol <shaunol@gmail.com>
+Shawn Landden <shawn@churchofgit.com>
+Shawn Siefkas <shawn.siefkas@meredith.com>
+Shekhar Gulati <shekhargulati84@gmail.com>
+Sheng Yang <sheng@yasker.org>
+Shengbo Song <thomassong@tencent.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Shijiang Wei <mountkin@gmail.com>
+Shishir Mahajan <shishir.mahajan@redhat.com>
+shuai-z <zs.broccoli@gmail.com>
+Shuwei Hao <haosw@cn.ibm.com>
+Sian Lerk Lau <kiawin@gmail.com>
+sidharthamani <sid@rancher.com>
+Silas Sewell <silas@sewell.org>
+Simei He <hesimei@zju.edu.cn>
+Simon Eskildsen <sirup@sirupsen.com>
+Simon Leinen <simon.leinen@gmail.com>
+Simon Taranto <simon.taranto@gmail.com>
+Sindhu S <sindhus@live.in>
+Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+Solganik Alexander <solganik@gmail.com>
+Solomon Hykes <solomon@docker.com>
+Song Gao <song@gao.io>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+Soulou <leo@unbekandt.eu>
+Spencer Brown <spencer@spencerbrown.org>
+Spencer Smith <robertspencersmith@gmail.com>
+Sridatta Thatipamala <sthatipamala@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
+srinsriv <srinsriv@users.noreply.github.com>
+Steeve Morin <steeve.morin@gmail.com>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+Stefan J. Wernli <swernli@microsoft.com>
+Stefan Praszalowicz <stefan@greplin.com>
+Stefan Scherer <scherer_stefan@icloud.com>
+Stefan Staudenmeyer <doerte@instana.com>
+Stefan Weil <sw@weilnetz.de>
+Stephen Crosby <stevecrozz@gmail.com>
+Stephen Day <stephen.day@docker.com>
+Stephen Rust <srust@blockbridge.com>
+Steve Durrheimer <s.durrheimer@gmail.com>
+Steve Francia <steve.francia@gmail.com>
+Steve Koch <stevekochscience@gmail.com>
+Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Iveson <sjiveson@outlook.com>
+Steven Merrill <steven.merrill@gmail.com>
+Steven Richards <steven@axiomzen.co>
+Steven Taylor <steven.taylor@me.com>
+Subhajit Ghosh <isubuz.g@gmail.com>
+Sujith Haridasan <sujith.h@gmail.com>
+Suryakumar Sudar <surya.trunks@gmail.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Swapnil Daingade <swapnil.daingade@gmail.com>
+Sylvain Baubeau <sbaubeau@redhat.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
+TAGOMORI Satoshi <tagomoris@gmail.com>
+tang0th <tang0th@gmx.com>
+Tangi COLIN <tangicolin@gmail.com>
+Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
+Taylor Jones <monitorjbl@gmail.com>
+tbonza <tylers.pile@gmail.com>
+Ted M. Young <tedyoung@gmail.com>
+Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
+terryding77 <550147740@qq.com>
+tgic <farmer1992@gmail.com>
+Thatcher Peskens <thatcher@docker.com>
+theadactyl <thea.lamkin@gmail.com>
+Thell 'Bo' Fowler <thell@tbfowler.name>
+Thermionix <bond711@gmail.com>
+Thijs Terlouw <thijsterlouw@gmail.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
+Thomas Gazagnaire <thomas@gazagnaire.org>
+Thomas Grainger <tagrain@gmail.com>
+Thomas Hansen <thomas.hansen@gmail.com>
+Thomas Leonard <thomas.leonard@docker.com>
+Thomas LEVEIL <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
+Thomas Riccardi <riccardi@systran.fr>
+Thomas Schroeter <thomas@cliqz.com>
+Thomas Sjögren <konstruktoid@users.noreply.github.com>
+Thomas Swift <tgs242@gmail.com>
+Thomas Tanaka <thomas.tanaka@oracle.com>
+Thomas Texier <sharkone@en-mousse.org>
+Tianon Gravi <admwiggin@gmail.com>
+Tibor Vass <teabee89@gmail.com>
+Tiffany Low <tiffany@box.com>
+Tim Bosse <taim@bosboot.org>
+Tim Dettrick <t.dettrick@uq.edu.au>
+Tim Düsterhus <tim@bastelstu.be>
+Tim Hockin <thockin@google.com>
+Tim Ruffles <oi@truffles.me.uk>
+Tim Smith <timbot@google.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Tim Wang <timwangdev@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
+Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Bradtke <webwurst@gmail.com>
+Tobias Gesellchen <tobias@gesellix.de>
+Tobias Klauser <tklauser@distanz.ch>
+Tobias Schmidt <ts@soundcloud.com>
+Tobias Schwab <tobias.schwab@dynport.de>
+Todd Crane <todd@toddcrane.com>
+Todd Lunter <tlunter@gmail.com>
+Todd Whiteman <todd.whiteman@joyent.com>
+Toli Kuznets <toli@docker.com>
+Tom Barlow <tomwbarlow@gmail.com>
+Tom Denham <tom@tomdee.co.uk>
+Tom Fotherby <tom+github@peopleperhour.com>
+Tom Howe <tom.howe@enstratius.com>
+Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom X. Tobin <tomxtobin@tomxtobin.com>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tomáš Hrčka <thrcka@redhat.com>
+Tonis Tiigi <tonistiigi@gmail.com>
+Tonny Xu <tonny.xu@gmail.com>
+Tony Daws <tony@daws.ca>
+Tony Miller <mcfiredrill@gmail.com>
+toogley <toogley@mailbox.org>
+Torstein Husebø <torstein@huseboe.net>
+tpng <benny.tpng@gmail.com>
+tracylihui <793912329@qq.com>
+Travis Cline <travis.cline@gmail.com>
+Travis Thieman <travis.thieman@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
+Trevor <trevinwoodstock@gmail.com>
+Trevor Pounds <trevor.pounds@gmail.com>
+trishnaguha <trishnaguha17@gmail.com>
+Tristan Carel <tristan@cogniteev.com>
+Troy Denton <trdenton@gmail.com>
+Tyler Brock <tyler.brock@gmail.com>
+Tzu-Jung Lee <roylee17@gmail.com>
+Tõnis Tiigi <tonistiigi@gmail.com>
+Ulysse Carion <ulyssecarion@gmail.com>
+unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
+vagrant <vagrant@ubuntu-14.04-amd64-vbox>
+Vaidas Jablonskis <jablonskis@gmail.com>
+Veres Lajos <vlajos@gmail.com>
+vgeta <gopikannan.venugopalsamy@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Costan <costan@gmail.com>
+Victor I. Wood <viw@t2am.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Victor Marmol <vmarmol@google.com>
+Victor Palma <palma.victor@gmail.com>
+Victor Vieux <victor.vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
+Vijaya Kumar K <vijayak@caviumnetworks.com>
+Viktor Stanchev <me@viktorstanchev.com>
+Viktor Vojnovski <viktor.vojnovski@amadeus.com>
+VinayRaghavanKS <raghavan.vinay@gmail.com>
+Vincent Batts <vbatts@redhat.com>
+Vincent Bernat <bernat@luffy.cx>
+Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Demeester <vincent@sbr.pm>
+Vincent Giersch <vincent.giersch@ovh.net>
+Vincent Mayers <vincent.mayers@inbloom.org>
+Vincent Woo <me@vincentwoo.com>
+Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
+Vishnu Kannan <vishnuk@google.com>
+Vitor Monteiro <vmrmonteiro@gmail.com>
+Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
+Vladimir Bulyga <xx@ccxx.cc>
+Vladimir Kirillov <proger@wilab.org.ua>
+Vladimir Rutsky <altsysrq@gmail.com>
+Vladimir Varankin <nek.narqo+git@gmail.com>
+VladimirAus <v_roudakov@yahoo.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
+waitingkuo <waitingkuo0527@gmail.com>
+Walter Leibbrandt <github@wrl.co.za>
+Walter Stanish <walter@pratyeka.org>
+WANG Chao <wcwxyz@gmail.com>
+Wang Xing <hzwangxing@corp.netease.com>
+Ward Vandewege <ward@jhvc.com>
+WarheadsSE <max@warheads.net>
+Wayne Chang <wayne@neverfear.org>
+Wei-Ting Kuo <waitingkuo0527@gmail.com>
+weiyan <weiyan3@huawei.com>
+Weiyang Zhu <cnresonant@gmail.com>
+Wen Cheng Ma <wenchma@cn.ibm.com>
+Wendel Fleming <wfleming@usc.edu>
+Wenxuan Zhao <viz@linux.com>
+Wenyu You <21551128@zju.edu.cn>
+Wes Morgan <cap10morgan@gmail.com>
+Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
+willhf <willhf@gmail.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Hubbs <w.d.hubbs@gmail.com>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+WiseTrem <shepelyov.g@gmail.com>
+wlan0 <sidharthamn@gmail.com>
+Wolfgang Powisch <powo@powo.priv.at>
+wonderflow <wonderflow.sun@gmail.com>
+xamyzhao <x.amy.zhao@gmail.com>
+XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
+xiekeyang <xiekeyang@huawei.com>
+Xinzi Zhou <imdreamrunner@gmail.com>
+Xiuming Chen <cc@cxm.cc>
+xlgao-zju <xlgao@zju.edu.cn>
+xuzhaokui <cynicholas@gmail.com>
+Yahya <ya7yaz@gmail.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yan Feng <yanfeng2@huawei.com>
+Yang Bai <hamo.by@gmail.com>
+yangshukui <yangshukui@huawei.com>
+Yasunori Mahata <nori@mahata.net>
+Yestin Sun <sunyi0804@gmail.com>
+Yi EungJun <eungjun.yi@navercorp.com>
+Yibai Zhang <xm1994@gmail.com>
+Yihang Ho <hoyihang5@gmail.com>
+Ying Li <cyli@twistedmatrix.com>
+Yohei Ueda <yohei@jp.ibm.com>
+Yong Tang <yong.tang.github@outlook.com>
+Yongzhi Pan <panyongzhi@gmail.com>
+yorkie <yorkiefixer@gmail.com>
+Youcef YEKHLEF <yyekhlef@gmail.com>
+Yuan Sun <sunyuan3@huawei.com>
+yuchangchun <yuchangchun1@huawei.com>
+yuchengxia <yuchengxia@huawei.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+yuzou <zouyu7@huawei.com>
+Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
+Zachary Jaffee <zij@case.edu>
+Zain Memon <zain@inzain.net>
+Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
+Zefan Li <lizefan@huawei.com>
+Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
+Zhang Kun <zkazure@gmail.com>
+Zhang Wei <zhangwei555@huawei.com>
+Zhang Wentao <zhangwentao234@huawei.com>
+Zhenan Ye <21551168@zju.edu.cn>
+Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
+Zhuoyun Wei <wzyboy@wzyboy.org>
+Zilin Du <zilin.du@gmail.com>
+zimbatm <zimbatm@zimbatm.com>
+Ziming Dong <bnudzm@foxmail.com>
+ZJUshuaizhou <21551191@zju.edu.cn>
+zmarouf <zeid.marouf@gmail.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
+zqh <zqhxuyuan@gmail.com>
+Zuhayr Elahi <elahi.zuhayr@gmail.com>
+Zunayed Ali <zunayed@gmail.com>
+Álex González <agonzalezro@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+Átila Camurça Alves <camurca.home@gmail.com>
+尹吉峰 <jifeng.yin@gmail.com>
+搏通 <yufeng.pyf@alibaba-inc.com>
diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md
new file mode 100644
index 000000000..5364be769
--- /dev/null
+++ b/vendor/github.com/containers/storage/CONTRIBUTING.md
@@ -0,0 +1,144 @@
+# Contributing to Containers/Storage
+
+We'd love to have you join the community! The sections below summarize the
+processes that we follow.
+
+## Topics
+
+* [Reporting Issues](#reporting-issues)
+* [Submitting Pull Requests](#submitting-pull-requests)
+* [Communications](#communications)
+<!--
+* [Becoming a Maintainer](#becoming-a-maintainer)
+-->
+
+## Reporting Issues
+
+Before reporting an issue, check our backlog of
+[open issues](https://github.com/containers/storage/issues)
+to see if someone else has already reported it. If so, feel free to add
+your scenario, or additional information, to the discussion. Or simply
+"subscribe" to it to be notified when it is updated.
+
+If you find a new issue with the project, we'd love to hear about it! The most
+important aspect of a bug report is that it includes enough information for
+us to reproduce it. So, please include as much detail as possible and leave
+out anything that doesn't really relate to the issue itself.
+The easier it is for us to reproduce it, the faster it'll be fixed!
+
+Please don't include any private/sensitive information in your issue!
+
+## Submitting Pull Requests
+
+No Pull Request (PR) is too small! Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible, and when it makes sense, try to break up
+larger PRs into smaller ones - smaller code changes are easier to review -
+but only if those smaller pieces still make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well-documented code changes
+* additional testcases. Ideally, they should fail without your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might need to be reviewed
+separately from the rest of the PR. But squashing down to just one commit is ok
+too since in the end the entire PR will be reviewed anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that GitHub will automatically close the referenced issue
+when the PR is merged.
+
+<!--
+All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
+-->
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+## Communications
+
+For general questions or discussions, please use the
+`container-projects` IRC channel that has been set up
+on `irc.freenode.net`.
+
+For discussions around issues/bugs and features, you can use the GitHub
+[issues](https://github.com/containers/storage/issues)
+and
+[PRs](https://github.com/containers/storage/pulls)
+tracking system.
+
+<!--
+## Becoming a Maintainer
+
+To become a maintainer you must first be nominated by an existing maintainer.
+If a majority (>50%) of maintainers agree then the proposal is adopted and
+you will be added to the list.
+
+Removing a maintainer requires at least 75% of the remaining maintainers
+approval, or if the person requests to be removed then it is automatic.
+Normally, a maintainer will only be removed if they are considered to be
+inactive for a long period of time or are viewed as disruptive to the community.
+
+The current list of maintainers can be found in the
+[MAINTAINERS](MAINTAINERS) file.
+-->
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
new file mode 100644
index 000000000..4236ef68e
--- /dev/null
+++ b/vendor/github.com/containers/storage/Makefile
@@ -0,0 +1,92 @@
+.PHONY: all binary clean cross default docs gccgo help install.tools local-binary local-cross local-gccgo local-test-integration local-test-unit local-validate test test-integration test-unit validate
+
+PACKAGE := github.com/containers/storage
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
+EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
+NATIVETAGS := exclude_graphdriver_devicemapper exclude_graphdriver_btrfs exclude_graphdriver_overlay
+AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/ostree_tag.sh)
+BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
+GO := go
+
+RUNINVM := vagrant/runinvm.sh
+
+default all: local-binary docs local-validate local-cross local-gccgo test-unit test-integration ## validate all checks, build and cross-build\nbinaries and docs, run tests in a VM
+
+clean: ## remove all built files
+ $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
+
+sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go) layers_ffjson.go images_ffjson.go containers_ffjson.go pkg/archive/archive_ffjson.go
+
+containers-storage: $(sources) ## build using gc on the host
+ $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+
+layers_ffjson.go: layers.go
+ $(RM) $@
+ ffjson layers.go
+
+images_ffjson.go: images.go
+ $(RM) $@
+ ffjson images.go
+
+containers_ffjson.go: containers.go
+ $(RM) $@
+ ffjson containers.go
+
+pkg/archive/archive_ffjson.go: pkg/archive/archive.go
+ $(RM) $@
+ ffjson pkg/archive/archive.go
+
+binary local-binary: containers-storage
+
+local-gccgo: ## build using gccgo on the host
+ GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
+
+local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
+ @for target in linux/amd64 linux/386 linux/arm darwin/amd64 windows/amd64 ; do \
+ os=`echo $${target} | cut -f1 -d/` ; \
+ arch=`echo $${target} | cut -f2 -d/` ; \
+ suffix=$${os}.$${arch} ; \
+ $(MAKE) GOOS=$${os} GOARCH=$${arch} FLAGS="-o containers-storage.$${suffix}" AUTOTAGS="$(NATIVETAGS)" local-binary || exit 1; \
+ done
+
+cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs
+ $(RUNINVM) make local-$@
+
+docs: ## build the docs on the host
+ $(MAKE) -C docs docs
+
+gccgo: ## build using gccgo using VMs
+ $(RUNINVM) make local-$@
+
+test: local-binary ## build the binaries and run the tests using VMs
+ $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration
+
+local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
+ @$(GO) test $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+
+test-unit: local-binary ## run the unit tests using VMs
+ $(RUNINVM) make local-$@
+
+local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
+ @cd tests; ./test_runner.bash
+
+test-integration: local-binary ## run the integration tests using VMs
+ $(RUNINVM) make local-$@
+
+local-validate: ## validate DCO and gofmt on the host
+ @./hack/git-validation.sh
+ @./hack/gofmt.sh
+
+validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs
+ $(RUNINVM) make local-$@
+
+install.tools:
+ go get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
+ go get -u $(BUILDFLAGS) github.com/vbatts/git-validation
+ go get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
+ go get -u $(BUILDFLAGS) github.com/pquerna/ffjson
+ gometalinter.v1 -i
+
+help: ## this help
+ @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
new file mode 100644
index 000000000..434711004
--- /dev/null
+++ b/vendor/github.com/containers/storage/VERSION
@@ -0,0 +1 @@
+1.12.13
diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile
new file mode 100644
index 000000000..c82c1f81b
--- /dev/null
+++ b/vendor/github.com/containers/storage/Vagrantfile
@@ -0,0 +1,25 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# The fedora/28-cloud-base and debian/jessie64 boxes are also available for
+# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to
+# "virtualbox" to use them instead.
+#
+Vagrant.configure("2") do |config|
+ config.vm.define "fedora" do |c|
+ c.vm.box = "fedora/28-cloud-base"
+ c.vm.synced_folder ".", "/vagrant", type: "rsync",
+ rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
+ c.vm.provision "shell", inline: <<-SHELL
+ sudo /vagrant/vagrant/provision.sh
+ SHELL
+ end
+ config.vm.define "debian" do |c|
+ c.vm.box = "debian/jessie64"
+ c.vm.synced_folder ".", "/vagrant", type: "rsync",
+ rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
+ c.vm.provision "shell", inline: <<-SHELL
+ sudo /vagrant/vagrant/provision.sh
+ SHELL
+ end
+end
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index bbac78b60..e69552361 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -572,6 +572,10 @@ func (r *containerStore) Lock() {
r.lockfile.Lock()
}
+func (r *containerStore) RecursiveLock() {
+ r.lockfile.RecursiveLock()
+}
+
func (r *containerStore) RLock() {
r.lockfile.RLock()
}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index e821bc0c5..56bb081e9 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -83,7 +83,7 @@ type Driver struct {
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
-func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
@@ -91,7 +91,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
- fsMagic, err := graphdriver.GetFSMagic(root)
+ fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
@@ -106,7 +106,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
var mountOptions string
- for _, option := range options {
+ for _, option := range options.DriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
@@ -126,36 +126,36 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
a := &Driver{
- root: root,
- uidMaps: uidMaps,
- gidMaps: gidMaps,
+ root: home,
+ uidMaps: options.UIDMaps,
+ gidMaps: options.GIDMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
- if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
if os.IsExist(err) {
return a, nil
}
return nil, err
}
- if err := mountpk.MakePrivate(root); err != nil {
+ if err := mountpk.MakePrivate(home); err != nil {
return nil, err
}
// Populate the dir structure
for _, p := range paths {
- if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil {
return nil, err
}
}
@@ -165,7 +165,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
})
for _, path := range []string{"mnt", "diff"} {
- p := filepath.Join(root, path)
+ p := filepath.Join(home, path)
entries, err := ioutil.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
@@ -255,6 +255,9 @@ func (a *Driver) AdditionalImageStores() []string {
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+ if opts == nil {
+ opts = &graphdriver.CreateOpts{}
+ }
return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 30254d9fb..6f632a98d 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -49,7 +49,7 @@ type btrfsOptions struct {
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
@@ -60,7 +60,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
@@ -72,15 +72,15 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err
}
- opt, userDiskQuota, err := parseOptions(options)
+ opt, userDiskQuota, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
}
driver := &Driver{
home: home,
- uidMaps: uidMaps,
- gidMaps: gidMaps,
+ uidMaps: options.UIDMaps,
+ gidMaps: options.GIDMaps,
options: opt,
}
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index 4d4011ee0..f2f1ec386 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -55,6 +55,9 @@ func chownByMapsMain() {
if err != nil {
return fmt.Errorf("error walking to %q: %v", path, err)
}
+ if path == "." {
+ return nil
+ }
return platformLChown(path, info, toHost, toContainer)
}
if err := filepath.Walk(".", chown); err != nil {
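The hunk above adds a guard so the re-exec'ed chown helper never tries to change ownership of the directory it was started in: the walk callback now returns early for the root entry ".". A minimal, self-contained sketch of the same pattern, with a hypothetical visit callback standing in for platformLChown:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkSkippingRoot mirrors the added guard: the walk callback returns early
// for the root entry so only the entries beneath it are visited.
func walkSkippingRoot(root string, visit func(path string, info os.FileInfo) error) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("error walking to %q: %v", path, err)
		}
		if path == root {
			// Skip the directory the walk started in, as the patched
			// helper does for ".".
			return nil
		}
		return visit(path, info)
	})
}

func main() {
	_ = walkSkippingRoot(".", func(path string, info os.FileInfo) error {
		fmt.Println(path)
		return nil
	})
}
```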
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 13677c93a..f384a6242 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -34,8 +34,8 @@ type Driver struct {
}
// Init creates a driver with the given home and the set of options.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
- deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps)
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+ deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
@@ -47,8 +47,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
d := &Driver{
DeviceSet: deviceSet,
home: home,
- uidMaps: uidMaps,
- gidMaps: gidMaps,
+ uidMaps: options.UIDMaps,
+ gidMaps: options.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
locker: locker.New(),
}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index e8f8bd5a7..3b56fd2e1 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -54,7 +54,7 @@ type MountOpts struct {
}
// InitFunc initializes the storage driver.
-type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
+type InitFunc func(homedir string, options Options) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
@@ -203,7 +203,7 @@ func Register(name string, initFunc InitFunc) error {
// GetDriver initializes and returns the registered driver
func GetDriver(name string, config Options) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
- return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ return initFunc(filepath.Join(config.Root, name), config)
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
@@ -211,9 +211,9 @@ func GetDriver(name string, config Options) (Driver, error) {
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
-func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
+func getBuiltinDriver(name, home string, options Options) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
- return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
+ return initFunc(filepath.Join(home, name), options)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
@@ -222,6 +222,7 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id
// Options is used to initialize a graphdriver
type Options struct {
Root string
+ RunRoot string
DriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
@@ -245,7 +246,7 @@ func New(name string, config Options) (Driver, error) {
if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority
// which we would prefer
- driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
@@ -273,7 +274,7 @@ func New(name string, config Options) (Driver, error) {
// Check for priority drivers first
for _, name := range priority {
- driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
if isDriverNotSupported(err) {
continue
@@ -285,7 +286,7 @@ func New(name string, config Options) (Driver, error) {
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
- driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+ driver, err := initFunc(filepath.Join(config.Root, name), config)
if err != nil {
if isDriverNotSupported(err) {
continue
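The signature change above threads one Options struct (now also carrying RunRoot) through GetDriver, getBuiltinDriver and New instead of unpacking it into separate option slices and ID maps. A rough, self-contained sketch of that calling convention, using simplified stand-in types rather than the real graphdriver interfaces (the real InitFunc returns a Driver, not a string):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// IDMap is a simplified stand-in for idtools.IDMap; Options mirrors the shape
// of the new graphdriver.Options bundle (illustrative copies, not the real types).
type IDMap struct{ ContainerID, HostID, Size int }

type Options struct {
	Root          string
	RunRoot       string
	DriverOptions []string
	UIDMaps       []IDMap
	GIDMaps       []IDMap
}

// InitFunc matches the new calling convention: the per-driver home directory
// plus the whole Options value.
type InitFunc func(homedir string, options Options) (string, error)

// fakeInit is a stand-in driver constructor that just reports what it was given.
func fakeInit(homedir string, options Options) (string, error) {
	return fmt.Sprintf("driver home %s, run state under %s, %d driver option(s)",
		homedir, options.RunRoot, len(options.DriverOptions)), nil
}

func main() {
	drivers := map[string]InitFunc{"fake": fakeInit}
	config := Options{
		Root:          "/var/lib/containers/storage",
		RunRoot:       "/var/run/containers/storage",
		DriverOptions: []string{"fake.option=1"},
	}
	// As in GetDriver: the home is derived from Root plus the driver name,
	// and the whole config travels along unchanged.
	if initFn, ok := drivers["fake"]; ok {
		out, _ := initFn(filepath.Join(config.Root, "fake"), config)
		fmt.Println(out)
	}
}
```

Passing the whole struct means new fields such as RunRoot reach every driver without touching each Init signature again.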
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index ef83b6c87..00327a92e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -97,6 +97,7 @@ type overlayOptions struct {
type Driver struct {
name string
home string
+ runhome string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
@@ -125,8 +126,8 @@ func init() {
// Init returns the a native diff driver for overlay filesystem.
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
- opts, err := parseOptions(options)
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+ opts, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
}
@@ -148,7 +149,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
@@ -157,32 +158,72 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
+ runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+ if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
var usingMetacopy bool
var supportsDType bool
if opts.mountProgram != "" {
supportsDType = true
} else {
- supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID)
- if err != nil {
- os.Remove(filepath.Join(home, linkDir))
- os.Remove(home)
- patherr, ok := err.(*os.PathError)
- if ok && patherr.Err == syscall.ENOSPC {
+ feature := "overlay"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("cached value indicated that overlay is supported")
+ } else {
+ logrus.Debugf("cached value indicated that overlay is not supported")
+ }
+ supportsDType = overlayCacheResult
+ if !supportsDType {
+ return nil, errors.New(overlayCacheText)
+ }
+ } else {
+ supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID)
+ if err != nil {
+ os.Remove(filepath.Join(home, linkDir))
+ os.Remove(home)
+ patherr, ok := err.(*os.PathError)
+ if ok && patherr.Err == syscall.ENOSPC {
+ return nil, err
+ }
+ err = errors.Wrap(err, "kernel does not support overlay fs")
+ if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
+ return nil, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err)
+ }
return nil, err
}
- return nil, errors.Wrap(err, "kernel does not support overlay fs")
+ if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
+ return nil, errors.Wrap(err, "error recording overlay support status")
+ }
}
- usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
+
+ feature = fmt.Sprintf("metacopy(%s)", opts.mountOptions)
+ metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
- if usingMetacopy {
- logrus.Debugf("overlay test mount indicated that metacopy is being used")
+ if metacopyCacheResult {
+ logrus.Debugf("cached value indicated that metacopy is being used")
} else {
- logrus.Debugf("overlay test mount indicated that metacopy is not being used")
+ logrus.Debugf("cached value indicated that metacopy is not being used")
}
+ usingMetacopy = metacopyCacheResult
} else {
- logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
- return nil, err
+ usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
+ if err == nil {
+ if usingMetacopy {
+ logrus.Debugf("overlay test mount indicated that metacopy is being used")
+ } else {
+ logrus.Debugf("overlay test mount indicated that metacopy is not being used")
+ }
+ if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil {
+ return nil, errors.Wrap(err, "error recording metacopy-being-used status")
+ }
+ } else {
+ logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
+ return nil, err
+ }
}
}
@@ -201,8 +242,9 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
d := &Driver{
name: "overlay",
home: home,
- uidMaps: uidMaps,
- gidMaps: gidMaps,
+ runhome: runhome,
+ uidMaps: options.UIDMaps,
+ gidMaps: options.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
@@ -293,6 +335,36 @@ func parseOptions(options []string) (*overlayOptions, error) {
return o, nil
}
+func cachedFeatureSet(feature string, set bool) string {
+ if set {
+ return fmt.Sprintf("%s-true", feature)
+ }
+ return fmt.Sprintf("%s-false", feature)
+}
+
+func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
+ content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
+ if err == nil {
+ return true, string(content), nil
+ }
+ content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
+ if err == nil {
+ return false, string(content), nil
+ }
+ return false, "", err
+}
+
+func cachedFeatureRecord(runhome, feature string, supported bool, text string) (err error) {
+ f, err := os.Create(filepath.Join(runhome, cachedFeatureSet(feature, supported)))
+ if f != nil {
+ if text != "" {
+ fmt.Fprintf(f, "%s", text)
+ }
+ f.Close()
+ }
+ return err
+}
+
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
// We can try to modprobe overlay first
@@ -369,10 +441,24 @@ func (d *Driver) useNaiveDiff() bool {
useNaiveDiffOnly = true
return
}
+ feature := fmt.Sprintf("native-diff(%s)", d.options.mountOptions)
+ nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature)
+ if err == nil {
+ if nativeDiffCacheResult {
+ logrus.Debugf("cached value indicated that native-diff is usable")
+ } else {
+ logrus.Debugf("cached value indicated that native-diff is not being used")
+ logrus.Warn(nativeDiffCacheText)
+ }
+ useNaiveDiffOnly = !nativeDiffCacheResult
+ return
+ }
if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil {
- logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
+ nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
+ logrus.Warn(nativeDiffCacheText)
useNaiveDiffOnly = true
}
+ cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)
})
return useNaiveDiffOnly
}
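The new cachedFeatureCheck/cachedFeatureRecord helpers persist the outcome of each expensive probe (overlay support, metacopy, native diff) as a marker file named "<feature>-true" or "<feature>-false" under the driver's run directory, so later starts can skip the test mounts. A simplified, self-contained sketch of that check-then-probe-then-record flow; the probe itself is faked and the helper names here are local stand-ins:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// cachedCheck looks for a previously recorded answer for the feature.
func cachedCheck(runhome, feature string) (bool, string, error) {
	if body, err := ioutil.ReadFile(filepath.Join(runhome, feature+"-true")); err == nil {
		return true, string(body), nil
	}
	if body, err := ioutil.ReadFile(filepath.Join(runhome, feature+"-false")); err == nil {
		return false, string(body), nil
	}
	return false, "", os.ErrNotExist
}

// cachedRecord writes the probe result (and any explanatory text) as a marker file.
func cachedRecord(runhome, feature string, supported bool, text string) error {
	name := feature + "-false"
	if supported {
		name = feature + "-true"
	}
	return ioutil.WriteFile(filepath.Join(runhome, name), []byte(text), 0600)
}

func main() {
	runhome, _ := ioutil.TempDir("", "overlay-cache")
	defer os.RemoveAll(runhome)

	feature := "metacopy(example)"
	if ok, _, err := cachedCheck(runhome, feature); err == nil {
		fmt.Println("cached result:", ok)
		return
	}
	// No cached answer yet: run the (pretend) probe once and remember it,
	// so the next process start can skip the test mount entirely.
	probed := false
	_ = cachedRecord(runhome, feature, probed, "metacopy probe failed in this sketch")
	fmt.Println("probed and recorded:", probed)
}
```

The file body doubles as the human-readable explanation that is logged or returned when the cached answer is negative.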
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index 93e744371..6ef35d8ad 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,!exclude_disk_quota
//
// projectquota.go - implements XFS project quota controls
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
new file mode 100644
index 000000000..b6db1e1d8
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -0,0 +1,32 @@
+// +build linux,exclude_disk_quota
+
+package quota
+
+import (
+ "github.com/pkg/errors"
+)
+
+// Quota limit params - currently we only control blocks hard limit
+type Quota struct {
+ Size uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+}
+
+func NewControl(basePath string) (*Control, error) {
+ return nil, errors.New("filesystem does not support, or has not enabled quotas")
+}
+
+// SetQuota - assign a unique project id to directory and set the quota limits
+// for that project id
+func (q *Control) SetQuota(targetPath string, quota Quota) error {
+ return errors.New("filesystem does not support, or has not enabled quotas")
+}
+
+// GetQuota - get the quota limits of a directory that was configured with SetQuota
+func (q *Control) GetQuota(targetPath string, quota *Quota) error {
+ return errors.New("filesystem does not support, or has not enabled quotas")
+}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index 9e256858c..d9cc9483e 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -24,16 +24,16 @@ func init() {
// Init returns a new VFS driver.
// This sets the home directory for the driver and returns NaiveDiffDriver.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
d := &Driver{
homes: []string{home},
- idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
+ idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
}
rootIDs := d.idMappings.RootPair()
if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
return nil, err
}
- for _, option := range options {
+ for _, option := range options.DriverOptions {
if strings.HasPrefix(option, "vfs.imagestore=") {
d.homes = append(d.homes, strings.Split(option[15:], ",")...)
continue
@@ -59,7 +59,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
}
if d.ostreeRepo != "" {
- rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index c7df1c1fe..f0437a275 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -83,10 +83,10 @@ type Driver struct {
}
// InitFilter returns a new Windows storage filter driver.
-func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, error) {
logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
- for _, option := range options {
+ for _, option := range options.DriverOptions {
if strings.HasPrefix(option, "windows.mountopt=") {
return nil, fmt.Errorf("windows driver does not support mount options")
} else {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
new file mode 100644
index 000000000..9c270c541
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim <joerg@higgsboson.tk> (@Mic92)
+Arthur Gautier <baloo@gandi.net> (@baloose)
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index eaa9e8bc5..a2bf5565b 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -44,7 +44,7 @@ func (*Logger) Log(cmd []string) {
// Init returns a new ZFS driver.
// It takes base mount path and an array of options which are represented as key value pairs.
// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options.
-func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
var err error
logger := logrus.WithField("storage-driver", "zfs")
@@ -61,7 +61,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
}
defer file.Close()
- options, err := parseOptions(opt)
+ options, err := parseOptions(opt.DriverOptions)
if err != nil {
return nil, err
}
@@ -103,7 +103,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
if err != nil {
return nil, fmt.Errorf("Failed to get root uid/guid: %v", err)
}
@@ -115,8 +115,8 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
dataset: rootDataset,
options: options,
filesystemsCache: filesystemsCache,
- uidMaps: uidMaps,
- gidMaps: gidMaps,
+ uidMaps: opt.UIDMaps,
+ gidMaps: opt.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index 38b5a3ef3..6f487504a 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -82,6 +82,9 @@ type Image struct {
// is set before using it.
Created time.Time `json:"created,omitempty"`
+ // ReadOnly is true if this image resides in a read-only layer store.
+ ReadOnly bool `json:"-"`
+
Flags map[string]interface{} `json:"flags,omitempty"`
}
@@ -159,6 +162,7 @@ func copyImage(i *Image) *Image {
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
+ ReadOnly: i.ReadOnly,
Flags: copyStringInterfaceMap(i.Flags),
}
}
@@ -269,6 +273,7 @@ func (r *imageStore) Load() error {
list := digests[digest]
digests[digest] = append(list, image)
}
+ image.ReadOnly = !r.IsReadWrite()
}
}
if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
@@ -739,6 +744,10 @@ func (r *imageStore) Lock() {
r.lockfile.Lock()
}
+func (r *imageStore) RecursiveLock() {
+ r.lockfile.RecursiveLock()
+}
+
func (r *imageStore) RLock() {
r.lockfile.RLock()
}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index a35dd476b..fb79238cd 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -103,6 +103,9 @@ type Layer struct {
// for use inside of a user namespace where UID mapping is being used.
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+
+ // ReadOnly is true if this layer resides in a read-only layer store.
+ ReadOnly bool `json:"-"`
}
type layerMountPoint struct {
@@ -259,6 +262,7 @@ func copyLayer(l *Layer) *Layer {
UncompressedDigest: l.UncompressedDigest,
UncompressedSize: l.UncompressedSize,
CompressionType: l.CompressionType,
+ ReadOnly: l.ReadOnly,
Flags: copyStringInterfaceMap(l.Flags),
UIDMap: copyIDMap(l.UIDMap),
GIDMap: copyIDMap(l.GIDMap),
@@ -318,6 +322,7 @@ func (r *layerStore) Load() error {
if layer.MountLabel != "" {
label.ReserveLabel(layer.MountLabel)
}
+ layer.ReadOnly = !r.IsReadWrite()
}
err = nil
}
@@ -1304,6 +1309,10 @@ func (r *layerStore) Lock() {
r.lockfile.Lock()
}
+func (r *layerStore) RecursiveLock() {
+ r.lockfile.RecursiveLock()
+}
+
func (r *layerStore) RLock() {
r.lockfile.RLock()
}
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go
index ed8753337..c4f1b5549 100644
--- a/vendor/github.com/containers/storage/lockfile.go
+++ b/vendor/github.com/containers/storage/lockfile.go
@@ -15,6 +15,10 @@ type Locker interface {
// Acquire a writer lock.
Lock()
+ // Acquire a writer lock recursively, allowing for recursive acquisitions
+ // within the same process space.
+ RecursiveLock()
+
// Unlock the lock.
Unlock()
diff --git a/vendor/github.com/containers/storage/lockfile_linux.go b/vendor/github.com/containers/storage/lockfile_linux.go
deleted file mode 100644
index 903387c66..000000000
--- a/vendor/github.com/containers/storage/lockfile_linux.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build linux solaris
-
-package storage
-
-import (
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// TouchedSince indicates if the lock file has been touched since the specified time
-func (l *lockfile) TouchedSince(when time.Time) bool {
- st := unix.Stat_t{}
- err := unix.Fstat(int(l.fd), &st)
- if err != nil {
- return true
- }
- touched := time.Unix(st.Mtim.Unix())
- return when.Before(touched)
-}
diff --git a/vendor/github.com/containers/storage/lockfile_otherunix.go b/vendor/github.com/containers/storage/lockfile_otherunix.go
deleted file mode 100644
index 041d54c05..000000000
--- a/vendor/github.com/containers/storage/lockfile_otherunix.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build darwin freebsd
-
-package storage
-
-import (
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-func (l *lockfile) TouchedSince(when time.Time) bool {
- st := unix.Stat_t{}
- err := unix.Fstat(int(l.fd), &st)
- if err != nil {
- return true
- }
- touched := time.Unix(st.Mtimespec.Unix())
- return when.Before(touched)
-}
diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go
index 8e0f22cb5..00215e928 100644
--- a/vendor/github.com/containers/storage/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/lockfile_unix.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -25,6 +26,7 @@ type lockfile struct {
locktype int16
locked bool
ro bool
+ recursive bool
}
// openLock opens the file at path and returns the corresponding file
@@ -75,7 +77,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
-func (l *lockfile) lock(l_type int16) {
+func (l *lockfile) lock(l_type int16, recursive bool) {
lk := unix.Flock_t{
Type: l_type,
Whence: int16(os.SEEK_SET),
@@ -86,7 +88,13 @@ func (l *lockfile) lock(l_type int16) {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
- l.rwMutex.Lock()
+ if recursive {
+ // NOTE: that's okay as recursive is only set in RecursiveLock(), so
+ // there's no need to protect against hypothetical RDLCK cases.
+ l.rwMutex.RLock()
+ } else {
+ l.rwMutex.Lock()
+ }
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
}
@@ -110,6 +118,7 @@ func (l *lockfile) lock(l_type int16) {
}
l.locktype = l_type
l.locked = true
+ l.recursive = recursive
l.counter++
}
@@ -119,13 +128,24 @@ func (l *lockfile) Lock() {
if l.ro {
l.RLock()
} else {
- l.lock(unix.F_WRLCK)
+ l.lock(unix.F_WRLCK, false)
+ }
+}
+
+// RecursiveLock locks the lockfile as a writer but allows for recursive
+// acquisitions within the same process space. Note that RLock() will be called
+// if it's a lockTypReader lock.
+func (l *lockfile) RecursiveLock() {
+ if l.ro {
+ l.RLock()
+ } else {
+ l.lock(unix.F_WRLCK, true)
}
}
// LockRead locks the lockfile as a reader.
func (l *lockfile) RLock() {
- l.lock(unix.F_RDLCK)
+ l.lock(unix.F_RDLCK, false)
}
// Unlock unlocks the lockfile.
@@ -161,7 +181,7 @@ func (l *lockfile) Unlock() {
// Close the file descriptor on the last unlock.
unix.Close(int(l.fd))
}
- if l.locktype == unix.F_RDLCK {
+ if l.locktype == unix.F_RDLCK || l.recursive {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
@@ -232,3 +252,14 @@ func (l *lockfile) Modified() (bool, error) {
func (l *lockfile) IsReadWrite() bool {
return !l.ro
}
+
+// TouchedSince indicates if the lock file has been touched since the specified time
+func (l *lockfile) TouchedSince(when time.Time) bool {
+ st, err := system.Fstat(int(l.fd))
+ if err != nil {
+ return true
+ }
+ mtim := st.Mtim()
+ touched := time.Unix(mtim.Unix())
+ return when.Before(touched)
+}
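The recursive variant keeps the fcntl F_WRLCK for cross-process exclusion but only takes the in-process rwMutex shared, so a caller that already holds the lock can acquire it again without deadlocking against itself. A toy illustration of that in-process behaviour (the file-locking half and the real lockfile type are omitted):

```go
package main

import (
	"fmt"
	"sync"
)

// recursiveLocker sketches the in-process half of RecursiveLock(): the
// cross-process exclusion still comes from the write lock on the file, but
// inside the process the RWMutex is only taken shared.
type recursiveLocker struct {
	rw sync.RWMutex
}

func (l *recursiveLocker) RecursiveLock()   { l.rw.RLock() }
func (l *recursiveLocker) RecursiveUnlock() { l.rw.RUnlock() }

func reloadStore(l *recursiveLocker) {
	l.RecursiveLock() // inner acquisition: would deadlock if this were rw.Lock()
	defer l.RecursiveUnlock()
	fmt.Println("store reloaded while the lock is already held")
}

func main() {
	l := &recursiveLocker{}
	l.RecursiveLock() // outer acquisition, e.g. taken by a top-level API call
	reloadStore(l)    // helper re-acquires the same lock
	l.RecursiveUnlock()
}
```

The Windows stub in the next file panics instead: as its comment notes, a purely in-process recursive writer lock would just be an ordinary writer lock.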
diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go
index c02069495..caf7c184a 100644
--- a/vendor/github.com/containers/storage/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/lockfile_windows.go
@@ -36,6 +36,12 @@ func (l *lockfile) Lock() {
l.locked = true
}
+func (l *lockfile) RecursiveLock() {
+ // We don't support Windows but a recursive writer-lock in one process-space
+ // is really a writer lock, so just panic.
+ panic("not supported")
+}
+
func (l *lockfile) RLock() {
l.mu.Lock()
l.locked = true
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 9cc717e5a..a90ae1e9e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -98,6 +98,8 @@ const (
Gzip
// Xz is xz compression algorithm.
Xz
+ // Zstd is zstd compression algorithm.
+ Zstd
)
const (
@@ -141,6 +143,7 @@ func DetectCompression(source []byte) Compression {
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ Zstd: {0x28, 0xb5, 0x2f, 0xfd},
} {
if len(source) < len(m) {
logrus.Debug("Len too short")
@@ -200,6 +203,8 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
<-chdone
return readBufWrapper.Close()
}), nil
+ case Zstd:
+ return zstdReader(buf)
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
@@ -217,6 +222,8 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
gzWriter := gzip.NewWriter(dest)
writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
return writeBufWrapper, nil
+ case Zstd:
+ return zstdWriter(dest)
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
@@ -324,6 +331,8 @@ func (compression *Compression) Extension() string {
return "tar.gz"
case Xz:
return "tar.xz"
+ case Zstd:
+ return "tar.zst"
}
return ""
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_cgo.go b/vendor/github.com/containers/storage/pkg/archive/archive_cgo.go
new file mode 100644
index 000000000..9faea5f01
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_cgo.go
@@ -0,0 +1,17 @@
+// +build cgo
+
+package archive
+
+import (
+ "io"
+
+ "github.com/DataDog/zstd"
+)
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+ return zstd.NewReader(buf), nil
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+ return zstd.NewWriter(dest), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_nocgo.go b/vendor/github.com/containers/storage/pkg/archive/archive_nocgo.go
new file mode 100644
index 000000000..34ad1d2d1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_nocgo.go
@@ -0,0 +1,16 @@
+// +build !cgo
+
+package archive
+
+import (
+ "fmt"
+ "io"
+)
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+ return nil, fmt.Errorf("zstd not supported on this platform")
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+ return nil, fmt.Errorf("zstd not supported on this platform")
+}
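The zstd frame magic registered in DetectCompression above is the 4-byte little-endian constant 0xFD2FB528; a standalone sketch of the same prefix check:

package main

import (
    "bytes"
    "fmt"
)

// zstdMagic is the zstd frame header, as added to DetectCompression above.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

func isZstd(source []byte) bool {
    return bytes.HasPrefix(source, zstdMagic)
}

func main() {
    fmt.Println(isZstd([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00})) // true
    fmt.Println(isZstd([]byte{0x1f, 0x8b, 0x08}))             // false (gzip)
}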
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
new file mode 100644
index 000000000..70f9c5564
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "storage-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ var err error
+ oldDir, err = ioutil.TempDir("", "storage-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index a36ff1cb1..33ba6a128 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -1,7 +1,7 @@
package chrootarchive
import (
- "archive/tar"
+ stdtar "archive/tar"
"fmt"
"io"
"io/ioutil"
@@ -34,18 +34,34 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, true)
+ return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ return untarHandler(tarArchive, dest, options, true, root)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, false)
+ return untarHandler(tarArchive, dest, options, false, dest)
}
// Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
@@ -77,7 +93,15 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
r = decompressedArchive
}
- return invokeUnpack(r, dest, options)
+ return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ return invokePack(srcPath, options, root)
}
// CopyFileWithTarAndChown returns a function which copies a single file from outside
@@ -99,7 +123,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
var hashWorker sync.WaitGroup
hashWorker.Add(1)
go func() {
- t := tar.NewReader(contentReader)
+ t := stdtar.NewReader(contentReader)
_, err := t.Next()
if err != nil {
hashError = err
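A hedged usage sketch of UntarWithRoot as introduced above: `root` is the chroot boundary and `dest` must live inside it. The paths are hypothetical, and reexec.Init() is included because the unpack re-execs the calling binary as "storage-untar":

package main

import (
    "log"
    "os"

    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/chrootarchive"
    "github.com/containers/storage/pkg/reexec"
)

func main() {
    // Give the re-exec'd child ("storage-untar") a chance to dispatch.
    if reexec.Init() {
        return
    }

    f, err := os.Open("/tmp/layer.tar") // hypothetical layer tarball
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    root := "/var/lib/containers/storage/overlay/abc/merged" // chroot boundary (hypothetical)
    dest := root + "/etc"                                     // must be a path within root

    // The child chroots to root and unpacks into dest, so symlinks inside the
    // tree cannot redirect writes outside of root (see the CVE-2018-15664 note above).
    if err := chrootarchive.UntarWithRoot(f, dest, &archive.TarOptions{}, root); err != nil {
        log.Fatal(err)
    }
}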
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index e04ed787c..ca9fb10d7 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -10,10 +10,13 @@ import (
"io"
"io/ioutil"
"os"
+ "path/filepath"
"runtime"
+ "strings"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
+ "github.com/pkg/errors"
)
// untar is the entry-point for storage-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@ func untar() {
runtime.LockOSThread()
flag.Parse()
- var options *archive.TarOptions
+ var options archive.TarOptions
//read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
- if err := chroot(flag.Arg(0)); err != nil {
+ dst := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = dst
+ }
+
+ if err := chroot(root); err != nil {
fatal(err)
}
- if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+ if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
@@ -45,7 +58,10 @@ func untar() {
os.Exit(0)
}
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ if root == "" {
+ return errors.New("must specify a root to chroot to")
+ }
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
return fmt.Errorf("Untar pipe failure: %v", err)
}
- cmd := reexec.Command("storage-untar", dest)
+ if root != "" {
+ relDest, err := filepath.Rel(root, dest)
+ if err != nil {
+ return err
+ }
+ if relDest == "." {
+ relDest = "/"
+ }
+ if relDest[0] != '/' {
+ relDest = "/" + relDest
+ }
+ dest = relDest
+ }
+
+ cmd := reexec.Command("storage-untar", dest, root)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
@@ -68,6 +98,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
if err := cmd.Start(); err != nil {
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
+
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
@@ -84,3 +115,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
}
return nil
}
+
+func tar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ src := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = src
+ }
+
+ if err := realChroot(root); err != nil {
+ fatal(err)
+ }
+
+ var options archive.TarOptions
+ if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ rdr, err := archive.TarWithOptions(src, &options)
+ if err != nil {
+ fatal(err)
+ }
+ defer rdr.Close()
+
+ if _, err := io.Copy(os.Stdout, rdr); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if root == "" {
+ return nil, errors.New("root path must not be empty")
+ }
+
+ relSrc, err := filepath.Rel(root, srcPath)
+ if err != nil {
+ return nil, err
+ }
+ if relSrc == "." {
+ relSrc = "/"
+ }
+ if relSrc[0] != '/' {
+ relSrc = "/" + relSrc
+ }
+
+ // make sure we didn't trim a trailing slash with the call to `Rel`
+ if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+ relSrc += "/"
+ }
+
+ cmd := reexec.Command("storage-tar", relSrc, root)
+
+ errBuff := bytes.NewBuffer(nil)
+ cmd.Stderr = errBuff
+
+ tarR, tarW := io.Pipe()
+ cmd.Stdout = tarW
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ }
+
+ go func() {
+ err := cmd.Wait()
+ err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ tarW.CloseWithError(err)
+ }()
+
+ if err := json.NewEncoder(stdin).Encode(options); err != nil {
+ stdin.Close()
+ return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ }
+ stdin.Close()
+
+ return tarR, nil
+}
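And the packing direction, using the new chrootarchive.Tar shown above: the re-exec'd child ("storage-tar") chroots to root before reading srcPath. Paths are hypothetical:

package main

import (
    "io"
    "log"
    "os"

    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/chrootarchive"
    "github.com/containers/storage/pkg/reexec"
)

func main() {
    if reexec.Init() {
        return
    }

    root := "/var/lib/containers/storage/overlay/abc/merged" // hypothetical
    src := root + "/etc"                                      // resolved relative to root inside the chroot

    rdr, err := chrootarchive.Tar(src, &archive.TarOptions{}, root)
    if err != nil {
        log.Fatal(err)
    }
    defer rdr.Close()

    if _, err := io.Copy(os.Stdout, rdr); err != nil {
        log.Fatal(err)
    }
}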
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
index 93fde4220..8a5c680b1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -14,9 +14,16 @@ func chroot(path string) error {
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
- options *archive.TarOptions) error {
+ options *archive.TarOptions, root string) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ // Windows is different to Linux here because Windows does not support
+ // chroot. Hence there is no point sandboxing a chrooted process to
+ // do the pack. We call inline instead within the daemon process.
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
index f9b5dece8..83278ee50 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -4,9 +4,13 @@ package chrootarchive
import "golang.org/x/sys/unix"
-func chroot(path string) error {
+func realChroot(path string) error {
if err := unix.Chroot(path); err != nil {
return err
}
return unix.Chdir("/")
}
+
+func chroot(path string) error {
+ return realChroot(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index 21cd87992..ea08135e4 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -14,6 +14,7 @@ import (
func init() {
reexec.Register("storage-applyLayer", applyLayer)
reexec.Register("storage-untar", untar)
+ reexec.Register("storage-tar", tar)
}
func fatal(err error) {
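The "storage-tar" handler registered above only runs if the consuming binary lets re-exec dispatch; a minimal consumer sketch of the reexec pattern this package relies on (handler name and output hypothetical):

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/reexec"
)

func init() {
    // Register a handler under a command name, just like "storage-tar" above.
    reexec.Register("demo-child", func() {
        fmt.Println("running as re-exec'd child")
    })
}

func main() {
    // When this process was started via reexec.Command("demo-child"),
    // Init() runs the registered handler and returns true.
    if reexec.Init() {
        return
    }
    fmt.Println("running as parent")
}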
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 815589382..a5c73d311 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -4,6 +4,7 @@ import (
"bufio"
"fmt"
"os"
+ "os/user"
"sort"
"strconv"
"strings"
@@ -244,7 +245,13 @@ func parseSubgid(username string) (ranges, error) {
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
- var rangeList ranges
+ var (
+ rangeList ranges
+ uidstr string
+ )
+ if u, err := user.Lookup(username); err == nil {
+ uidstr = u.Uid
+ }
subidFile, err := os.Open(path)
if err != nil {
@@ -266,7 +273,7 @@ func parseSubidFile(path, username string) (ranges, error) {
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
- if parts[0] == username || username == "ALL" {
+ if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
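The change above lets /etc/subuid entries keyed by a numeric UID match in addition to entries keyed by user name; a standalone sketch of that matching rule (sample lines and user name hypothetical):

package main

import (
    "fmt"
    "os/user"
    "strings"
)

// matches reports whether a subuid/subgid line belongs to username,
// accepting either the login name or the numeric UID in the first field.
func matches(line, username, uidstr string) bool {
    parts := strings.Split(line, ":")
    if len(parts) != 3 {
        return false
    }
    return parts[0] == username || username == "ALL" || (uidstr != "" && parts[0] == uidstr)
}

func main() {
    uidstr := ""
    if u, err := user.Lookup("storage"); err == nil {
        uidstr = u.Uid
    }
    // Both forms should be accepted for the same account.
    fmt.Println(matches("storage:100000:65536", "storage", uidstr))
    fmt.Println(matches("1001:165536:65536", "storage", "1001"))
}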
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
index 91c7d121c..f9a1b4877 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
@@ -58,3 +58,15 @@ func Stat(path string) (*StatT, error) {
}
return fromStatT(s)
}
+
+// Fstat takes an open file descriptor and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file descriptor is invalid
+func Fstat(fd int) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Fstat(fd, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
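A small Linux-specific sketch of what the new Fstat helper enables for TouchedSince above: compare the mtime behind an open descriptor against a reference time (file path hypothetical; uses syscall.Stat_t.Mtim directly, so it is not portable to all Unix variants):

package main

import (
    "fmt"
    "os"
    "syscall"
    "time"
)

// touchedSince reports whether the open file was modified after `when`.
func touchedSince(fd int, when time.Time) bool {
    var st syscall.Stat_t
    if err := syscall.Fstat(fd, &st); err != nil {
        // Mirror the lockfile behaviour above: treat errors as "touched".
        return true
    }
    touched := time.Unix(st.Mtim.Unix())
    return when.Before(touched)
}

func main() {
    f, err := os.Open("/etc/hostname")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer f.Close()
    fmt.Println(touchedSince(int(f.Fd()), time.Now().Add(-time.Hour)))
}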
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
new file mode 100644
index 000000000..8b63e3ea8
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -0,0 +1,130 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/lib/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
+additionalimagestores = [
+]
+
+# Size is used to set a maximum size of the container image. Only supported by
+# certain container storage drivers.
+size = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies comma separated list of extra mount options
+mountopt = "nodev"
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "storage"
+# remap-group = "storage"
+
+# If specified, use OSTree to deduplicate files with the overlay backend.
+ostree_repo = ""
+
+# Set to skip a PRIVATE bind mount on the storage home directory. Only supported by
+# certain container storage drivers (overlay).
+skip_mount_home = "false"
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes device even if device already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# Value 0% disables
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base.
+# device.
+# mkfsarg = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space, use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to clean up the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when ENOSPC (no space) error is returned by
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
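A hedged sketch of how a tool might read a few of the [storage] keys above with a TOML decoder; the struct and its field set are illustrative only, not the package's actual configuration type:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

// config mirrors a small subset of the file above; names are hypothetical.
type config struct {
    Storage struct {
        Driver    string `toml:"driver"`
        RunRoot   string `toml:"runroot"`
        GraphRoot string `toml:"graphroot"`
        Options   struct {
            AdditionalImageStores []string `toml:"additionalimagestores"`
            MountOpt              string   `toml:"mountopt"`
        } `toml:"options"`
    } `toml:"storage"`
}

func main() {
    var cfg config
    if _, err := toml.DecodeFile("/etc/containers/storage.conf", &cfg); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(cfg.Storage.GraphRoot, cfg.Storage.Options.MountOpt)
}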
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 9b967db6d..acfcef022 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -722,6 +722,7 @@ func (s *store) getGraphDriver() (drivers.Driver, error) {
}
config := drivers.Options{
Root: s.graphRoot,
+ RunRoot: s.runRoot,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
@@ -3398,12 +3399,18 @@ func init() {
ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
}
+// GetDefaultMountOptions returns the default mount options defined in containers/storage
func GetDefaultMountOptions() ([]string, error) {
+ return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
+}
+
+// GetMountOptions returns the mount options for the specified driver and graphDriverOptions
+func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) {
mountOpts := []string{
".mountopt",
- fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName),
+ fmt.Sprintf("%s.mountopt", driver),
}
- for _, option := range defaultStoreOptions.GraphDriverOptions {
+ for _, option := range graphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
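A hedged usage sketch of the new GetMountOptions: only options whose key matches ".mountopt" or "<driver>.mountopt" contribute to the result (option values here are hypothetical):

package main

import (
    "fmt"

    "github.com/containers/storage"
)

func main() {
    graphDriverOptions := []string{
        "overlay.mountopt=nodev,metacopy=on", // picked up: key matches "overlay.mountopt"
        "overlay.size=20G",                   // ignored: not a mount option key
    }

    opts, err := storage.GetMountOptions("overlay", graphDriverOptions)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(opts)
}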
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 6c9f163a3..54627731a 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -71,14 +71,16 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
- if runtimeDir == "" {
- tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
- st, err := system.Stat(tmpDir)
- if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {
- return tmpDir, nil
- }
+
+ if runtimeDir != "" {
+ return runtimeDir, nil
}
- tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
+ tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
+ st, err := system.Stat(tmpDir)
+ if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {
+ return tmpDir, nil
+ }
+ tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Errorf("failed to create %s: %v", tmpDir, err)
} else {
@@ -193,17 +195,17 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
if err != nil {
return storageOpts, err
}
- if _, err = os.Stat(storageConf); err == nil {
+ _, err = os.Stat(storageConf)
+ if err != nil && !os.IsNotExist(err) {
+ return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
+ }
+ if err == nil {
defaultRootlessRunRoot = storageOpts.RunRoot
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = StoreOptions{}
ReloadConfigurationFile(storageConf, &storageOpts)
}
- if !os.IsNotExist(err) {
- return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
- }
-
if rootless && rootlessUid != 0 {
if err == nil {
// If the file did not specify a graphroot or runroot,
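A condensed sketch of the fallback order implemented above for the rootless runtime directory: honour XDG_RUNTIME_DIR first, then /run/user/<uid> when it is private, then a per-UID directory under the system temp dir. This is a simplification: the real code also checks ownership of /run/user/<uid> and logs MkdirAll failures.

package main

import (
    "fmt"
    "os"
)

func rootlessRuntimeDir(uid int) string {
    if dir := os.Getenv("XDG_RUNTIME_DIR"); dir != "" {
        return dir
    }
    runDir := fmt.Sprintf("/run/user/%d", uid)
    if st, err := os.Stat(runDir); err == nil {
        // The real code additionally verifies that the directory is owned by uid.
        if mode := st.Mode().Perm(); mode&0700 == 0700 && mode&0066 == 0 {
            return runDir
        }
    }
    // Last resort: a private per-UID directory under the system temp dir.
    tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), uid)
    _ = os.MkdirAll(tmpDir, 0700)
    return tmpDir
}

func main() {
    fmt.Println(rootlessRuntimeDir(os.Getuid()))
}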
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 62a3f98ca..1677363d0 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -26,3 +26,4 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
+github.com/DataDog/zstd 1.x
diff --git a/vendor/github.com/coreos/go-iptables/README.md b/vendor/github.com/coreos/go-iptables/README.md
deleted file mode 100644
index 974a983e0..000000000
--- a/vendor/github.com/coreos/go-iptables/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# go-iptables
-
-[![GoDoc](https://godoc.org/github.com/coreos/go-iptables/iptables?status.svg)](https://godoc.org/github.com/coreos/go-iptables/iptables)
-[![Build Status](https://travis-ci.org/coreos/go-iptables.png?branch=master)](https://travis-ci.org/coreos/go-iptables)
-
-Go bindings for iptables utility.
-
-In-kernel netfilter does not have a good userspace API. The tables are manipulated via setsockopt that sets/replaces the entire table. Changes to existing table need to be resolved by userspace code which is difficult and error-prone. Netfilter developers heavily advocate using iptables utlity for programmatic manipulation.
-
-go-iptables wraps invocation of iptables utility with functions to append and delete rules; create, clear and delete chains.
diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
index 8db2597a4..9601bc78a 100644
--- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go
+++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
@@ -31,6 +31,7 @@ type Error struct {
exec.ExitError
cmd exec.Cmd
msg string
+ proto Protocol
exitStatus *int //for overriding
}
@@ -48,8 +49,8 @@ func (e *Error) Error() string {
// IsNotExist returns true if the error is due to the chain or rule not existing
func (e *Error) IsNotExist() bool {
return e.ExitStatus() == 1 &&
- (e.msg == "iptables: Bad rule (does a matching rule exist in that chain?).\n" ||
- e.msg == "iptables: No chain/target/match by that name.\n")
+ (e.msg == fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", getIptablesCommand(e.proto)) ||
+ e.msg == fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(e.proto)))
}
// Protocol to differentiate between IPv4 and IPv6
@@ -282,7 +283,8 @@ func (ipt *IPTables) executeList(args []string) ([]string, error) {
v := 1
return nil, &Error{
cmd: exec.Cmd{Args: args},
- msg: "iptables: No chain/target/match by that name.",
+ msg: fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(ipt.proto)),
+ proto: ipt.proto,
exitStatus: &v,
}
}
@@ -385,7 +387,7 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
if err := cmd.Run(); err != nil {
switch e := err.(type) {
case *exec.ExitError:
- return &Error{*e, cmd, stderr.String(), nil}
+ return &Error{*e, cmd, stderr.String(), ipt.proto, nil}
default:
return err
}
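A hedged usage sketch of the proto-aware error above: delete a rule that may not exist and treat the protocol-specific "Bad rule"/"No chain" message as a no-op. The chain and rulespec are hypothetical; with the new proto field, IsNotExist also matches "ip6tables: ..." messages.

package main

import (
    "fmt"

    "github.com/coreos/go-iptables/iptables"
)

func main() {
    ipt, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
    if err != nil {
        fmt.Println(err)
        return
    }

    err = ipt.Delete("filter", "FORWARD", "-j", "DEMO-CHAIN")
    if e, ok := err.(*iptables.Error); ok && e.IsNotExist() {
        fmt.Println("rule was already absent")
        return
    }
    if err != nil {
        fmt.Println(err)
    }
}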
diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md
deleted file mode 100644
index cad04a803..000000000
--- a/vendor/github.com/coreos/go-systemd/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# go-systemd
-
-[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd)
-[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd)
-
-Go bindings to systemd. The project has several packages:
-
-- `activation` - for writing and using socket activation from Go
-- `daemon` - for notifying systemd of service status changes
-- `dbus` - for starting/stopping/inspecting running services and units
-- `journal` - for writing to systemd's logging service, journald
-- `sdjournal` - for reading from journald by wrapping its C API
-- `login1` - for integration with the systemd logind API
-- `machine1` - for registering machines/containers with systemd
-- `unit` - for (de)serialization and comparison of unit files
-
-## Socket Activation
-
-An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
-
-https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
-
-## systemd Service Notification
-
-The `daemon` package is an implementation of the [sd_notify protocol](https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description). It can be used to inform systemd of service start-up completion, watchdog events, and other status changes.
-
-## D-Bus
-
-The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
-
-http://godoc.org/github.com/coreos/go-systemd/dbus
-
-### Debugging
-
-Create `/etc/dbus-1/system-local.conf` that looks like this:
-
-```
-<!DOCTYPE busconfig PUBLIC
-"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
-"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
-<busconfig>
- <policy user="root">
- <allow eavesdrop="true"/>
- <allow eavesdrop="true" send_destination="*"/>
- </policy>
-</busconfig>
-```
-
-## Journal
-
-### Writing to the Journal
-
-Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
-
-### Reading from the Journal
-
-The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
-
-## logind
-
-The `login1` package provides functions to integrate with the [systemd logind API](http://www.freedesktop.org/wiki/Software/systemd/logind/).
-
-## machined
-
-The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
-
-## Units
-
-The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
index 1d54810af..f652582e6 100644
--- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go
+++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
@@ -143,7 +143,7 @@ func NewUserConnection() (*Conn, error) {
func NewSystemdConnection() (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd.
- return dbusAuthConnection(func() (*dbus.Conn, error) {
+ return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) {
return dbus.Dial("unix:path=/run/systemd/private")
})
})
@@ -201,7 +201,7 @@ func (c *Conn) GetManagerProperty(prop string) (string, error) {
return variant.String(), nil
}
-func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := createBus()
if err != nil {
return nil, err
@@ -221,7 +221,7 @@ func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error
return conn, nil
}
-func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := dbusAuthConnection(createBus)
if err != nil {
return nil, err
diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go
index 0b4207229..5859583eb 100644
--- a/vendor/github.com/coreos/go-systemd/dbus/methods.go
+++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go
@@ -117,13 +117,13 @@ func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int,
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}
-// ReloadOrRestart attempts a reload if the unit supports it and use a restart
+// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a restart
// otherwise.
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}
-// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try"
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a "Try"
// flavored restart otherwise.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
@@ -192,11 +192,17 @@ func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
return c.getProperties(path, "org.freedesktop.systemd1.Unit")
}
-// GetUnitProperties takes the (escaped) unit path and returns all of its dbus object properties.
+// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
return c.getProperties(path, "org.freedesktop.systemd1.Unit")
}
+// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "")
+}
+
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
var err error
var prop dbus.Variant
@@ -291,6 +297,8 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
// ListUnits returns an array with all currently loaded units. Note that
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them.
+// Also note that a unit is only loaded if it is active and/or enabled.
+// Units that are both disabled and inactive will thus not be returned.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
}
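A hedged usage sketch of the new GetAllProperties helper (the unit name is hypothetical, and a running systemd with D-Bus access is assumed):

package main

import (
    "fmt"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        fmt.Println(err)
        return
    }
    defer conn.Close()

    // Unlike GetUnitProperties, this queries every interface of the unit object.
    props, err := conn.GetAllProperties("dbus.service")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(props["ActiveState"], props["MainPID"])
}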
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
index 70e63a6f1..f6d7a08a1 100644
--- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -94,7 +94,7 @@ func (c *Conn) dispatch() {
}()
}
-// Returns two unbuffered channels which will receive all changed units every
+// SubscribeUnits returns two unbuffered channels which will receive all changed units every
// interval. Deleted units are sent as nil.
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
index ef85a3ba2..a0f4837a0 100644
--- a/vendor/github.com/coreos/go-systemd/journal/journal.go
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -33,7 +33,10 @@ import (
"os"
"strconv"
"strings"
+ "sync"
+ "sync/atomic"
"syscall"
+ "unsafe"
)
// Priority of a journal message
@@ -50,19 +53,35 @@ const (
PriDebug
)
-var conn net.Conn
+var (
+ // This can be overridden at build-time:
+ // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+ journalSocket = "/run/systemd/journal/socket"
+
+ // unixConnPtr atomically holds the local unconnected Unix-domain socket.
+ // Concrete safe pointer type: *net.UnixConn
+ unixConnPtr unsafe.Pointer
+ // onceConn ensures that unixConnPtr is initialized exactly once.
+ onceConn sync.Once
+)
func init() {
- var err error
- conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
- if err != nil {
- conn = nil
- }
+ onceConn.Do(initConn)
}
-// Enabled returns true if the local systemd journal is available for logging
+// Enabled checks whether the local systemd journal is available for logging.
func Enabled() bool {
- return conn != nil
+ onceConn.Do(initConn)
+
+ if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
+ return false
+ }
+
+ conn, err := net.Dial("unixgram", journalSocket)
+ if err != nil {
+ return false
+ }
+ conn.Close()
+
+ return true
}
// Send a message to the local systemd journal. vars is a map of journald
@@ -73,8 +92,14 @@ func Enabled() bool {
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
+ conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
if conn == nil {
- return journalError("could not connect to journald socket")
+ return errors.New("could not initialize socket to journald")
+ }
+
+ socketAddr := &net.UnixAddr{
+ Name: journalSocket,
+ Net: "unixgram",
}
data := new(bytes.Buffer)
@@ -84,32 +109,30 @@ func Send(message string, priority Priority, vars map[string]string) error {
appendVariable(data, k, v)
}
- _, err := io.Copy(conn, data)
- if err != nil && isSocketSpaceError(err) {
- file, err := tempFd()
- if err != nil {
- return journalError(err.Error())
- }
- defer file.Close()
- _, err = io.Copy(file, data)
- if err != nil {
- return journalError(err.Error())
- }
-
- rights := syscall.UnixRights(int(file.Fd()))
+ _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+ if err == nil {
+ return nil
+ }
+ if !isSocketSpaceError(err) {
+ return err
+ }
- /* this connection should always be a UnixConn, but better safe than sorry */
- unixConn, ok := conn.(*net.UnixConn)
- if !ok {
- return journalError("can't send file through non-Unix connection")
- }
- _, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil)
- if err != nil {
- return journalError(err.Error())
- }
- } else if err != nil {
- return journalError(err.Error())
+ // Large log entry, send it via tempfile and ancillary-fd.
+ file, err := tempFd()
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return err
}
+ rights := syscall.UnixRights(int(file.Fd()))
+ _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+ if err != nil {
+ return err
+ }
+
return nil
}
@@ -119,8 +142,8 @@ func Print(priority Priority, format string, a ...interface{}) error {
}
func appendVariable(w io.Writer, name, value string) {
- if !validVarName(name) {
- journalError("variable name contains invalid character, ignoring")
+ if err := validVarName(name); err != nil {
+ fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
}
if strings.ContainsRune(value, '\n') {
/* When the value contains a newline, we write:
@@ -137,32 +160,42 @@ func appendVariable(w io.Writer, name, value string) {
}
}
-func validVarName(name string) bool {
- /* The variable name must be in uppercase and consist only of characters,
- * numbers and underscores, and may not begin with an underscore. (from the docs)
- */
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must be in uppercase and consist only of characters,
+// numbers and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+ if name == "" {
+ return errors.New("Empty variable name")
+ } else if name[0] == '_' {
+ return errors.New("Variable name begins with an underscore")
+ }
- valid := name[0] != '_'
for _, c := range name {
- valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_'
+ if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+ return errors.New("Variable name contains invalid characters")
+ }
}
- return valid
+ return nil
}
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
func isSocketSpaceError(err error) bool {
opErr, ok := err.(*net.OpError)
- if !ok {
+ if !ok || opErr == nil {
return false
}
- sysErr, ok := opErr.Err.(syscall.Errno)
- if !ok {
+ sysErr, ok := opErr.Err.(*os.SyscallError)
+ if !ok || sysErr == nil {
return false
}
- return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+ return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
}
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
func tempFd() (*os.File, error) {
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
if err != nil {
@@ -175,8 +208,18 @@ func tempFd() (*os.File, error) {
return file, nil
}
-func journalError(s string) error {
- s = "journal error: " + s
- fmt.Fprintln(os.Stderr, s)
- return errors.New(s)
+// initConn initializes the global `unixConnPtr` socket.
+// It is meant to be called exactly once, at program startup.
+func initConn() {
+ autobind, err := net.ResolveUnixAddr("unixgram", "")
+ if err != nil {
+ return
+ }
+
+ sock, err := net.ListenUnixgram("unixgram", autobind)
+ if err != nil {
+ return
+ }
+
+ atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
}
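A hedged usage sketch of the rewritten journal package: Enabled() probes the journald socket, and Send transparently falls back to the tempfile/ancillary-fd path for oversized entries. The field values are illustrative.

package main

import (
    "fmt"

    "github.com/coreos/go-systemd/journal"
)

func main() {
    if !journal.Enabled() {
        fmt.Println("journald socket not available")
        return
    }
    // Field names must satisfy validVarName: uppercase letters, digits,
    // underscores, and no leading underscore.
    vars := map[string]string{
        "MY_FIELD":          "demo",
        "SYSLOG_IDENTIFIER": "journal-example",
    }
    if err := journal.Send("hello from Go", journal.PriInfo, vars); err != nil {
        fmt.Println(err)
    }
}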
diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
index 9f3d92342..7f840def8 100644
--- a/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
+++ b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
@@ -414,7 +414,7 @@ func NewJournal() (j *Journal, err error) {
r := C.my_sd_journal_open(sd_journal_open, &j.cjournal, C.SD_JOURNAL_LOCAL_ONLY)
if r < 0 {
- return nil, fmt.Errorf("failed to open journal: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to open journal: %s", syscall.Errno(-r).Error())
}
return j, nil
@@ -435,7 +435,7 @@ func NewJournalFromDir(path string) (j *Journal, err error) {
r := C.my_sd_journal_open_directory(sd_journal_open_directory, &j.cjournal, p, 0)
if r < 0 {
- return nil, fmt.Errorf("failed to open journal in directory %q: %d", path, syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to open journal in directory %q: %s", path, syscall.Errno(-r).Error())
}
return j, nil
@@ -461,7 +461,7 @@ func NewJournalFromFiles(paths ...string) (j *Journal, err error) {
r := C.my_sd_journal_open_files(sd_journal_open_files, &j.cjournal, &cPaths[0], 0)
if r < 0 {
- return nil, fmt.Errorf("failed to open journals in paths %q: %d", paths, syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to open journals in paths %q: %s", paths, syscall.Errno(-r).Error())
}
return j, nil
@@ -496,7 +496,7 @@ func (j *Journal) AddMatch(match string) error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to add match: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to add match: %s", syscall.Errno(-r).Error())
}
return nil
@@ -514,7 +514,7 @@ func (j *Journal) AddDisjunction() error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to add a disjunction in the match list: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to add a disjunction in the match list: %s", syscall.Errno(-r).Error())
}
return nil
@@ -532,7 +532,7 @@ func (j *Journal) AddConjunction() error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to add a conjunction in the match list: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to add a conjunction in the match list: %s", syscall.Errno(-r).Error())
}
return nil
@@ -562,7 +562,7 @@ func (j *Journal) Next() (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error())
}
return uint64(r), nil
@@ -581,7 +581,7 @@ func (j *Journal) NextSkip(skip uint64) (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error())
}
return uint64(r), nil
@@ -599,7 +599,7 @@ func (j *Journal) Previous() (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error())
}
return uint64(r), nil
@@ -618,7 +618,7 @@ func (j *Journal) PreviousSkip(skip uint64) (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error())
}
return uint64(r), nil
@@ -641,7 +641,7 @@ func (j *Journal) getData(field string) (unsafe.Pointer, C.int, error) {
j.mu.Unlock()
if r < 0 {
- return nil, 0, fmt.Errorf("failed to read message: %d", syscall.Errno(-r))
+ return nil, 0, fmt.Errorf("failed to read message: %s", syscall.Errno(-r).Error())
}
return d, C.int(l), nil
@@ -736,7 +736,7 @@ func (j *Journal) GetEntry() (*JournalEntry, error) {
var realtimeUsec C.uint64_t
r = C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &realtimeUsec)
if r < 0 {
- return nil, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to get realtime timestamp: %s", syscall.Errno(-r).Error())
}
entry.RealtimeTimestamp = uint64(realtimeUsec)
@@ -746,7 +746,7 @@ func (j *Journal) GetEntry() (*JournalEntry, error) {
r = C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &monotonicUsec, &boot_id)
if r < 0 {
- return nil, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to get monotonic timestamp: %s", syscall.Errno(-r).Error())
}
entry.MonotonicTimestamp = uint64(monotonicUsec)
@@ -757,7 +757,7 @@ func (j *Journal) GetEntry() (*JournalEntry, error) {
r = C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &c)
defer C.free(unsafe.Pointer(c))
if r < 0 {
- return nil, fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to get cursor: %s", syscall.Errno(-r).Error())
}
entry.Cursor = C.GoString(c)
@@ -773,7 +773,7 @@ func (j *Journal) GetEntry() (*JournalEntry, error) {
}
if r < 0 {
- return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to read message field: %s", syscall.Errno(-r).Error())
}
msg := C.GoStringN((*C.char)(d), C.int(l))
@@ -803,7 +803,7 @@ func (j *Journal) SetDataThreshold(threshold uint64) error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to set data threshold: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to set data threshold: %s", syscall.Errno(-r).Error())
}
return nil
@@ -826,7 +826,7 @@ func (j *Journal) GetRealtimeUsec() (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to get realtime timestamp: %s", syscall.Errno(-r).Error())
}
return uint64(usec), nil
@@ -850,7 +850,7 @@ func (j *Journal) GetMonotonicUsec() (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to get monotonic timestamp: %s", syscall.Errno(-r).Error())
}
return uint64(usec), nil
@@ -875,7 +875,7 @@ func (j *Journal) GetCursor() (string, error) {
defer C.free(unsafe.Pointer(d))
if r < 0 {
- return "", fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r))
+ return "", fmt.Errorf("failed to get cursor: %s", syscall.Errno(-r).Error())
}
cursor := C.GoString(d)
@@ -899,7 +899,7 @@ func (j *Journal) TestCursor(cursor string) error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to test to cursor %q: %d", cursor, syscall.Errno(-r))
+ return fmt.Errorf("failed to test to cursor %q: %s", cursor, syscall.Errno(-r).Error())
} else if r == 0 {
return ErrNoTestCursor
}
@@ -921,7 +921,7 @@ func (j *Journal) SeekHead() error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to seek to head of journal: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to seek to head of journal: %s", syscall.Errno(-r).Error())
}
return nil
@@ -941,7 +941,7 @@ func (j *Journal) SeekTail() error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to seek to tail of journal: %d", syscall.Errno(-r))
+ return fmt.Errorf("failed to seek to tail of journal: %s", syscall.Errno(-r).Error())
}
return nil
@@ -961,7 +961,7 @@ func (j *Journal) SeekRealtimeUsec(usec uint64) error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to seek to %d: %d", usec, syscall.Errno(-r))
+ return fmt.Errorf("failed to seek to %d: %s", usec, syscall.Errno(-r).Error())
}
return nil
@@ -984,7 +984,7 @@ func (j *Journal) SeekCursor(cursor string) error {
j.mu.Unlock()
if r < 0 {
- return fmt.Errorf("failed to seek to cursor %q: %d", cursor, syscall.Errno(-r))
+ return fmt.Errorf("failed to seek to cursor %q: %s", cursor, syscall.Errno(-r).Error())
}
return nil
@@ -1031,7 +1031,7 @@ func (j *Journal) GetUsage() (uint64, error) {
j.mu.Unlock()
if r < 0 {
- return 0, fmt.Errorf("failed to get journal disk space usage: %d", syscall.Errno(-r))
+ return 0, fmt.Errorf("failed to get journal disk space usage: %s", syscall.Errno(-r).Error())
}
return uint64(out), nil
@@ -1065,7 +1065,7 @@ func (j *Journal) GetUniqueValues(field string) ([]string, error) {
r := C.my_sd_journal_query_unique(sd_journal_query_unique, j.cjournal, f)
if r < 0 {
- return nil, fmt.Errorf("failed to query journal: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to query journal: %s", syscall.Errno(-r).Error())
}
// Implements the SD_JOURNAL_FOREACH_UNIQUE macro from sd-journal.h
@@ -1079,7 +1079,7 @@ func (j *Journal) GetUniqueValues(field string) ([]string, error) {
}
if r < 0 {
- return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r))
+ return nil, fmt.Errorf("failed to read message field: %s", syscall.Errno(-r).Error())
}
msg := C.GoStringN((*C.char)(d), C.int(l))
@@ -1111,7 +1111,7 @@ func (j *Journal) GetCatalog() (string, error) {
defer C.free(unsafe.Pointer(c))
if r < 0 {
- return "", fmt.Errorf("failed to retrieve catalog entry for current journal entry: %d", syscall.Errno(-r))
+ return "", fmt.Errorf("failed to retrieve catalog entry for current journal entry: %s", syscall.Errno(-r).Error())
}
catalog := C.GoString(c)
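The sdjournal changes above are all the same fix: formatting a syscall.Errno with %d prints only the raw number, while %s (or .Error()) prints the human-readable message. A tiny illustration:

package main

import (
    "fmt"
    "syscall"
)

func main() {
    e := syscall.Errno(2)         // ENOENT
    fmt.Printf("as %%d: %d\n", e) // prints "as %d: 2"
    fmt.Printf("as %%s: %s\n", e) // prints "as %s: no such file or directory"
}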
diff --git a/vendor/github.com/coreos/pkg/README.md b/vendor/github.com/coreos/pkg/README.md
deleted file mode 100644
index ca68a07f0..000000000
--- a/vendor/github.com/coreos/pkg/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-a collection of go utility packages
-
-[![Build Status](https://travis-ci.org/coreos/pkg.png?branch=master)](https://travis-ci.org/coreos/pkg)
-[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/coreos/pkg)
diff --git a/vendor/github.com/cri-o/ocicni/README.md b/vendor/github.com/cri-o/ocicni/README.md
deleted file mode 100644
index 99c103c83..000000000
--- a/vendor/github.com/cri-o/ocicni/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# ocicni
-
-API layer to call the CNI plugins from an OCI lifecycle daemon
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index a08be9ecd..8743abc56 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -382,7 +382,7 @@ func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
-func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork) error) error {
+func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork, RuntimeConfig) error) error {
networks := podNetwork.Networks
if len(networks) == 0 {
networks = append(networks, plugin.GetDefaultNetworkName())
@@ -395,7 +395,7 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFu
logrus.Errorf(err.Error())
return err
}
- if err := forEachFunc(network, ifName, podNetwork); err != nil {
+ if err := forEachFunc(network, ifName, podNetwork, podNetwork.RuntimeConfig[netName]); err != nil {
return err
}
}
@@ -410,20 +410,15 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo", "")
+ _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo", RuntimeConfig{})
if err != nil {
logrus.Errorf("Error while adding to cni lo network: %s", err)
return nil, err
}
results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error {
- ip := ""
- if conf, ok := podNetwork.NetworkConfig[network.name]; ok {
- ip = conf.IP
- }
-
- result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName, ip)
+ if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
+ result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig)
if err != nil {
logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err)
return err
@@ -445,13 +440,8 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error {
- ip := ""
- if conf, ok := podNetwork.NetworkConfig[network.name]; ok {
- ip = conf.IP
- }
-
- if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName, ip); err != nil {
+ return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
+ if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig); err != nil {
logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err)
return err
}
@@ -466,35 +456,15 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cn
defer plugin.podUnlock(podNetwork)
results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error {
- version := "4"
- ip, mac, err := getContainerDetails(plugin.nsManager, podNetwork.NetNS, ifName, "-4")
+ if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
+ result, err := network.checkNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig, plugin.nsManager)
if err != nil {
- ip, mac, err = getContainerDetails(plugin.nsManager, podNetwork.NetNS, ifName, "-6")
- if err != nil {
- return err
- }
- version = "6"
+ logrus.Errorf("Error while checking pod to CNI network %q: %s", network.name, err)
+ return err
+ }
+ if result != nil {
+ results = append(results, result)
}
-
- // Until CNI's GET request lands, construct the Result manually
- results = append(results, &cnicurrent.Result{
- CNIVersion: "0.3.1",
- Interfaces: []*cnicurrent.Interface{
- {
- Name: ifName,
- Mac: mac.String(),
- Sandbox: podNetwork.NetNS,
- },
- },
- IPs: []*cnicurrent.IPConfig{
- {
- Version: version,
- Interface: cnicurrent.Int(0),
- Address: *ip,
- },
- },
- })
return nil
}); err != nil {
return nil, err
@@ -503,8 +473,8 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cn
return results, nil
}
-func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName, ip string) (cnitypes.Result, error) {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, ip)
+func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (cnitypes.Result, error) {
+ rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
@@ -521,8 +491,82 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork,
return res, nil
}
-func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName, ip string) error {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, ip)
+func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig, nsManager *nsManager) (cnitypes.Result, error) {
+
+ rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
+ if err != nil {
+ logrus.Errorf("Error checking network: %v", err)
+ return nil, err
+ }
+
+ netconf, cninet := network.NetworkConfig, network.CNIConfig
+ logrus.Infof("About to check CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
+
+ gtet, err := cniversion.GreaterThanOrEqualTo(netconf.CNIVersion, "0.4.0")
+ if err != nil {
+ return nil, err
+ }
+
+ var result cnitypes.Result
+
+ // When CNIVersion supports Check, use it. Otherwise fall back on what was done initially.
+ if gtet {
+ err = cninet.CheckNetworkList(context.Background(), netconf, rt)
+ logrus.Infof("Checking CNI network %s (config version=%v)", netconf.Name, netconf.CNIVersion)
+ if err != nil {
+ logrus.Errorf("Error checking network: %v", err)
+ return nil, err
+ }
+ }
+
+ result, err = cninet.GetNetworkListCachedResult(netconf, rt)
+ if err != nil {
+ logrus.Errorf("Error GetNetworkListCachedResult: %v", err)
+ return nil, err
+ } else if result != nil {
+ return result, nil
+ }
+
+ // result doesn't exist, create one
+ logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", netconf.Name, netconf.CNIVersion, nsManager)
+
+ var cniInterface *cnicurrent.Interface
+ ips := []*cnicurrent.IPConfig{}
+ errs := []error{}
+ for _, version := range []string{"4", "6"} {
+ ip, mac, err := getContainerDetails(nsManager, podNetwork.NetNS, ifName, "-"+version)
+ if err == nil {
+ if cniInterface == nil {
+ cniInterface = &cnicurrent.Interface{
+ Name: ifName,
+ Mac: mac.String(),
+ Sandbox: podNetwork.NetNS,
+ }
+ }
+ ips = append(ips, &cnicurrent.IPConfig{
+ Version: version,
+ Interface: cnicurrent.Int(0),
+ Address: *ip,
+ })
+ } else {
+ errs = append(errs, err)
+ }
+ }
+ if cniInterface == nil || len(ips) == 0 {
+ return nil, fmt.Errorf("neither IPv4 nor IPv6 found when retrieving network status: %v", errs)
+ }
+
+ result = &cnicurrent.Result{
+ CNIVersion: netconf.CNIVersion,
+ Interfaces: []*cnicurrent.Interface{cniInterface},
+ IPs: ips,
+ }
+
+ return result, nil
+}
+
+func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) error {
+ rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
if err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
@@ -538,7 +582,7 @@ func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNet
return nil
}
-func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName, ip string) (*libcni.RuntimeConf, error) {
+func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (*libcni.RuntimeConf, error) {
logrus.Infof("Got pod network %+v", podNetwork)
rt := &libcni.RuntimeConf{
@@ -552,9 +596,11 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName, ip str
{"K8S_POD_NAME", podNetwork.Name},
{"K8S_POD_INFRA_CONTAINER_ID", podNetwork.ID},
},
+ CapabilityArgs: map[string]interface{}{},
}
// Add requested static IP to CNI_ARGS
+ ip := runtimeConfig.IP
if ip != "" {
if tstIP := net.ParseIP(ip); tstIP == nil {
return nil, fmt.Errorf("unable to parse IP address %q", ip)
@@ -562,13 +608,26 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName, ip str
rt.Args = append(rt.Args, [2]string{"IP", ip})
}
- if len(podNetwork.PortMappings) == 0 {
- return rt, nil
+ // Set PortMappings in Capabilities
+ if len(runtimeConfig.PortMappings) != 0 {
+ rt.CapabilityArgs["portMappings"] = runtimeConfig.PortMappings
+ }
+
+ // Set Bandwidth in Capabilities
+ if runtimeConfig.Bandwidth != nil {
+ rt.CapabilityArgs["bandwidth"] = map[string]uint64{
+ "ingressRate": runtimeConfig.Bandwidth.IngressRate,
+ "ingressBurst": runtimeConfig.Bandwidth.IngressBurst,
+ "egressRate": runtimeConfig.Bandwidth.EgressRate,
+ "egressBurst": runtimeConfig.Bandwidth.EgressBurst,
+ }
}
- rt.CapabilityArgs = map[string]interface{}{
- "portMappings": podNetwork.PortMappings,
+ // Set IpRanges in Capabilities
+ if len(runtimeConfig.IpRanges) > 0 {
+ rt.CapabilityArgs["ipRanges"] = runtimeConfig.IpRanges
}
+
return rt, nil
}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
index d76094292..8709711e0 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
@@ -24,12 +24,44 @@ type PortMapping struct {
HostIP string `json:"hostIP"`
}
-// NetworkConfig is additional configuration for a single CNI network.
-type NetworkConfig struct {
+// IpRange maps to the standard CNI ipRanges Capability
+// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md
+type IpRange struct {
+ // Subnet is the whole CIDR
+ Subnet string `json:"subnet"`
+ // RangeStart is the first available IP in subnet
+ RangeStart string `json:"rangeStart,omitempty"`
+ // RangeEnd is the last available IP in subnet
+ RangeEnd string `json:"rangeEnd,omitempty"`
+ // Gateway is the gateway of subnet
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// RuntimeConfig is additional configuration for a single CNI network that
+// is pod-specific rather than general to the network.
+type RuntimeConfig struct {
// IP is a static IP to be specified in the network. Can only be used
// with the hostlocal IP allocator. If left unset, an IP will be
// dynamically allocated.
IP string
+ // PortMappings is the port mapping of the sandbox.
+ PortMappings []PortMapping
+ // Bandwidth is the bandwidth limit configuration for the pod
+ Bandwidth *BandwidthConfig
+ // IpRanges is the set of IP ranges used for address allocation
+ IpRanges [][]IpRange
+}
+
+// BandwidthConfig maps to the standard CNI bandwidth Capability
+// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md
+type BandwidthConfig struct {
+ // IngressRate is a limit for incoming traffic in bps
+ IngressRate uint64
+ IngressBurst uint64
+
+ // EgressRate is a limit for outgoing traffic in bps
+ EgressRate uint64
+ EgressBurst uint64
}
// PodNetwork configures the network of a pod sandbox.
@@ -42,8 +74,6 @@ type PodNetwork struct {
ID string
// NetNS is the network namespace path of the sandbox.
NetNS string
- // PortMappings is the port mapping of the sandbox.
- PortMappings []PortMapping
// Networks is a list of CNI network names to attach to the sandbox
// Leave this list empty to attach the default network to the sandbox
@@ -52,7 +82,7 @@ type PodNetwork struct {
// NetworkConfig is configuration specific to a single CNI network.
// It is optional, and can be omitted for some or all specified networks
// without issue.
- NetworkConfig map[string]NetworkConfig
+ RuntimeConfig map[string]RuntimeConfig
}
// CNIPlugin is the interface that needs to be implemented by a plugin
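For context on the API change above: pod-specific settings now travel per network in PodNetwork.RuntimeConfig, and buildCNIRuntimeConf turns them into CNI_ARGS (the static IP) and CapabilityArgs (portMappings, bandwidth, ipRanges). The following is a minimal caller-side sketch against the updated ocicni types, not part of the vendored change; the network name, pod identifiers and values are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

func main() {
	// Per-network, pod-specific settings now live in RuntimeConfig rather
	// than on PodNetwork itself; ocicni translates them into CNI_ARGS (IP)
	// and CapabilityArgs (portMappings, bandwidth, ipRanges).
	podNet := ocicni.PodNetwork{
		Name:      "mypod",      // illustrative values only
		Namespace: "default",
		ID:        "0123456789abcdef",
		NetNS:     "/var/run/netns/mypod",
		Networks:  []string{"bridge"},
		RuntimeConfig: map[string]ocicni.RuntimeConfig{
			"bridge": {
				IP: "10.88.0.10",
				PortMappings: []ocicni.PortMapping{
					{HostPort: 8080, ContainerPort: 80, Protocol: "tcp"},
				},
				Bandwidth: &ocicni.BandwidthConfig{
					IngressRate:  1 << 20, // bits per second
					IngressBurst: 1 << 20,
					EgressRate:   1 << 20,
					EgressBurst:  1 << 20,
				},
				IpRanges: [][]ocicni.IpRange{{
					{Subnet: "10.88.0.0/16", RangeStart: "10.88.0.10", RangeEnd: "10.88.0.20"},
				}},
			},
		},
	}
	fmt.Printf("%+v\n", podNet)
}
```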
diff --git a/vendor/github.com/cri-o/ocicni/vendor.conf b/vendor/github.com/cri-o/ocicni/vendor.conf
deleted file mode 100644
index d769d5177..000000000
--- a/vendor/github.com/cri-o/ocicni/vendor.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-github.com/containernetworking/cni fbb95fff8a5239a4295c991efa8a397d43118f7e
-github.com/fsnotify/fsnotify 1485a34d5d5723fea214f5710708e19a831720e4
-github.com/sirupsen/logrus 787e519fa85519b874dead61020de598e9a23944
-github.com/onsi/ginkgo eea6ad008b96acdaa524f5b409513bf062b500ad
-github.com/onsi/gomega 90e289841c1ed79b7a598a7cd9959750cb5e89e2
-golang.org/x/net 63eda1eb0650888965ead1296efd04d0b2b61128
-gopkg.in/yaml.v2 51d6538a90f86fe93ac480b35f37b2be17fef232
-golang.org/x/text e3703dcdd614d2d7488fff034c75c551ea25da95
-golang.org/x/sys f49334f85ddcf0f08d7fb6dd7363e9e6d6b777eb
-github.com/hpcloud/tail a1dbeea552b7c8df4b542c66073e393de198a800
-gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
-gopkg.in/fsnotify/fsnotify.v1 c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
-github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
new file mode 100644
index 000000000..3938f3834
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
@@ -0,0 +1,19 @@
+# Copyright (C) 2017 SUSE LLC. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - tip
+
+os:
+ - linux
+ - osx
+
+script:
+ - go test -cover -v ./...
+
+notifications:
+ email: false
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
new file mode 100644
index 000000000..ee1372d33
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -0,0 +1 @@
+0.2.2
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
index f20985479..c4ca3d713 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -12,7 +12,6 @@ package securejoin
import (
"bytes"
- "fmt"
"os"
"path/filepath"
"strings"
@@ -23,7 +22,7 @@ import (
// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
// evaluated in attempting to securely join the two given paths.
-var ErrSymlinkLoop = fmt.Errorf("SecureJoin: too many links")
+var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
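The hunk above changes ErrSymlinkLoop from a plain fmt.Errorf value to syscall.ELOOP wrapped with github.com/pkg/errors, so callers can test for the underlying errno. A minimal caller-side sketch of that check, assuming SecureJoin returns ErrSymlinkLoop unwrapped as in the vendored version; the root and path here are illustrative only.

```go
package main

import (
	"fmt"
	"syscall"

	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/pkg/errors"
)

func main() {
	// Join an untrusted path under a root without letting symlinks escape
	// the root. With this change, a symlink loop surfaces as a wrapped
	// syscall.ELOOP instead of an opaque error string.
	path, err := securejoin.SecureJoin("/var/lib/containers/root", "etc/passwd")
	if err != nil {
		if errors.Cause(err) == syscall.ELOOP {
			fmt.Println("too many symlinks while resolving the path")
			return
		}
		fmt.Println("secure join failed:", err)
		return
	}
	fmt.Println("resolved to", path)
}
```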
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index c83641619..bc52e96f2 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
deleted file mode 100644
index 262430449..000000000
--- a/vendor/github.com/davecgh/go-spew/README.md
+++ /dev/null
@@ -1,205 +0,0 @@
-go-spew
-=======
-
-[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]
-(https://travis-ci.org/davecgh/go-spew) [![ISC License]
-(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
-(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
-(https://coveralls.io/r/davecgh/go-spew?branch=master)
-
-
-Go-spew implements a deep pretty printer for Go data structures to aid in
-debugging. A comprehensive suite of tests with 100% test coverage is provided
-to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
-report. Go-spew is licensed under the liberal ISC license, so it may be used in
-open source or commercial projects.
-
-If you're interested in reading about how this package came to life and some
-of the challenges involved in providing a deep pretty printer, there is a blog
-post about it
-[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
-
-## Documentation
-
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
-(http://godoc.org/github.com/davecgh/go-spew/spew)
-
-Full `go doc` style documentation for the project can be viewed online without
-installing this package by using the excellent GoDoc site here:
-http://godoc.org/github.com/davecgh/go-spew/spew
-
-You can also view the documentation locally once the package is installed with
-the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
-http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
-
-## Installation
-
-```bash
-$ go get -u github.com/davecgh/go-spew/spew
-```
-
-## Quick Start
-
-Add this import line to the file you're working in:
-
-```Go
-import "github.com/davecgh/go-spew/spew"
-```
-
-To dump a variable with full newlines, indentation, type, and pointer
-information use Dump, Fdump, or Sdump:
-
-```Go
-spew.Dump(myVar1, myVar2, ...)
-spew.Fdump(someWriter, myVar1, myVar2, ...)
-str := spew.Sdump(myVar1, myVar2, ...)
-```
-
-Alternatively, if you would prefer to use format strings with a compacted inline
-printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
-compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
-and pointer addresses):
-
-```Go
-spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-```
-
-## Debugging a Web Application Example
-
-Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
-
-```Go
-package main
-
-import (
- "fmt"
- "html"
- "net/http"
-
- "github.com/davecgh/go-spew/spew"
-)
-
-func handler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/html")
- fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
- fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
-}
-
-func main() {
- http.HandleFunc("/", handler)
- http.ListenAndServe(":8080", nil)
-}
-```
-
-## Sample Dump Output
-
-```
-(main.Foo) {
- unexportedField: (*main.Bar)(0xf84002e210)({
- flag: (main.Flag) flagTwo,
- data: (uintptr) <nil>
- }),
- ExportedField: (map[interface {}]interface {}) {
- (string) "one": (bool) true
- }
-}
-([]uint8) {
- 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
- 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
- 00000020 31 32 |12|
-}
-```
-
-## Sample Formatter Output
-
-Double pointer to a uint8:
-```
- %v: <**>5
- %+v: <**>(0xf8400420d0->0xf8400420c8)5
- %#v: (**uint8)5
- %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-```
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
-```
- %v: <*>{1 <*><shown>}
- %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
- %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
- %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-```
-
-## Configuration Options
-
-Configuration of spew is handled by fields in the ConfigState type. For
-convenience, all of the top-level functions use a global state available via the
-spew.Config global.
-
-It is also possible to create a ConfigState instance that provides methods
-equivalent to the top-level functions. This allows concurrent configuration
-options. See the ConfigState documentation for more details.
-
-```
-* Indent
- String to use for each indentation level for Dump functions.
- It is a single space by default. A popular alternative is "\t".
-
-* MaxDepth
- Maximum number of levels to descend into nested data structures.
- There is no limit by default.
-
-* DisableMethods
- Disables invocation of error and Stringer interface methods.
- Method invocation is enabled by default.
-
-* DisablePointerMethods
- Disables invocation of error and Stringer interface methods on types
- which only accept pointer receivers from non-pointer variables. This option
- relies on access to the unsafe package, so it will not have any effect when
- running in environments without access to the unsafe package such as Google
- App Engine or with the "safe" build tag specified.
- Pointer method invocation is enabled by default.
-
-* DisablePointerAddresses
- DisablePointerAddresses specifies whether to disable the printing of
- pointer addresses. This is useful when diffing data structures in tests.
-
-* DisableCapacities
- DisableCapacities specifies whether to disable the printing of capacities
- for arrays, slices, maps and channels. This is useful when diffing data
- structures in tests.
-
-* ContinueOnMethod
- Enables recursion into types after invoking error and Stringer interface
- methods. Recursion after method invocation is disabled by default.
-
-* SortKeys
- Specifies map keys should be sorted before being printed. Use
- this to have a more deterministic, diffable output. Note that
- only native types (bool, int, uint, floats, uintptr and string)
- and types which implement error or Stringer interfaces are supported,
- with other types sorted according to the reflect.Value.String() output
- which guarantees display stability. Natural map order is used by
- default.
-
-* SpewKeys
- SpewKeys specifies that, as a last resort attempt, map keys should be
- spewed to strings and sorted by those strings. This is only considered
- if SortKeys is true.
-
-```
-
-## Unsafe Package Dependency
-
-This package relies on the unsafe package to perform some of the more advanced
-features, however it also supports a "limited" mode which allows it to work in
-environments where the unsafe package is not available. By default, it will
-operate in this mode on Google App Engine and when compiled with GopherJS. The
-"safe" build tag may also be specified to force the package to build without
-using the unsafe package.
-
-## License
-
-Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a6589a..792994785 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
+type flag uintptr
+
var (
- // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
- // internal reflect.Value fields. These values are valid before golang
- // commit ecccf07e7f9d which changed the format. The are also valid
- // after commit 82f48826c6c7 which changed the format again to mirror
- // the original format. Code in the init function updates these offsets
- // as necessary.
- offsetPtr = uintptr(ptrSize)
- offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
-
- // flagKindWidth and flagKindShift indicate various bits that the
- // reflect package uses internally to track kind information.
- //
- // flagRO indicates whether or not the value field of a reflect.Value is
- // read-only.
- //
- // flagIndir indicates whether the value field of a reflect.Value is
- // the actual data or a pointer to the data.
- //
- // These values are valid before golang commit 90a7c3c86944 which
- // changed their positions. Code in the init function updates these
- // flags as necessary.
- flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
- flagRO = uintptr(1 << 0)
- flagIndir = uintptr(1 << 1)
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
)
-func init() {
- // Older versions of reflect.Value stored small integers directly in the
- // ptr field (which is named val in the older versions). Versions
- // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
- // scalar for this purpose which unfortunately came before the flag
- // field, so the offset of the flag field is different for those
- // versions.
- //
- // This code constructs a new reflect.Value from a known small integer
- // and checks if the size of the reflect.Value struct indicates it has
- // the scalar field. When it does, the offsets are updated accordingly.
- vv := reflect.ValueOf(0xf00)
- if unsafe.Sizeof(vv) == (ptrSize * 4) {
- offsetScalar = ptrSize * 2
- offsetFlag = ptrSize * 3
- }
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
- // Commit 90a7c3c86944 changed the flag positions such that the low
- // order bits are the kind. This code extracts the kind from the flags
- // field and ensures it's the correct type. When it's not, the flag
- // order has been changed to the newer format, so the flags are updated
- // accordingly.
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
- upfv := *(*uintptr)(upf)
- flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
- if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
- flagKindShift = 0
- flagRO = 1 << 5
- flagIndir = 1 << 6
-
- // Commit adf9b30e5594 modified the flags to separate the
- // flagRO flag into two bits which specifies whether or not the
- // field is embedded. This causes flagIndir to move over a bit
- // and means that flagRO is the combination of either of the
- // original flagRO bit and the new bit.
- //
- // This code detects the change by extracting what used to be
- // the indirect bit to ensure it's set. When it's not, the flag
- // order has been changed to the newer format, so the flags are
- // updated accordingly.
- if upfv&flagIndir == 0 {
- flagRO = 3 << 5
- flagIndir = 1 << 7
- }
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
}
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
- indirects := 1
- vt := v.Type()
- upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
- rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
- if rvf&flagIndir != 0 {
- vt = reflect.PtrTo(v.Type())
- indirects++
- } else if offsetScalar != 0 {
- // The value is in the scalar field when it's not one of the
- // reference types.
- switch vt.Kind() {
- case reflect.Uintptr:
- case reflect.Chan:
- case reflect.Func:
- case reflect.Map:
- case reflect.Ptr:
- case reflect.UnsafePointer:
- default:
- upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
- offsetScalar)
- }
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
}
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
- pv := reflect.NewAt(vt, upv)
- rv = pv
- for i := 0; i < indirects; i++ {
- rv = rv.Elem()
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
}
- return rv
+ panic("reflect.Value read-only flag has changed semantics")
}
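The rewritten bypass infers the reflect.Value flag bits at init time instead of hard-coding struct offsets, but its purpose is unchanged: letting spew read unexported fields. A small sketch of the behaviour it enables; the types and values here are illustrative and not part of the vendored change.

```go
package main

import (
	"github.com/davecgh/go-spew/spew"
)

type inner struct {
	secret string // unexported field, normally unreadable via reflection
}

type outer struct {
	Public  int
	private inner
}

func main() {
	v := outer{Public: 42, private: inner{secret: "hidden"}}
	// On builds where the unsafe bypass is active (the default), Dump walks
	// into the unexported fields; with "-tags safe" (or pre-Go 1.4) it falls
	// back to the limited mode in bypasssafe.go.
	spew.Dump(v)
}
```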
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index 1fe3cf3d5..205c28d68 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff47..1be8ce945 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582a7..f78d89fc1 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875bac..b04edb7d7 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore
new file mode 100644
index 000000000..4cf7888e9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# never checkin from the bin file (for now)
+bin/*
+
+# Test key files
+*.pem
+
+# Cover profiles
+*.out
+
+# Editor/IDE specific files.
+*.sublime-project
+*.sublime-workspace
+.idea/*
diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json
new file mode 100644
index 000000000..9df5b14bc
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.gometalinter.json
@@ -0,0 +1,16 @@
+{
+ "Vendor": true,
+ "Deadline": "2m",
+ "Sort": ["linter", "severity", "path", "line"],
+ "EnableGC": true,
+ "Enable": [
+ "structcheck",
+ "staticcheck",
+ "unconvert",
+
+ "gofmt",
+ "goimports",
+ "golint",
+ "vet"
+ ]
+}
diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap
new file mode 100644
index 000000000..0f48321d4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.mailmap
@@ -0,0 +1,32 @@
+Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
+Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
+Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
+Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
+Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
+Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
+Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
+Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
+Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
+harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
+Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
+Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
+Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
+Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
+davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
+Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
+Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
+Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
+Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com>
+Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
+Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local>
+Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email>
+Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com>
+Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com>
+Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io>
+Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com>
+Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com>
+Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com>
+Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
+Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
+Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
+Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>
diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml
new file mode 100644
index 000000000..44ced6045
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.travis.yml
@@ -0,0 +1,51 @@
+dist: trusty
+sudo: required
+# setup travis so that we can run containers for integration tests
+services:
+ - docker
+
+language: go
+
+go:
+ - "1.11.x"
+
+go_import_path: github.com/docker/distribution
+
+addons:
+ apt:
+ packages:
+ - python-minimal
+
+
+env:
+ - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
+
+before_install:
+ - uname -r
+ - sudo apt-get -q update
+
+install:
+ - go get -u github.com/vbatts/git-validation
+ # TODO: Add enforcement of license
+ # - go get -u github.com/kunalkushwaha/ltag
+ - cd $TRAVIS_BUILD_DIR
+
+script:
+ - export GOOS=$TRAVIS_GOOS
+ - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
+ - DCO_VERBOSITY=-q script/validate/dco
+ - GOOS=linux script/setup/install-dev-tools
+ - script/validate/vendor
+ - go build -i .
+ - make check
+ - make build
+ - make binaries
+ # Currently takes too long
+ #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
+ - if [ "$GOOS" = "linux" ]; then make coverage ; fi
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash) -F linux
+
+before_deploy:
+ # Run tests with storage driver configurations
diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md
new file mode 100644
index 000000000..2981d016b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/BUILDING.md
@@ -0,0 +1,117 @@
+
+# Building the registry source
+
+## Use-case
+
+This is useful if you intend to actively work on the registry.
+
+### Alternatives
+
+Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
+
+People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
+
+OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
+
+### Gotchas
+
+You are expected to know your way around with go & git.
+
+If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
+
+## Build the development environment
+
+The first prerequisite of properly building distribution targets is to have a Go
+development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
+for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
+environment.
+
+If a Go development environment is setup, one can use `go get` to install the
+`registry` command from the current latest:
+
+ go get github.com/docker/distribution/cmd/registry
+
+The above will install the source repository into the `GOPATH`.
+
+Now create the directory for the registry data (this might require you to set permissions properly)
+
+ mkdir -p /var/lib/registry
+
+... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
+
+The `registry`
+binary can then be run with the following:
+
+ $ $GOPATH/bin/registry --version
+ $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
+
+> __NOTE:__ While you do not need to use `go get` to checkout the distribution
+> project, for these build instructions to work, the project must be checked
+> out in the correct location in the `GOPATH`. This should almost always be
+> `$GOPATH/src/github.com/docker/distribution`.
+
+The registry can be run with the default config using the following
+incantation:
+
+ $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
+ INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+ INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+ INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+ INFO[0000] debug server listening localhost:5001
+
+If it is working, one should see the above log messages.
+
+### Repeatable Builds
+
+For the full development experience, one should `cd` into
+`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
+commands, such as `go test`, should work per package (please see
+[Developing](#developing) if they don't work).
+
+A `Makefile` has been provided as a convenience to support repeatable builds.
+Please install the following into `GOPATH` for it to work:
+
+ go get github.com/golang/lint/golint
+
+Once these commands are available in the `GOPATH`, run `make` to get a full
+build:
+
+ $ make
+ + clean
+ + fmt
+ + vet
+ + lint
+ + build
+ github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
+ github.com/sirupsen/logrus
+ github.com/docker/libtrust
+ ...
+ github.com/yvasiyarov/gorelic
+ github.com/docker/distribution/registry/handlers
+ github.com/docker/distribution/cmd/registry
+ + test
+ ...
+ ok github.com/docker/distribution/digest 7.875s
+ ok github.com/docker/distribution/manifest 0.028s
+ ok github.com/docker/distribution/notifications 17.322s
+ ? github.com/docker/distribution/registry [no test files]
+ ok github.com/docker/distribution/registry/api/v2 0.101s
+ ? github.com/docker/distribution/registry/auth [no test files]
+ ok github.com/docker/distribution/registry/auth/silly 0.011s
+ ...
+ + /Users/sday/go/src/github.com/docker/distribution/bin/registry
+ + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+ + binaries
+
+The above provides a repeatable build using the contents of the vendor
+directory. This includes formatting, vetting, linting, building,
+testing and generating tagged binaries. We can verify this worked by running
+the registry binary generated in the "./bin" directory:
+
+ $ ./bin/registry --version
+ ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
+
+### Optional build tags
+
+Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
+the environment variable `DOCKER_BUILDTAGS`.
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md
new file mode 100644
index 000000000..4c067d9e7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md
@@ -0,0 +1,148 @@
+# Contributing to the registry
+
+## Before reporting an issue...
+
+### If your problem is with...
+
+ - automated builds
+ - your account on the [Docker Hub](https://hub.docker.com/)
+ - any other [Docker Hub](https://hub.docker.com/) issue
+
+Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
+
+### If you...
+
+ - need help setting up your registry
+ - can't figure out something
+ - are not sure what's going on or what your problem is
+
+Then please do not open an issue here yet - you should first try one of the following support forums:
+
+ - irc: #docker-distribution on freenode
+ - mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+
+### Reporting security issues
+
+The Docker maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
+
+Please **DO NOT** file a public issue, instead send your report privately to
+[security@docker.com](mailto:security@docker.com).
+
+## Reporting an issue properly
+
+By following these simple rules you will get better and faster feedback on your issue.
+
+ - search the bugtracker for an already reported issue
+
+### If you found an issue that describes your problem:
+
+ - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
+ - please refrain from adding "same thing here" or "+1" comments
+ - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
+ - comment if you have some new, technical and relevant information to add to the case
+ - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
+
+### If you have not found an existing issue that describes your problem:
+
+ 1. create a new issue, with a succinct title that describes your issue:
+ - bad title: "It doesn't work with my docker"
+ - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
+ 2. copy the output of:
+ - `docker version`
+ - `docker info`
+ - `docker exec <registry-container> registry --version`
+ 3. copy the command line you used to launch your Registry
+ 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
+ 5. reproduce your problem and get your docker daemon logs showing the error
+ 6. if relevant, copy your registry logs that show the error
+ 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
+ 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
+
+## Contributing a patch for a known bug, or a small correction
+
+You should follow the basic GitHub workflow:
+
+ 1. fork
+ 2. commit a change
+ 3. make sure the tests pass
+ 4. PR
+
+Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
+
+ - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
+ - sign your commits using `-s`: `git commit -s -m "My commit"`
+
+Some simple rules to ensure quick merge:
+
+ - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
+ - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
+ - if you need to amend your PR following comments, please squash instead of adding more commits
+
+## Contributing new features
+
+You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
+
+If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
+If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
+
+Then you should submit your implementation, clearly linking to the issue (and possible proposal).
+
+Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
+
+It's mandatory to:
+
+ - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
+ - address maintainers' comments and modify your submission accordingly
+ - write tests for any new code
+
+Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
+
+Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+ [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+ Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+ Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+ expectations, caveats and anything else that may be important. If a type
+ gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+ `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+ In practice, short methods will have short variable names and globals will
+ have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+ and re-examine why you need a compound name. If you still think you need a
+ compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+ warrant its own package, it has not been written generally enough to be a
+ part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+ required. No, we don't need another unit testing framework. Assertion
+ packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+ guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile
new file mode 100644
index 000000000..9537817ca
--- /dev/null
+++ b/vendor/github.com/docker/distribution/Dockerfile
@@ -0,0 +1,23 @@
+FROM golang:1.11-alpine AS build
+
+ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
+ENV BUILDTAGS include_oss include_gcs
+
+ARG GOOS=linux
+ARG GOARCH=amd64
+ARG GOARM=6
+
+RUN set -ex \
+ && apk add --no-cache make git file
+
+WORKDIR $DISTRIBUTION_DIR
+COPY . $DISTRIBUTION_DIR
+RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"
+
+FROM alpine
+COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
+COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry
+VOLUME ["/var/lib/registry"]
+EXPOSE 5000
+ENTRYPOINT ["registry"]
+CMD ["serve", "/etc/docker/registry/config.yml"]
diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS
new file mode 100644
index 000000000..3183620c5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/MAINTAINERS
@@ -0,0 +1,243 @@
+# Distribution maintainers file
+#
+# This file describes who runs the docker/distribution project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+
+[Rules]
+
+ [Rules.maintainers]
+
+ title = "What is a maintainer?"
+
+ text = """
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+1) They share responsibility in the project's success.
+2) They have made a long-term, recurring time investment to improve the project.
+3) They spend that time doing whatever needs to be done, not necessarily what
+is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is harder to appreciate.
+It's easy to appreciate a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+"""
+
+ [Rules.reviewer]
+
+ title = "What is a reviewer?"
+
+ text = """
+A reviewer is a core role within the project.
+They share in reviewing issues and pull requests and their LGTM count towards the
+required LGTM count to merge a code change into the project.
+
+Reviewers are part of the organization but do not have write access.
+Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
+"""
+
+ [Rules.adding-maintainers]
+
+ title = "How are maintainers added?"
+
+ text = """
+Maintainers are first and foremost contributors that have shown they are
+committed to the long term success of a project. Contributors wanting to become
+maintainers are expected to be deeply involved in contributing code, pull
+request review, and triage of issues in the project for more than three months.
+
+Just contributing does not make you a maintainer, it is about building trust
+with the current maintainers of the project and being a person that they can
+depend on and trust to make decisions in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors that have
+shown regular activity on the project over the prior months. From this list,
+maintainer candidates are selected and proposed on the maintainers mailing list.
+
+After a candidate has been announced on the maintainers mailing list, the
+existing maintainers are given five business days to discuss the candidate,
+raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing
+list. Only maintainers of the repository that the candidate is proposed for are
+allowed to vote.
+
+If a candidate is approved, a maintainer will contact the candidate to invite
+the candidate to open a pull request that adds the contributor to the
+MAINTAINERS file. The candidate becomes a maintainer once the pull request is
+merged.
+"""
+
+ [Rules.stepping-down-policy]
+
+ title = "Stepping down policy"
+
+ text = """
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+"""
+
+ [Rules.inactive-maintainers]
+
+ title = "Removal of inactive maintainers"
+
+ text = """
+Similar to the procedure for adding new maintainers, existing maintainers can
+be removed from the list if they do not show significant activity on the
+project. Periodically, the maintainers review the list of maintainers and their
+activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a neutral
+person will contact the maintainer to ask if they want to continue being
+a maintainer. If the maintainer decides to step down as a maintainer, they
+open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to remain a maintainer, but is unable to perform the
+required duties they can be removed with a vote of at least 66% of
+the current maintainers. An e-mail is sent to the
+mailing list, inviting maintainers of the project to vote. The voting period is
+five business days. Issues related to a maintainer's performance should be
+discussed with them among the other maintainers so that they are not surprised
+by a pull request removing them.
+"""
+
+ [Rules.decisions]
+
+ title = "How are decisions made?"
+
+ text = """
+Short answer: EVERYTHING IS A PULL REQUEST.
+
+distribution is an open-source project with an open design philosophy. This means
+that the repository is the source of truth for EVERY aspect of the project,
+including its philosophy, design, road map, and APIs. *If it's part of the
+project, it's in the repo. If it's in the repo, it's part of the project.*
+
+As a result, all decisions can be expressed as changes to the repository. An
+implementation change is a change to the source code. An API change is a change
+to the API specification. A philosophy change is a change to the philosophy
+manifesto, and so on.
+
+All decisions affecting distribution, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Merge or refuse the pull request. Who does this depends on the nature
+of the pull request and which areas of the project it affects.
+"""
+
+ [Rules.DCO]
+
+ title = "Helping contributors with the DCO"
+
+ text = """
+The [DCO or `Sign your work`](
+https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some distribution contributors are not as familiar with `git`, or have used a web
+based editor, and thus asking them to `git commit --amend -s` is not the best
+way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO.
+The most trivial way for a contributor to allow the maintainer to do this, is to
+add a DCO signature in a pull requests's comment, or a maintainer can simply
+note that the change is sufficiently trivial that it does not substantially
+change the existing contribution - i.e., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+"""
+
+ [Rules."no direct push"]
+
+ title = "I'm a maintainer. Should I make pull requests too?"
+
+ text = """
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+"""
+
+ [Rules.tsc]
+
+ title = "Conflict Resolution and technical disputes"
+
+ text = """
+distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
+"""
+
+ [Rules.meta]
+
+ title = "How is this process changed?"
+
+ text = "Just like everything else: by making a pull request :)"
+
+# Current project organization
+[Org]
+
+ [Org.Maintainers]
+ people = [
+ "dmcgowan",
+ "dmp42",
+ "stevvooe",
+ ]
+ [Org.Reviewers]
+ people = [
+ "manishtomar",
+ "caervs",
+ "davidswu",
+ "RobbKistler"
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.caervs]
+ Name = "Ryan Abrams"
+ Email = "rdabrams@gmail.com"
+ GitHub = "caervs"
+
+ [people.davidswu]
+ Name = "David Wu"
+ Email = "dwu7401@gmail.com"
+ GitHub = "davidswu"
+
+ [people.dmcgowan]
+ Name = "Derek McGowan"
+ Email = "derek@mcgstyle.net"
+ GitHub = "dmcgowan"
+
+ [people.dmp42]
+ Name = "Olivier Gambier"
+ Email = "olivier@docker.com"
+ GitHub = "dmp42"
+
+ [people.manishtomar]
+ Name = "Manish Tomar"
+ Email = "manish.tomar@docker.com"
+ GitHub = "manishtomar"
+
+ [people.RobbKistler]
+ Name = "Robb Kistler"
+ Email = "robb.kistler@docker.com"
+ GitHub = "RobbKistler"
+
+ [people.stevvooe]
+ Name = "Stephen Day"
+ Email = "stephen.day@docker.com"
+ GitHub = "stevvooe"
diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile
new file mode 100644
index 000000000..4635c6eca
--- /dev/null
+++ b/vendor/github.com/docker/distribution/Makefile
@@ -0,0 +1,102 @@
+# Root directory of the project (absolute path).
+ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+# Used to populate version variable in main package.
+VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
+REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
+
+
+PKG=github.com/docker/distribution
+
+# Project packages.
+PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/)
+INTEGRATION_PACKAGE=${PKG}
+COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES})
+
+
+# Project binaries.
+COMMANDS=registry digest registry-api-descriptor-template
+
+# Allow turning off function inlining and variable registerization
+ifeq (${DISABLE_OPTIMIZATION},true)
+ GO_GCFLAGS=-gcflags "-N -l"
+ VERSION:="$(VERSION)-noopt"
+endif
+
+WHALE = "+"
+
+# Go files
+#
+TESTFLAGS_RACE=
+GOFILES=$(shell find . -type f -name '*.go')
+GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
+GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
+
+BINARIES=$(addprefix bin/,$(COMMANDS))
+
+# Flags passed to `go test`
+TESTFLAGS ?= -v $(TESTFLAGS_RACE)
+TESTFLAGS_PARALLEL ?= 8
+
+.PHONY: all build binaries check clean test test-race test-full integration coverage
+.DEFAULT: all
+
+all: binaries
+
+# This only needs to be generated by hand when cutting full releases.
+version/version.go:
+ @echo "$(WHALE) $@"
+ ./version/version.sh > $@
+
+check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticcheck", "goimports", "structcheck")
+ @echo "$(WHALE) $@"
+ gometalinter --config .gometalinter.json ./...
+
+test: ## run tests, except integration test with test.short
+ @echo "$(WHALE) $@"
+ @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
+
+test-race: ## run tests, except integration test with test.short and race
+ @echo "$(WHALE) $@"
+ @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
+
+test-full: ## run tests, except integration tests
+ @echo "$(WHALE) $@"
+ @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
+
+integration: ## run integration tests
+ @echo "$(WHALE) $@"
+ @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE}
+
+coverage: ## generate coverprofiles from the unit tests
+ @echo "$(WHALE) $@"
+ @rm -f coverage.txt
+ @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null
+ @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \
+ go test ${GO_TAGS} ${TESTFLAGS} \
+ -cover \
+ -coverprofile=profile.out \
+ -covermode=atomic $$pkg || exit; \
+ if [ -f profile.out ]; then \
+ cat profile.out >> coverage.txt; \
+ rm profile.out; \
+ fi; \
+ done )
+
+FORCE:
+
+# Build a binary from a cmd.
+bin/%: cmd/% FORCE
+ @echo "$(WHALE) $@${BINARY_SUFFIX}"
+ @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$<
+
+binaries: $(BINARIES) ## build binaries
+ @echo "$(WHALE) $@"
+
+build:
+ @echo "$(WHALE) $@"
+ @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES)
+
+clean: ## clean up binaries
+ @echo "$(WHALE) $@"
+ @rm -f $(BINARIES)
diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 000000000..701127afe
--- /dev/null
+++ b/vendor/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release-
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release-relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](github.com/docker/docker-registry)
+ implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+ distribution package.
+- Define a strong data model for distributing docker images
+- Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone. If a feature or
+bugfix is not part of a milestone, it is currently unscheduled for
+implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
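+
+As a purely illustrative sketch (not part of the roadmap itself), content
+addressing with the vendored `github.com/opencontainers/go-digest` package
+looks roughly like this: the identifier is derived from the bytes, and
+verification is just recomputing the digest.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/opencontainers/go-digest"
+)
+
+func main() {
+	// The content identifier is derived purely from the bytes themselves.
+	blob := []byte("example layer data")
+	dgst := digest.FromBytes(blob) // sha256:... by default
+
+	// Verifying a byte stream against the identifier means recomputing the
+	// digest and comparing; no trusted index is required.
+	fmt.Println(dgst, dgst == digest.FromBytes(blob))
+}
+```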
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping the
+core scope narrow while still allowing new capabilities to be layered on top.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but the docker client
+restricts it to mirroring only the official Docker Hub. This functionality
+can be expanded when image provenance has been specified and implemented in the
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes at the cost of weaker consistency and higher latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID-compliant
+storage systems to handle metadata.
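+
+As a rough illustration of the kind of abstraction being discussed, the mutable
+tag operations might sit behind a narrow interface such as the hypothetical one
+below. The names here are invented for the sketch and do not describe an agreed
+API.
+
+```go
+package metadata
+
+import "context"
+
+// TagStore is a hypothetical example of a narrow, mutable-metadata interface
+// that an ACID-compliant store could implement instead of the blob storage
+// backend. Illustrative only.
+type TagStore interface {
+	// Tag points tag at a manifest digest within a repository.
+	Tag(ctx context.Context, repo, tag, manifestDigest string) error
+	// Untag removes the tag from the repository.
+	Untag(ctx context.Context, repo, tag string) error
+	// Resolve returns the manifest digest the tag currently points to.
+	Resolve(ctx context.Context, repo, tag string) (string, error)
+}
+```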
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been omitted from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding on
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools and either propose the result as a standard search API or use it to
+inform a standardization process. Once this has been explored, we will
+integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a frequently requested feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
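+For illustration only, that "simple" approach reads roughly like the sketch
+below, written against hypothetical enumeration and delete helpers rather than
+the real storage driver API. The next paragraph shows why this is unsafe
+without additional coordination.
+
+```go
+// Package gcsketch shows a naive mark-and-sweep over the blob DAG. It is an
+// illustrative sketch only; the Store interface and its methods are
+// hypothetical and ignore the concurrency hazard described below.
+package gcsketch
+
+import "context"
+
+type Manifest struct {
+	References []string // digests of the blobs the manifest references
+}
+
+type Store interface {
+	Manifests(ctx context.Context) ([]Manifest, error)
+	Blobs(ctx context.Context) ([]string, error) // all blob digests
+	DeleteBlob(ctx context.Context, dgst string) error
+}
+
+func MarkAndSweep(ctx context.Context, s Store) error {
+	// Mark: collect every digest reachable from some manifest.
+	marked := map[string]bool{}
+	manifests, err := s.Manifests(ctx)
+	if err != nil {
+		return err
+	}
+	for _, m := range manifests {
+		for _, dgst := range m.References {
+			marked[dgst] = true
+		}
+	}
+
+	// Sweep: delete every blob that nothing references.
+	blobs, err := s.Blobs(ctx)
+	if err != nil {
+		return err
+	}
+	for _, dgst := range blobs {
+		if marked[dgst] {
+			continue
+		}
+		if err := s.DeleteBlob(ctx, dgst); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```
+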
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: (1) maintaining a consistent consensus
+  of reference counts across a set of registries and (2) building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+ and find all blob references. Delete all unreferenced blobs. This approach
+ is very simple but requires disabling writes for a period of time while the
+ service reads all data. This is slow and expensive but very accurate and
+ effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+ writes, writes are sent to another storage backend while reads are broadcast
+ to the new and old backends. GC is then performed on the read-only portion.
+ Because writes land in the new backend, the data in the read-only section
+ can be safely deleted. The main drawbacks of this approach are complexity
+ and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+ off metadata scalability for simplicity and performance. This is a very good
+ option for most registry deployments. This would create a bottleneck for
+ registry metadata. However, metadata is generally not the main bottleneck
+ when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-and-sweep based solution may seem simple, but the coordination
+work it requires may offset the extra work it might take to build a _Centralized
+Oracle_. We'll accept proposals for any solution but please coordinate with us
+before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take care to vendor the version you depend on.
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
+
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
index 145b07853..c0e9261be 100644
--- a/vendor/github.com/docker/distribution/blobs.go
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -10,6 +10,7 @@ import (
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
@@ -66,12 +67,19 @@ type Descriptor struct {
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
- // against against this digest.
+ // against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
+ // Annotations contains arbitrary metadata relating to the targeted content.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Platform describes the platform which the image in the manifest runs on.
+ // This should only be used when referring to a manifest.
+ Platform *v1.Platform `json:"platform,omitempty"`
+
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
index 020d33258..8e0b788d6 100644
--- a/vendor/github.com/docker/distribution/errors.go
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -20,6 +20,10 @@ var ErrManifestNotModified = errors.New("manifest not modified")
// performed
var ErrUnsupported = errors.New("operation unsupported")
+// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
+// manifest but the registry is configured to reject it
+var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
+
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go
new file mode 100644
index 000000000..b5a532144
--- /dev/null
+++ b/vendor/github.com/docker/distribution/metrics/prometheus.go
@@ -0,0 +1,13 @@
+package metrics
+
+import "github.com/docker/go-metrics"
+
+const (
+ // NamespacePrefix is the namespace of prometheus metrics
+ NamespacePrefix = "registry"
+)
+
+var (
+ // StorageNamespace is the prometheus namespace of blob/cache related operations
+ StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
+)
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
index c34207d03..6c3210989 100644
--- a/vendor/github.com/docker/distribution/registry.go
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -54,6 +54,11 @@ type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
+// RepositoryRemover removes given repository
+type RepositoryRemover interface {
+ Remove(ctx context.Context, name reference.Named) error
+}
+
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
@@ -73,6 +78,21 @@ func (o WithTagOption) Apply(m ManifestService) error {
return nil
}
+// WithManifestMediaTypes lists the media types the client wishes
+// the server to provide.
+func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
+ return WithManifestMediaTypesOption{mediaTypes}
+}
+
+// WithManifestMediaTypesOption holds a list of accepted media types
+type WithManifestMediaTypesOption struct{ MediaTypes []string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
+ // no implementation
+ return nil
+}
+
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
index 49a64a86e..d77e70473 100644
--- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -36,9 +36,5 @@ func ServeJSON(w http.ResponseWriter, err error) error {
w.WriteHeader(sc)
- if err := json.NewEncoder(w).Encode(err); err != nil {
- return err
- }
-
- return nil
+ return json.NewEncoder(w).Encode(err)
}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
index 5b80d5be7..9612ac2e5 100644
--- a/vendor/github.com/docker/distribution/registry/api/v2/routes.go
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -14,15 +14,6 @@ const (
RouteNameCatalog = "catalog"
)
-var allEndpoints = []string{
- RouteNameManifest,
- RouteNameCatalog,
- RouteNameTags,
- RouteNameBlob,
- RouteNameBlobUpload,
- RouteNameBlobUploadChunk,
-}
-
// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
index c9bdfc355..6e3f1ccc4 100644
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -45,13 +45,13 @@ type Manager interface {
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
- Challanges: make(map[string][]Challenge),
+ Challenges: make(map[string][]Challenge),
}
}
type simpleManager struct {
sync.RWMutex
- Challanges map[string][]Challenge
+ Challenges map[string][]Challenge
}
func normalizeURL(endpoint *url.URL) {
@@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
m.RLock()
defer m.RUnlock()
- challenges := m.Challanges[endpoint.String()]
+ challenges := m.Challenges[endpoint.String()]
return challenges, nil
}
@@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error {
m.Lock()
defer m.Unlock()
- m.Challanges[urlCopy.String()] = challenges
+ m.Challenges[urlCopy.String()] = challenges
return nil
}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
index 8bd2c3fb6..aa442e654 100644
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
-func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
@@ -75,16 +75,14 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe
}
return &registry{
- client: client,
- ub: ub,
- context: ctx,
+ client: client,
+ ub: ub,
}, nil
}
type registry struct {
- client *http.Client
- ub *v2.URLBuilder
- context context.Context
+ client *http.Client
+ ub *v2.URLBuilder
}
// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
@@ -133,7 +131,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
}
// NewRepository creates a new Repository for the given repository name and base URL.
-func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
@@ -146,18 +144,16 @@ func NewRepository(ctx context.Context, name reference.Named, baseURL string, tr
}
return &repository{
- client: client,
- ub: ub,
- name: name,
- context: ctx,
+ client: client,
+ ub: ub,
+ name: name,
}, nil
}
type repository struct {
- client *http.Client
- ub *v2.URLBuilder
- context context.Context
- name reference.Named
+ client *http.Client
+ ub *v2.URLBuilder
+ name reference.Named
}
func (r *repository) Named() reference.Named {
@@ -190,32 +186,35 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani
func (r *repository) Tags(ctx context.Context) distribution.TagService {
return &tags{
- client: r.client,
- ub: r.ub,
- context: r.context,
- name: r.Named(),
+ client: r.client,
+ ub: r.ub,
+ name: r.Named(),
}
}
// tags implements remote tagging operations.
type tags struct {
- client *http.Client
- ub *v2.URLBuilder
- context context.Context
- name reference.Named
+ client *http.Client
+ ub *v2.URLBuilder
+ name reference.Named
}
// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
var tags []string
- u, err := t.ub.BuildTagsURL(t.name)
+ listURLStr, err := t.ub.BuildTagsURL(t.name)
+ if err != nil {
+ return tags, err
+ }
+
+ listURL, err := url.Parse(listURLStr)
if err != nil {
return tags, err
}
for {
- resp, err := t.client.Get(u)
+ resp, err := t.client.Get(listURL.String())
if err != nil {
return tags, err
}
@@ -235,7 +234,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
- u = strings.Trim(strings.Split(link, ";")[0], "<>")
+ linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
+ linkURL, err := url.Parse(linkURLStr)
+ if err != nil {
+ return tags, err
+ }
+
+ listURL = listURL.ResolveReference(linkURL)
} else {
return tags, nil
}
@@ -321,7 +326,8 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er
defer resp.Body.Close()
switch {
- case resp.StatusCode >= 200 && resp.StatusCode < 400:
+ case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
+ // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
return descriptorFromResponse(resp)
default:
// if the response is an error - there will be no body to decode.
@@ -421,18 +427,22 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
ref reference.Named
err error
contentDgst *digest.Digest
+ mediaTypes []string
)
for _, option := range options {
- if opt, ok := option.(distribution.WithTagOption); ok {
+ switch opt := option.(type) {
+ case distribution.WithTagOption:
digestOrTag = opt.Tag
ref, err = reference.WithTag(ms.name, opt.Tag)
if err != nil {
return nil, err
}
- } else if opt, ok := option.(contentDigestOption); ok {
+ case contentDigestOption:
contentDgst = opt.digest
- } else {
+ case distribution.WithManifestMediaTypesOption:
+ mediaTypes = opt.MediaTypes
+ default:
err := option.Apply(ms)
if err != nil {
return nil, err
@@ -448,6 +458,10 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
}
}
+ if len(mediaTypes) == 0 {
+ mediaTypes = distribution.ManifestMediaTypes()
+ }
+
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
@@ -458,7 +472,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
return nil, err
}
- for _, t := range distribution.ManifestMediaTypes() {
+ for _, t := range mediaTypes {
req.Header.Add("Accept", t)
}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
index e5ff09d75..1d0b382fb 100644
--- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
+++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
- "os"
"regexp"
"strconv"
)
@@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
lastReaderOffset := hrs.readerOffset
- if whence == os.SEEK_SET && hrs.rc == nil {
+ if whence == io.SeekStart && hrs.rc == nil {
// If no request has been made yet, and we are seeking to an
// absolute position, set the read offset as well to avoid an
// unnecessary request.
@@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
newOffset := hrs.seekOffset
switch whence {
- case os.SEEK_CUR:
+ case io.SeekCurrent:
newOffset += offset
- case os.SEEK_END:
+ case io.SeekEnd:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + offset
- case os.SEEK_SET:
+ case io.SeekStart:
newOffset = offset
}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
index cdc34f5fe..ac4c45211 100644
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/docker/distribution"
+ prometheus "github.com/docker/distribution/metrics"
"github.com/opencontainers/go-digest"
)
@@ -38,6 +39,11 @@ type cachedBlobStatter struct {
tracker MetricsTracker
}
+var (
+ // cacheCount is the number of total cache request received/hits/misses
+ cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
+)
+
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
@@ -58,6 +64,7 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b
}
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ cacheCount.WithValues("Request").Inc(1)
desc, err := cbds.cache.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
@@ -66,12 +73,13 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
goto fallback
}
-
+ cacheCount.WithValues("Hit").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Hit()
}
return desc, nil
fallback:
+ cacheCount.WithValues("Miss").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Miss()
}
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf
index d67edd779..a249caf26 100644
--- a/vendor/github.com/docker/distribution/vendor.conf
+++ b/vendor/github.com/docker/distribution/vendor.conf
@@ -1,29 +1,36 @@
-github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
-github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
+github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
+github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
-github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
+github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
-github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
+github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
-github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
+github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
+github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
+github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
-github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
+github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6
+github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
+github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
+github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
+github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
+github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
-github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
@@ -38,6 +45,7 @@ google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
-gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
+gopkg.in/yaml.v2 v2.2.1
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
diff --git a/vendor/github.com/docker/docker-credential-helpers/README.md b/vendor/github.com/docker/docker-credential-helpers/README.md
deleted file mode 100644
index f9cbc3fb5..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-## Introduction
-
-docker-credential-helpers is a suite of programs to use native stores to keep Docker credentials safe.
-
-## Installation
-
-Go to the [Releases](https://github.com/docker/docker-credential-helpers/releases) page and download the binary that works better for you. Put that binary in your `$PATH`, so Docker can find it.
-
-### Building from scratch
-
-The programs in this repository are written with the Go programming language. These instructions assume that you have previous knowledge about the language and you have it installed in your machine.
-
-1 - Download the source and put it in your `$GOPATH` with `go get`.
-
-```
-$ go get github.com/docker/docker-credential-helpers
-```
-
-2 - Use `make` to build the program you want. That will leave any executable in the `bin` directory inside the repository.
-
-```
-$ cd $GOPATH/docker/docker-credentials-helpers
-$ make osxkeychain
-```
-
-3 - Put that binary in your `$PATH`, so Docker can find it.
-
-## Usage
-
-### With the Docker Engine
-
-Set the `credsStore` option in your `.docker/config.json` file with the suffix of the program you want to use. For instance, set it to `osxkeychain` if you want to use `docker-credential-osxkeychain`.
-
-```json
-{
- "credsStore": "osxkeychain"
-}
-```
-
-### With other command line applications
-
-The sub-package [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) includes
-functions to call external programs from your own command line applications.
-
-There are three things you need to know if you need to interact with a helper:
-
-1. The name of the program to execute, for instance `docker-credential-osxkeychain`.
-2. The server address to identify the credentials, for instance `https://example.com`.
-3. The username and secret to store, when you want to store credentials.
-
-You can see examples of each function in the [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) documentation.
-
-### Available programs
-
-1. osxkeychain: Provides a helper to use the OS X keychain as credentials store.
-2. secretservice: Provides a helper to use the D-Bus secret service as credentials store.
-3. wincred: Provides a helper to use Windows credentials manager as store.
-4. pass: Provides a helper to use `pass` as credentials store.
-
-#### Note
-
-`pass` needs to be configured for `docker-credential-pass` to work properly.
-It must be initialized with a `gpg2` key ID. Make sure your GPG key exists is in `gpg2` keyring as `pass` uses `gpg2` instead of the regular `gpg`.
-
-## Development
-
-A credential helper can be any program that can read values from the standard input. We use the first argument in the command line to differentiate the kind of command to execute. There are four valid values:
-
-- `store`: Adds credentials to the keychain. The payload in the standard input is a JSON document with `ServerURL`, `Username` and `Secret`.
-- `get`: Retrieves credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
-- `erase`: Removes credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
-- `list`: Lists stored credentials. There is no standard input payload.
-
-This repository also includes libraries to implement new credentials programs in Go. Adding a new helper program is pretty easy. You can see how the OS X keychain helper works in the [osxkeychain](osxkeychain) directory.
-
-1. Implement the interface `credentials.Helper` in `YOUR_PACKAGE/YOUR_PACKAGE_$GOOS.go`
-2. Create a main program in `YOUR_PACKAGE/cmd/main_$GOOS.go`.
-3. Add make tasks to build your program and run tests.
-
-## License
-
-MIT. See [LICENSE](LICENSE) for more information.
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
index 033a5fee5..ecc23e68b 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -1,4 +1,4 @@
package credentials
// Version holds a string describing the current version
-const Version = "0.6.0"
+const Version = "0.6.2"
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
deleted file mode 100644
index f84d61ee5..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
+++ /dev/null
@@ -1,228 +0,0 @@
-#include "osxkeychain_darwin.h"
-#include <CoreFoundation/CoreFoundation.h>
-#include <Foundation/NSValue.h>
-#include <stdio.h>
-#include <string.h>
-
-char *get_error(OSStatus status) {
- char *buf = malloc(128);
- CFStringRef str = SecCopyErrorMessageString(status, NULL);
- int success = CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8);
- if (!success) {
- strncpy(buf, "Unknown error", 128);
- }
- return buf;
-}
-
-char *keychain_add(struct Server *server, char *label, char *username, char *secret) {
- SecKeychainItemRef item;
-
- OSStatus status = SecKeychainAddInternetPassword(
- NULL,
- strlen(server->host), server->host,
- 0, NULL,
- strlen(username), username,
- strlen(server->path), server->path,
- server->port,
- server->proto,
- kSecAuthenticationTypeDefault,
- strlen(secret), secret,
- &item
- );
-
- if (status) {
- return get_error(status);
- }
-
- SecKeychainAttribute attribute;
- SecKeychainAttributeList attrs;
- attribute.tag = kSecLabelItemAttr;
- attribute.data = label;
- attribute.length = strlen(label);
- attrs.count = 1;
- attrs.attr = &attribute;
-
- status = SecKeychainItemModifyContent(item, &attrs, 0, NULL);
-
- if (status) {
- return get_error(status);
- }
-
- return NULL;
-}
-
-char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) {
- char *tmp;
- SecKeychainItemRef item;
-
- OSStatus status = SecKeychainFindInternetPassword(
- NULL,
- strlen(server->host), server->host,
- 0, NULL,
- 0, NULL,
- strlen(server->path), server->path,
- server->port,
- server->proto,
- kSecAuthenticationTypeDefault,
- secret_l, (void **)&tmp,
- &item);
-
- if (status) {
- return get_error(status);
- }
-
- *secret = strdup(tmp);
- SecKeychainItemFreeContent(NULL, tmp);
-
- SecKeychainAttributeList list;
- SecKeychainAttribute attr;
-
- list.count = 1;
- list.attr = &attr;
- attr.tag = kSecAccountItemAttr;
-
- status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL);
- if (status) {
- return get_error(status);
- }
-
- *username = strdup(attr.data);
- *username_l = attr.length;
- SecKeychainItemFreeContent(&list, NULL);
-
- return NULL;
-}
-
-char *keychain_delete(struct Server *server) {
- SecKeychainItemRef item;
-
- OSStatus status = SecKeychainFindInternetPassword(
- NULL,
- strlen(server->host), server->host,
- 0, NULL,
- 0, NULL,
- strlen(server->path), server->path,
- server->port,
- server->proto,
- kSecAuthenticationTypeDefault,
- 0, NULL,
- &item);
-
- if (status) {
- return get_error(status);
- }
-
- status = SecKeychainItemDelete(item);
- if (status) {
- return get_error(status);
- }
- return NULL;
-}
-
-char * CFStringToCharArr(CFStringRef aString) {
- if (aString == NULL) {
- return NULL;
- }
- CFIndex length = CFStringGetLength(aString);
- CFIndex maxSize =
- CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
- char *buffer = (char *)malloc(maxSize);
- if (CFStringGetCString(aString, buffer, maxSize,
- kCFStringEncodingUTF8)) {
- return buffer;
- }
- return NULL;
-}
-
-char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) {
- CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8);
- CFMutableDictionaryRef query = CFDictionaryCreateMutable (NULL, 1, NULL, NULL);
- CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword);
- CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue);
- CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll);
- CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF);
- //Use this query dictionary
- CFTypeRef result= NULL;
- OSStatus status = SecItemCopyMatching(
- query,
- &result);
-
- CFRelease(credsLabelCF);
-
- //Ran a search and store the results in result
- if (status) {
- return get_error(status);
- }
- CFIndex numKeys = CFArrayGetCount(result);
- *paths = (char **) malloc((int)sizeof(char *)*numKeys);
- *accts = (char **) malloc((int)sizeof(char *)*numKeys);
- //result is of type CFArray
- for(CFIndex i=0; i<numKeys; i++) {
- CFDictionaryRef currKey = CFArrayGetValueAtIndex(result,i);
-
- CFStringRef protocolTmp = CFDictionaryGetValue(currKey, CFSTR("ptcl"));
- if (protocolTmp != NULL) {
- CFStringRef protocolStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), protocolTmp);
- if (CFStringCompare(protocolStr, CFSTR("htps"), 0) == kCFCompareEqualTo) {
- protocolTmp = CFSTR("https://");
- }
- else {
- protocolTmp = CFSTR("http://");
- }
- CFRelease(protocolStr);
- }
- else {
- char * path = "0";
- char * acct = "0";
- (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)));
- memcpy((*paths)[i], path, sizeof(char)*(strlen(path)));
- (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)));
- memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)));
- continue;
- }
-
- CFMutableStringRef str = CFStringCreateMutableCopy(NULL, 0, protocolTmp);
- CFStringRef serverTmp = CFDictionaryGetValue(currKey, CFSTR("srvr"));
- if (serverTmp != NULL) {
- CFStringAppend(str, serverTmp);
- }
-
- CFStringRef pathTmp = CFDictionaryGetValue(currKey, CFSTR("path"));
- if (pathTmp != NULL) {
- CFStringAppend(str, pathTmp);
- }
-
- const NSNumber * portTmp = CFDictionaryGetValue(currKey, CFSTR("port"));
- if (portTmp != NULL && portTmp.integerValue != 0) {
- CFStringRef portStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), portTmp);
- CFStringAppend(str, CFSTR(":"));
- CFStringAppend(str, portStr);
- CFRelease(portStr);
- }
-
- CFStringRef acctTmp = CFDictionaryGetValue(currKey, CFSTR("acct"));
- if (acctTmp == NULL) {
- acctTmp = CFSTR("account not defined");
- }
-
- char * path = CFStringToCharArr(str);
- char * acct = CFStringToCharArr(acctTmp);
-
- //We now have all we need, username and servername. Now export this to .go
- (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
- memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
- (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
- memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
-
- CFRelease(str);
- }
- *list_l = (int)numKeys;
- return NULL;
-}
-
-void freeListData(char *** data, unsigned int length) {
- for(int i=0; i<length; i++) {
- free((*data)[i]);
- }
- free(*data);
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
deleted file mode 100644
index 439126761..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package osxkeychain
-
-/*
-#cgo CFLAGS: -x objective-c -mmacosx-version-min=10.10
-#cgo LDFLAGS: -framework Security -framework Foundation -mmacosx-version-min=10.10
-
-#include "osxkeychain_darwin.h"
-#include <stdlib.h>
-*/
-import "C"
-import (
- "errors"
- "net/url"
- "strconv"
- "strings"
- "unsafe"
-
- "github.com/docker/docker-credential-helpers/credentials"
-)
-
-// errCredentialsNotFound is the specific error message returned by OS X
-// when the credentials are not in the keychain.
-const errCredentialsNotFound = "The specified item could not be found in the keychain."
-
-// Osxkeychain handles secrets using the OS X Keychain as store.
-type Osxkeychain struct{}
-
-// Add adds new credentials to the keychain.
-func (h Osxkeychain) Add(creds *credentials.Credentials) error {
- h.Delete(creds.ServerURL)
-
- s, err := splitServer(creds.ServerURL)
- if err != nil {
- return err
- }
- defer freeServer(s)
-
- label := C.CString(credentials.CredsLabel)
- defer C.free(unsafe.Pointer(label))
- username := C.CString(creds.Username)
- defer C.free(unsafe.Pointer(username))
- secret := C.CString(creds.Secret)
- defer C.free(unsafe.Pointer(secret))
-
- errMsg := C.keychain_add(s, label, username, secret)
- if errMsg != nil {
- defer C.free(unsafe.Pointer(errMsg))
- return errors.New(C.GoString(errMsg))
- }
-
- return nil
-}
-
-// Delete removes credentials from the keychain.
-func (h Osxkeychain) Delete(serverURL string) error {
- s, err := splitServer(serverURL)
- if err != nil {
- return err
- }
- defer freeServer(s)
-
- errMsg := C.keychain_delete(s)
- if errMsg != nil {
- defer C.free(unsafe.Pointer(errMsg))
- return errors.New(C.GoString(errMsg))
- }
-
- return nil
-}
-
-// Get returns the username and secret to use for a given registry server URL.
-func (h Osxkeychain) Get(serverURL string) (string, string, error) {
- s, err := splitServer(serverURL)
- if err != nil {
- return "", "", err
- }
- defer freeServer(s)
-
- var usernameLen C.uint
- var username *C.char
- var secretLen C.uint
- var secret *C.char
- defer C.free(unsafe.Pointer(username))
- defer C.free(unsafe.Pointer(secret))
-
- errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret)
- if errMsg != nil {
- defer C.free(unsafe.Pointer(errMsg))
- goMsg := C.GoString(errMsg)
- if goMsg == errCredentialsNotFound {
- return "", "", credentials.NewErrCredentialsNotFound()
- }
-
- return "", "", errors.New(goMsg)
- }
-
- user := C.GoStringN(username, C.int(usernameLen))
- pass := C.GoStringN(secret, C.int(secretLen))
- return user, pass, nil
-}
-
-// List returns the stored URLs and corresponding usernames.
-func (h Osxkeychain) List() (map[string]string, error) {
- credsLabelC := C.CString(credentials.CredsLabel)
- defer C.free(unsafe.Pointer(credsLabelC))
-
- var pathsC **C.char
- defer C.free(unsafe.Pointer(pathsC))
- var acctsC **C.char
- defer C.free(unsafe.Pointer(acctsC))
- var listLenC C.uint
- errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC)
- if errMsg != nil {
- defer C.free(unsafe.Pointer(errMsg))
- goMsg := C.GoString(errMsg)
- return nil, errors.New(goMsg)
- }
-
- defer C.freeListData(&pathsC, listLenC)
- defer C.freeListData(&acctsC, listLenC)
-
- var listLen int
- listLen = int(listLenC)
- pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
- acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
- //taking the array of c strings into go while ignoring all the stuff irrelevant to credentials-helper
- resp := make(map[string]string)
- for i := 0; i < listLen; i++ {
- if C.GoString(pathTmp[i]) == "0" {
- continue
- }
- resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
- }
- return resp, nil
-}
-
-func splitServer(serverURL string) (*C.struct_Server, error) {
- u, err := parseURL(serverURL)
- if err != nil {
- return nil, err
- }
-
- proto := C.kSecProtocolTypeHTTPS
- if u.Scheme == "http" {
- proto = C.kSecProtocolTypeHTTP
- }
- var port int
- p := getPort(u)
- if p != "" {
- port, err = strconv.Atoi(p)
- if err != nil {
- return nil, err
- }
- }
-
- return &C.struct_Server{
- proto: C.SecProtocolType(proto),
- host: C.CString(getHostname(u)),
- port: C.uint(port),
- path: C.CString(u.Path),
- }, nil
-}
-
-func freeServer(s *C.struct_Server) {
- C.free(unsafe.Pointer(s.host))
- C.free(unsafe.Pointer(s.path))
-}
-
-// parseURL parses and validates a given serverURL to an url.URL, and
-// returns an error if validation failed. Querystring parameters are
-// omitted in the resulting URL, because they are not used in the helper.
-//
-// If serverURL does not have a valid scheme, `//` is used as scheme
-// before parsing. This prevents the hostname being used as path,
-// and the credentials being stored without host.
-func parseURL(serverURL string) (*url.URL, error) {
- // Check if serverURL has a scheme, otherwise add `//` as scheme.
- if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") {
- serverURL = "//" + serverURL
- }
-
- u, err := url.Parse(serverURL)
- if err != nil {
- return nil, err
- }
-
- if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" {
- return nil, errors.New("unsupported scheme: " + u.Scheme)
- }
- if getHostname(u) == "" {
- return nil, errors.New("no hostname in URL")
- }
-
- u.RawQuery = ""
- return u, nil
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
deleted file mode 100644
index c54e7d728..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <Security/Security.h>
-
-struct Server {
- SecProtocolType proto;
- char *host;
- char *path;
- unsigned int port;
-};
-
-char *keychain_add(struct Server *server, char *label, char *username, char *secret);
-char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret);
-char *keychain_delete(struct Server *server);
-char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l);
-void freeListData(char *** data, unsigned int length); \ No newline at end of file
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
deleted file mode 100644
index 0b7297d2f..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//+build go1.8
-
-package osxkeychain
-
-import "net/url"
-
-func getHostname(u *url.URL) string {
- return u.Hostname()
-}
-
-func getPort(u *url.URL) string {
- return u.Port()
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
deleted file mode 100644
index bdf9b7b00..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//+build !go1.8
-
-package osxkeychain
-
-import (
- "net/url"
- "strings"
-)
-
-func getHostname(u *url.URL) string {
- return stripPort(u.Host)
-}
-
-func getPort(u *url.URL) string {
- return portOnly(u.Host)
-}
-
-func stripPort(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return hostport
- }
- if i := strings.IndexByte(hostport, ']'); i != -1 {
- return strings.TrimPrefix(hostport[:i], "[")
- }
- return hostport[:colon]
-}
-
-func portOnly(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return ""
- }
- if i := strings.Index(hostport, "]:"); i != -1 {
- return hostport[i+len("]:"):]
- }
- if strings.Contains(hostport, "]") {
- return ""
- }
- return hostport[colon+len(":"):]
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
deleted file mode 100644
index 35dea92da..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
+++ /dev/null
@@ -1,162 +0,0 @@
-#include <string.h>
-#include <stdlib.h>
-#include "secretservice_linux.h"
-
-const SecretSchema *docker_get_schema(void)
-{
- static const SecretSchema docker_schema = {
- "io.docker.Credentials", SECRET_SCHEMA_NONE,
- {
- { "label", SECRET_SCHEMA_ATTRIBUTE_STRING },
- { "server", SECRET_SCHEMA_ATTRIBUTE_STRING },
- { "username", SECRET_SCHEMA_ATTRIBUTE_STRING },
- { "docker_cli", SECRET_SCHEMA_ATTRIBUTE_STRING },
- { "NULL", 0 },
- }
- };
- return &docker_schema;
-}
-
-GError *add(char *label, char *server, char *username, char *secret) {
- GError *err = NULL;
-
- secret_password_store_sync (DOCKER_SCHEMA, SECRET_COLLECTION_DEFAULT,
- server, secret, NULL, &err,
- "label", label,
- "server", server,
- "username", username,
- "docker_cli", "1",
- NULL);
- return err;
-}
-
-GError *delete(char *server) {
- GError *err = NULL;
-
- secret_password_clear_sync(DOCKER_SCHEMA, NULL, &err,
- "server", server,
- "docker_cli", "1",
- NULL);
- if (err != NULL)
- return err;
- return NULL;
-}
-
-char *get_attribute(const char *attribute, SecretItem *item) {
- GHashTable *attributes;
- GHashTableIter iter;
- gchar *value, *key;
-
- attributes = secret_item_get_attributes(item);
- g_hash_table_iter_init(&iter, attributes);
- while (g_hash_table_iter_next(&iter, (void **)&key, (void **)&value)) {
- if (strncmp(key, attribute, strlen(key)) == 0)
- return (char *)value;
- }
- g_hash_table_unref(attributes);
- return NULL;
-}
-
-GError *get(char *server, char **username, char **secret) {
- GError *err = NULL;
- GHashTable *attributes;
- SecretService *service;
- GList *items, *l;
- SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
- SecretValue *secretValue;
- gsize length;
- gchar *value;
-
- attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
- g_hash_table_insert(attributes, g_strdup("server"), g_strdup(server));
- g_hash_table_insert(attributes, g_strdup("docker_cli"), g_strdup("1"));
-
- service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
- if (err == NULL) {
- items = secret_service_search_sync(service, DOCKER_SCHEMA, attributes, flags, NULL, &err);
- if (err == NULL) {
- for (l = items; l != NULL; l = g_list_next(l)) {
- value = secret_item_get_schema_name(l->data);
- if (strncmp(value, "io.docker.Credentials", strlen(value)) != 0) {
- g_free(value);
- continue;
- }
- g_free(value);
- secretValue = secret_item_get_secret(l->data);
- if (secret != NULL) {
- *secret = strdup(secret_value_get(secretValue, &length));
- secret_value_unref(secretValue);
- }
- *username = get_attribute("username", l->data);
- }
- g_list_free_full(items, g_object_unref);
- }
- g_object_unref(service);
- }
- g_hash_table_unref(attributes);
- if (err != NULL) {
- return err;
- }
- return NULL;
-}
-
-GError *list(char *ref_label, char *** paths, char *** accts, unsigned int *list_l) {
- GList *items;
- GError *err = NULL;
- SecretService *service;
- SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
- GHashTable *attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
-
- // List credentials with the right label only
- g_hash_table_insert(attributes, g_strdup("label"), g_strdup(ref_label));
-
- service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
- if (err != NULL) {
- return err;
- }
-
- items = secret_service_search_sync(service, NULL, attributes, flags, NULL, &err);
- int numKeys = g_list_length(items);
- if (err != NULL) {
- return err;
- }
-
- char **tmp_paths = (char **) calloc(1,(int)sizeof(char *)*numKeys);
- char **tmp_accts = (char **) calloc(1,(int)sizeof(char *)*numKeys);
-
- // items now contains our keys from the gnome keyring
- // we will now put it in our two lists to return it to go
- GList *current;
- int listNumber = 0;
- for(current = items; current!=NULL; current = current->next) {
- char *pathTmp = secret_item_get_label(current->data);
- // you cannot have a key without a label in the gnome keyring
- char *acctTmp = get_attribute("username",current->data);
- if (acctTmp==NULL) {
- acctTmp = "account not defined";
- }
-
- tmp_paths[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(pathTmp)+1));
- tmp_accts[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(acctTmp)+1));
-
- memcpy(tmp_paths[listNumber], pathTmp, sizeof(char)*(strlen(pathTmp)+1));
- memcpy(tmp_accts[listNumber], acctTmp, sizeof(char)*(strlen(acctTmp)+1));
-
- listNumber = listNumber + 1;
- }
-
- *paths = (char **) realloc(tmp_paths, (int)sizeof(char *)*listNumber);
- *accts = (char **) realloc(tmp_accts, (int)sizeof(char *)*listNumber);
-
- *list_l = listNumber;
-
- return NULL;
-}
-
-void freeListData(char *** data, unsigned int length) {
- int i;
- for(i=0; i<length; i++) {
- free((*data)[i]);
- }
- free(*data);
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
deleted file mode 100644
index 95a1310b6..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package secretservice
-
-/*
-#cgo pkg-config: libsecret-1
-
-#include "secretservice_linux.h"
-#include <stdlib.h>
-*/
-import "C"
-import (
- "errors"
- "unsafe"
-
- "github.com/docker/docker-credential-helpers/credentials"
-)
-
-// Secretservice handles secrets using Linux secret-service as a store.
-type Secretservice struct{}
-
-// Add adds new credentials to the keychain.
-func (h Secretservice) Add(creds *credentials.Credentials) error {
- if creds == nil {
- return errors.New("missing credentials")
- }
- credsLabel := C.CString(credentials.CredsLabel)
- defer C.free(unsafe.Pointer(credsLabel))
- server := C.CString(creds.ServerURL)
- defer C.free(unsafe.Pointer(server))
- username := C.CString(creds.Username)
- defer C.free(unsafe.Pointer(username))
- secret := C.CString(creds.Secret)
- defer C.free(unsafe.Pointer(secret))
-
- if err := C.add(credsLabel, server, username, secret); err != nil {
- defer C.g_error_free(err)
- errMsg := (*C.char)(unsafe.Pointer(err.message))
- return errors.New(C.GoString(errMsg))
- }
- return nil
-}
-
-// Delete removes credentials from the store.
-func (h Secretservice) Delete(serverURL string) error {
- if serverURL == "" {
- return errors.New("missing server url")
- }
- server := C.CString(serverURL)
- defer C.free(unsafe.Pointer(server))
-
- if err := C.delete(server); err != nil {
- defer C.g_error_free(err)
- errMsg := (*C.char)(unsafe.Pointer(err.message))
- return errors.New(C.GoString(errMsg))
- }
- return nil
-}
-
-// Get returns the username and secret to use for a given registry server URL.
-func (h Secretservice) Get(serverURL string) (string, string, error) {
- if serverURL == "" {
- return "", "", errors.New("missing server url")
- }
- var username *C.char
- defer C.free(unsafe.Pointer(username))
- var secret *C.char
- defer C.free(unsafe.Pointer(secret))
- server := C.CString(serverURL)
- defer C.free(unsafe.Pointer(server))
-
- err := C.get(server, &username, &secret)
- if err != nil {
- defer C.g_error_free(err)
- errMsg := (*C.char)(unsafe.Pointer(err.message))
- return "", "", errors.New(C.GoString(errMsg))
- }
- user := C.GoString(username)
- pass := C.GoString(secret)
- if pass == "" {
- return "", "", credentials.NewErrCredentialsNotFound()
- }
- return user, pass, nil
-}
-
-// List returns the stored URLs and corresponding usernames for a given credentials label
-func (h Secretservice) List() (map[string]string, error) {
- credsLabelC := C.CString(credentials.CredsLabel)
- defer C.free(unsafe.Pointer(credsLabelC))
-
- var pathsC **C.char
- defer C.free(unsafe.Pointer(pathsC))
- var acctsC **C.char
- defer C.free(unsafe.Pointer(acctsC))
- var listLenC C.uint
- err := C.list(credsLabelC, &pathsC, &acctsC, &listLenC)
- if err != nil {
- defer C.free(unsafe.Pointer(err))
- return nil, errors.New("Error from list function in secretservice_linux.c likely due to error in secretservice library")
- }
- defer C.freeListData(&pathsC, listLenC)
- defer C.freeListData(&acctsC, listLenC)
-
- resp := make(map[string]string)
-
- listLen := int(listLenC)
- if listLen == 0 {
- return resp, nil
- }
- // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible
- // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system
- // and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656
- pathTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
- acctTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
- for i := 0; i < listLen; i++ {
- resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
- }
-
- return resp, nil
-}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
deleted file mode 100644
index a28179db3..000000000
--- a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#define SECRET_WITH_UNSTABLE 1
-#define SECRET_API_SUBJECT_TO_CHANGE 1
-#include <libsecret/secret.h>
-
-const SecretSchema *docker_get_schema(void) G_GNUC_CONST;
-
-#define DOCKER_SCHEMA docker_get_schema()
-
-GError *add(char *label, char *server, char *username, char *secret);
-GError *delete(char *server);
-GError *get(char *server, char **username, char **secret);
-GError *list(char *label, char *** paths, char *** accts, unsigned int *list_l);
-void freeListData(char *** data, unsigned int length);
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
new file mode 100644
index 000000000..c6c8fb40e
--- /dev/null
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -0,0 +1,2016 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad <aanand.prasad@gmail.com>
+Aaron Davidson <aaron@databricks.com>
+Aaron Feng <aaron.feng@gmail.com>
+Aaron Huslage <huslage@gmail.com>
+Aaron L. Xu <liker.xu@foxmail.com>
+Aaron Lehmann <aaron.lehmann@docker.com>
+Aaron Welch <welch@packet.net>
+Aaron.L.Xu <likexu@harmonycloud.cn>
+Abel Muiño <amuino@gmail.com>
+Abhijeet Kasurde <akasurde@redhat.com>
+Abhinandan Prativadi <abhi@docker.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abhishek Chanda <abhishek.becs@gmail.com>
+Abhishek Sharma <abhishek@asharma.me>
+Abin Shahab <ashahab@altiscale.com>
+Adam Avilla <aavilla@yp.com>
+Adam Eijdenberg <adam.eijdenberg@gmail.com>
+Adam Kunk <adam.kunk@tiaa-cref.org>
+Adam Miller <admiller@redhat.com>
+Adam Mills <adam@armills.info>
+Adam Pointer <adam.pointer@skybettingandgaming.com>
+Adam Singer <financeCoding@gmail.com>
+Adam Walz <adam@adamwalz.net>
+Addam Hardy <addam.hardy@gmail.com>
+Aditi Rajagopal <arajagopal@us.ibm.com>
+Aditya <aditya@netroy.in>
+Adnan Khan <adnkha@amazon.com>
+Adolfo Ochagavía <aochagavia92@gmail.com>
+Adria Casas <adriacasas88@gmail.com>
+Adrian Moisey <adrian@changeover.za.net>
+Adrian Mouat <adrian.mouat@gmail.com>
+Adrian Oprea <adrian@codesi.nz>
+Adrien Folie <folie.adrien@gmail.com>
+Adrien Gallouët <adrien@gallouet.fr>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Feldman <aidan.feldman@gmail.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
+AJ Bowen <aj@soulshake.net>
+Ajey Charantimath <ajey.charantimath@gmail.com>
+ajneu <ajneu@users.noreply.github.com>
+Akash Gupta <akagup@microsoft.com>
+Akihiro Matsushima <amatsusbit@gmail.com>
+Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
+Akim Demaille <akim.demaille@docker.com>
+Akira Koyasu <mail@akirakoyasu.net>
+Akshay Karle <akshay.a.karle@gmail.com>
+Al Tobey <al@ooyala.com>
+alambike <alambike@gmail.com>
+Alan Scherger <flyinprogrammer@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Albert Callarisa <shark234@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
+Alejandro González Hevia <alejandrgh11@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alena Prokharchyk <alena@rancher.com>
+Alessandro Boch <aboch@tetrationanalytics.com>
+Alessio Biancalana <dottorblaster@gmail.com>
+Alex Chan <alex@alexwlchan.net>
+Alex Chen <alexchenunix@gmail.com>
+Alex Coventry <alx@empirical.com>
+Alex Crawford <alex.crawford@coreos.com>
+Alex Ellis <alexellis2@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Goodman <wagoodman@gmail.com>
+Alex Olshansky <i@creagenics.com>
+Alex Samorukov <samm@os2.kiev.ua>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Artemenko <svetlyak.40wt@gmail.com>
+Alexander Boyd <alex@opengroove.org>
+Alexander Larsson <alexl@redhat.com>
+Alexander Midlash <amidlash@docker.com>
+Alexander Morozov <lk4d4@docker.com>
+Alexander Shopov <ash@kambanaria.org>
+Alexandre Beslic <alexandre.beslic@gmail.com>
+Alexandre Garnier <zigarn@gmail.com>
+Alexandre González <agonzalezro@gmail.com>
+Alexandre Jomin <alexandrejomin@gmail.com>
+Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
+Alexey Guskov <lexag@mail.ru>
+Alexey Kotlyarov <alexey@infoxchange.net.au>
+Alexey Shamrin <shamrin@gmail.com>
+Alexis THOMAS <fr.alexisthomas@gmail.com>
+Alfred Landrum <alfred.landrum@docker.com>
+Ali Dehghani <ali.dehghani.g@gmail.com>
+Alicia Lauerman <alicia@eta.im>
+Alihan Demir <alihan_6153@hotmail.com>
+Allen Madsen <blatyo@gmail.com>
+Allen Sun <allensun.shl@alibaba-inc.com>
+almoehi <almoehi@users.noreply.github.com>
+Alvaro Saurin <alvaro.saurin@gmail.com>
+Alvin Deng <alvin.q.deng@utexas.edu>
+Alvin Richards <alvin.richards@docker.com>
+amangoel <amangoel@gmail.com>
+Amen Belayneh <amenbelayneh@gmail.com>
+Amir Goldstein <amir73il@aquasec.com>
+Amit Bakshi <ambakshi@gmail.com>
+Amit Krishnan <amit.krishnan@oracle.com>
+Amit Shukla <amit.shukla@docker.com>
+Amr Gawish <amr.gawish@gmail.com>
+Amy Lindburg <amy.lindburg@docker.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Anatoly Borodin <anatoly.borodin@gmail.com>
+Anchal Agrawal <aagrawa4@illinois.edu>
+Anda Xu <anda.xu@docker.com>
+Anders Janmyr <anders@janmyr.com>
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky <robotciti@live.com>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
+Andreas Elvers <andreas@work.de>
+Andreas Köhler <andi5.py@gmx.net>
+Andreas Savvides <andreas@editd.com>
+Andreas Tiefenthaler <at@an-ti.eu>
+Andrei Gherzan <andrei@resin.io>
+Andrew C. Bodine <acbodine@us.ibm.com>
+Andrew Clay Shafer <andrewcshafer@gmail.com>
+Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
+Andrew Gerrand <adg@golang.org>
+Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew He <he.andrew.mail@gmail.com>
+Andrew Hsu <andrewhsu@docker.com>
+Andrew Kuklewicz <kookster@gmail.com>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
+Andrew Macpherson <hopscotch23@gmail.com>
+Andrew Martin <sublimino@gmail.com>
+Andrew McDonnell <bugs@andrewmcdonnell.net>
+Andrew Munsell <andrew@wizardapps.net>
+Andrew Pennebaker <andrew.pennebaker@gmail.com>
+Andrew Po <absourd.noise@gmail.com>
+Andrew Weiss <andrew.weiss@docker.com>
+Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
+André Martins <aanm90@gmail.com>
+andy <ztao@tibco-support.com>
+Andy Chambers <anchambers@paypal.com>
+andy diller <dillera@gmail.com>
+Andy Goldstein <agoldste@redhat.com>
+Andy Kipp <andy@rstudio.com>
+Andy Rothfusz <github@developersupport.net>
+Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
+Anes Hasicic <anes.hasicic@gmail.com>
+Anil Belur <askb23@gmail.com>
+Anil Madhavapeddy <anil@recoil.org>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anonmily <michelle@michelleliu.io>
+Anran Qiao <anran.qiao@daocloud.io>
+Anshul Pundir <anshul.pundir@docker.com>
+Anthon van der Neut <anthon@mnt.org>
+Anthony Baire <Anthony.Baire@irisa.fr>
+Anthony Bishopric <git@anthonybishopric.com>
+Anthony Dahanne <anthony.dahanne@gmail.com>
+Anthony Sottile <asottile@umich.edu>
+Anton Löfgren <anton.lofgren@gmail.com>
+Anton Nikitin <anton.k.nikitin@gmail.com>
+Anton Polonskiy <anton.polonskiy@gmail.com>
+Anton Tiurin <noxiouz@yandex.ru>
+Antonio Murdaca <antonio.murdaca@gmail.com>
+Antonis Kalipetis <akalipetis@gmail.com>
+Antony Messerli <amesserl@rackspace.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com>
+apocas <petermdias@gmail.com>
+Arash Deshmeh <adeshmeh@ca.ibm.com>
+ArikaChen <eaglesora@gmail.com>
+Arnaud Lefebvre <a.lefebvre@outlook.fr>
+Arnaud Porterie <arnaud.porterie@docker.com>
+Arthur Barr <arthur.barr@uk.ibm.com>
+Arthur Gautier <baloo@gandi.net>
+Artur Meyster <arthurfbi@yahoo.com>
+Arun Gupta <arun.gupta@gmail.com>
+Asad Saeeduddin <masaeedu@gmail.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Das <andas222@gmail.com>
+Avi Miller <avi.miller@oracle.com>
+Avi Vaid <avaid1996@gmail.com>
+ayoshitake <airandfingers@gmail.com>
+Azat Khuyiyakhmetov <shadow_uz@mail.ru>
+Bardia Keyoumarsi <bkeyouma@ucsc.edu>
+Barnaby Gray <barnaby@pickle.me.uk>
+Barry Allard <barry.allard@gmail.com>
+Bartłomiej Piotrowski <b@bpiotrowski.pl>
+Bastiaan Bakker <bbakker@xebia.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Bonnefoy <frenchben@docker.com>
+Ben Firshman <ben@firshman.co.uk>
+Ben Golub <ben.golub@dotcloud.com>
+Ben Hall <ben@benhall.me.uk>
+Ben Sargent <ben@brokendigits.com>
+Ben Severson <BenSeverson@users.noreply.github.com>
+Ben Toews <mastahyeti@gmail.com>
+Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benjamin Baker <Benjamin.baker@utexas.edu>
+Benjamin Boudreau <boudreau.benjamin@gmail.com>
+Benjamin Yolken <yolken@stripe.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bernerd Schaefer <bj.schaefer@gmail.com>
+Bernhard M. Wiedemann <bwiedemann@suse.de>
+Bert Goethals <bert@bertg.be>
+Bharath Thiruveedula <bharath_ves@hotmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bhumika Bayani <bhumikabayani@gmail.com>
+Bilal Amarni <bilal.amarni@gmail.com>
+Bill Wang <ozbillwang@gmail.com>
+Bin Liu <liubin0329@gmail.com>
+Bingshen Wang <bingshen.wbs@alibaba-inc.com>
+Blake Geno <blakegeno@gmail.com>
+Boaz Shuster <ripcurld.github@gmail.com>
+bobby abbott <ttobbaybbob@gmail.com>
+Boris Pruessmann <boris@pruessmann.org>
+Boshi Lian <farmer1992@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
+boynux <boynux@gmail.com>
+Bradley Cicenas <bradley.cicenas@gmail.com>
+Bradley Wright <brad@intranation.com>
+Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon.philips@coreos.com>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brendan Dixon <brendand@microsoft.com>
+Brent Salisbury <brent.salisbury@docker.com>
+Brett Higgins <brhiggins@arbor.net>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brett Randall <javabrett@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
+Brian Bland <brian.bland@docker.com>
+Brian DeHamer <brian@dehamer.com>
+Brian Dorsey <brian@dorseys.org>
+Brian Flad <bflad417@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Brian McCallister <brianm@skife.org>
+Brian Olsen <brian@maven-group.org>
+Brian Schwind <brianmschwind@gmail.com>
+Brian Shumate <brian@couchbase.com>
+Brian Torres-Gil <brian@dralth.com>
+Brian Trump <btrump@yelp.com>
+Brice Jaglin <bjaglin@teads.tv>
+Briehan Lombaard <briehan.lombaard@gmail.com>
+Brielle Broder <bbroder@google.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
+Bruno Gazzera <bgazzera@paginar.com>
+Bruno Renié <brutasse@gmail.com>
+Bruno Tavares <btavare@thoughtworks.com>
+Bryan Bess <squarejaw@bsbess.com>
+Bryan Boreham <bjboreham@gmail.com>
+Bryan Matsuo <bryan.matsuo@gmail.com>
+Bryan Murphy <bmurphy1976@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
+Caleb Spare <cespare@gmail.com>
+Calen Pennington <cale@edx.org>
+Cameron Boehmer <cameron.boehmer@gmail.com>
+Cameron Spear <cameronspear@gmail.com>
+Campbell Allen <campbell.allen@gmail.com>
+Candid Dauth <cdauth@cdauth.eu>
+Cao Weiwei <cao.weiwei30@zte.com.cn>
+Carl Henrik Lunde <chlunde@ping.uio.no>
+Carl Loa Odin <carlodin@gmail.com>
+Carl X. Su <bcbcarl@gmail.com>
+Carlo Mion <mion00@gmail.com>
+Carlos Alexandro Becker <caarlos0@gmail.com>
+Carlos Sanchez <carlos@apache.org>
+Carol Fager-Higgins <carol.fager-higgins@docker.com>
+Cary <caryhartline@users.noreply.github.com>
+Casey Bisson <casey.bisson@joyent.com>
+Catalin Pirvu <pirvu.catalin94@gmail.com>
+Ce Gao <ce.gao@outlook.com>
+Cedric Davies <cedricda@microsoft.com>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Chad Swenson <chadswen@gmail.com>
+Chance Zibolski <chance.zibolski@gmail.com>
+Chander Govindarajan <chandergovind@gmail.com>
+Chanhun Jeong <keyolk@gmail.com>
+Chao Wang <wangchao.fnst@cn.fujitsu.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Hooper <charles.hooper@dotcloud.com>
+Charles Law <claw@conduce.com>
+Charles Lindsay <chaz@chazomatic.us>
+Charles Merriam <charles.merriam@gmail.com>
+Charles Sarrazin <charles@sarraz.in>
+Charles Smith <charles.smith@docker.com>
+Charlie Drage <charlie@charliedrage.com>
+Charlie Lewis <charliel@lab41.org>
+Chase Bolt <chase.bolt@gmail.com>
+ChaYoung You <yousbe@gmail.com>
+Chen Chao <cc272309126@gmail.com>
+Chen Chuanliang <chen.chuanliang@zte.com.cn>
+Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
+Chen Min <chenmin46@huawei.com>
+Chen Mingjie <chenmingjie0828@163.com>
+Chen Qiu <cheney-90@hotmail.com>
+Cheng-mean Liu <soccerl@microsoft.com>
+Chengguang Xu <cgxu519@gmx.com>
+chenyuzhu <chenyuzhi@oschina.cn>
+Chetan Birajdar <birajdar.chetan@gmail.com>
+Chewey <prosto-chewey@users.noreply.github.com>
+Chia-liang Kao <clkao@clkao.org>
+chli <chli@freewheel.tv>
+Cholerae Hu <choleraehyq@gmail.com>
+Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+Chris Dias <cdias@microsoft.com>
+Chris Dituri <csdituri@gmail.com>
+Chris Fordham <chris@fordham-nagy.id.au>
+Chris Gavin <chris@chrisgavin.me>
+Chris Gibson <chris@chrisg.io>
+Chris Khoo <chris.khoo@gmail.com>
+Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
+Chris McKinnel <chrismckinnel@gmail.com>
+Chris Seto <chriskseto@gmail.com>
+Chris Snow <chsnow123@gmail.com>
+Chris St. Pierre <chris.a.st.pierre@gmail.com>
+Chris Stivers <chris@stivers.us>
+Chris Swan <chris.swan@iee.org>
+Chris Telfer <ctelfer@docker.com>
+Chris Wahl <github@wahlnetwork.com>
+Chris Weyl <cweyl@alumni.drew.edu>
+Chris White <me@cwprogram.com>
+Christian Berendt <berendt@b1-systems.de>
+Christian Brauner <christian.brauner@ubuntu.com>
+Christian Böhme <developement@boehme3d.de>
+Christian Muehlhaeuser <muesli@gmail.com>
+Christian Persson <saser@live.se>
+Christian Rotzoll <ch.rotzoll@gmail.com>
+Christian Simon <simon@swine.de>
+Christian Stefanescu <st.chris@gmail.com>
+Christophe Mehay <cmehay@online.net>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christophe Vidal <kriss@krizalys.com>
+Christopher Biscardi <biscarch@sketcht.com>
+Christopher Crone <christopher.crone@docker.com>
+Christopher Currie <codemonkey+github@gmail.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Christopher Latham <sudosurootdev@gmail.com>
+Christopher Rigor <crigor@gmail.com>
+Christy Perez <christy@linux.vnet.ibm.com>
+Chun Chen <ramichen@tencent.com>
+Ciro S. Costa <ciro.costa@usp.br>
+Clayton Coleman <ccoleman@redhat.com>
+Clinton Kitson <clintonskitson@gmail.com>
+Cody Roseborough <crrosebo@amazon.com>
+Coenraad Loubser <coenraad@wish.org.za>
+Colin Dunklau <colin.dunklau@gmail.com>
+Colin Hebert <hebert.colin@gmail.com>
+Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
+Collin Guarino <collin.guarino@gmail.com>
+Colm Hally <colmhally@gmail.com>
+companycy <companycy@gmail.com>
+Corbin Coleman <corbin.coleman@docker.com>
+Corey Farrell <git@cfware.com>
+Cory Forsyth <cory.forsyth@gmail.com>
+cressie176 <github@stephen-cresswell.net>
+CrimsonGlory <CrimsonGlory@users.noreply.github.com>
+Cristian Staretu <cristian.staretu@gmail.com>
+cristiano balducci <cristiano.balducci@gmail.com>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+CUI Wei <ghostplant@qq.com>
+Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
+Cyril F <cyrilf7x@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+Dafydd Crosby <dtcrsby@gmail.com>
+dalanlan <dalanlan925@gmail.com>
+Damian Smyth <damian@dsau.co>
+Damien Nadé <github@livna.org>
+Damien Nozay <damien.nozay@gmail.com>
+Damjan Georgievski <gdamjan@gmail.com>
+Dan Anolik <dan@anolik.net>
+Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Feldman <danf@jfrog.com>
+Dan Griffin <dgriffin@peer1.com>
+Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan Levy <dan@danlevy.net>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Williams <me@deedubs.com>
+Dani Louca <dani.louca@docker.com>
+Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Dao <dqminh@cloudflare.com>
+Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Daniel Gasienica <daniel@gasienica.ch>
+Daniel Grunwell <mwgrunny@gmail.com>
+Daniel Hiltgen <daniel.hiltgen@docker.com>
+Daniel J Walsh <dwalsh@redhat.com>
+Daniel Menet <membership@sontags.ch>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nephin <dnephin@docker.com>
+Daniel Norberg <dano@spotify.com>
+Daniel Nordberg <dnordberg@gmail.com>
+Daniel Robinson <gottagetmac@gmail.com>
+Daniel S <dan.streby@gmail.com>
+Daniel Von Fange <daniel@leancoder.com>
+Daniel Watkins <daniel@daniel-watkins.co.uk>
+Daniel X Moore <yahivin@gmail.com>
+Daniel YC Lin <dlin.tw@gmail.com>
+Daniel Zhang <jmzwcn@gmail.com>
+Danny Berger <dpb587@gmail.com>
+Danny Yates <danny@codeaholics.org>
+Danyal Khaliq <danyal.khaliq@tenpearls.com>
+Darren Coxall <darren@darrencoxall.com>
+Darren Shepherd <darren.s.shepherd@gmail.com>
+Darren Stahl <darst@microsoft.com>
+Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
+Davanum Srinivas <davanum@gmail.com>
+Dave Barboza <dbarboza@datto.com>
+Dave Goodchild <buddhamagnet@gmail.com>
+Dave Henderson <dhenderson@gmail.com>
+Dave MacDonald <mindlapse@gmail.com>
+Dave Tucker <dt@docker.com>
+David Anderson <dave@natulte.net>
+David Calavera <david.calavera@gmail.com>
+David Chung <david.chung@docker.com>
+David Corking <dmc-source@dcorking.com>
+David Cramer <davcrame@cisco.com>
+David Currie <david_currie@uk.ibm.com>
+David Davis <daviddavis@redhat.com>
+David Dooling <dooling@gmail.com>
+David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
+David Glasser <glasser@davidglasser.net>
+David Lawrence <david.lawrence@docker.com>
+David Lechner <david@lechnology.com>
+David M. Karr <davidmichaelkarr@gmail.com>
+David Mackey <tdmackey@booleanhaiku.com>
+David Mat <david@davidmat.com>
+David Mcanulty <github@hellspark.com>
+David McKay <david@rawkode.com>
+David Pelaez <pelaez89@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
+David Röthlisberger <david@rothlis.net>
+David Sheets <dsheets@docker.com>
+David Sissitka <me@dsissitka.com>
+David Trott <github@davidtrott.com>
+David Wang <00107082@163.com>
+David Williamson <david.williamson@docker.com>
+David Xia <dxia@spotify.com>
+David Young <yangboh@cn.ibm.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
+Dawn Chen <dawnchen@google.com>
+dbdd <wangtong2712@gmail.com>
+dcylabs <dcylabs@gmail.com>
+Debayan De <debayande@users.noreply.github.com>
+Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com>
+deed02392 <georgehafiz@gmail.com>
+Deng Guangxing <dengguangxing@huawei.com>
+Deni Bertovic <deni@kset.org>
+Denis Defreyne <denis@soundcloud.com>
+Denis Gladkikh <denis@gladkikh.email>
+Denis Ollier <larchunix@users.noreply.github.com>
+Dennis Chen <barracks510@gmail.com>
+Dennis Chen <dennis.chen@arm.com>
+Dennis Docter <dennis@d23.nl>
+Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
+Derek Ch <denc716@gmail.com>
+Derek McGowan <derek@mcgstyle.net>
+Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
+devmeyster <arthurfbi@yahoo.com>
+Devvyn Murphy <devvyn@devvyn.com>
+Dharmit Shah <shahdharmit@gmail.com>
+Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
+Diego Romero <idiegoromero@gmail.com>
+Diego Siqueira <dieg0@live.com>
+Dieter Reuter <dieter.reuter@me.com>
+Dillon Dixon <dillondixon@gmail.com>
+Dima Stopel <dima@twistlock.com>
+Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
+Dimitris Rozakis <dimrozakis@gmail.com>
+Dimitry Andric <d.andric@activevideo.com>
+Dinesh Subhraveti <dineshs@altiscale.com>
+Ding Fei <dingfei@stars.org.cn>
+Diogo Monica <diogo@docker.com>
+DiuDiugirl <sophia.wang@pku.edu.cn>
+Djibril Koné <kone.djibril@gmail.com>
+dkumor <daniel@dkumor.com>
+Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
+Dmitri Shuralyov <shurcooL@gmail.com>
+Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry Gusev <dmitry.gusev@gmail.com>
+Dmitry Kononenko <d@dm42.ru>
+Dmitry Shyshkin <dmitry@shyshkin.org.ua>
+Dmitry Smirnov <onlyjob@member.fsf.org>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
+Dmitry Vorobev <dimahabr@gmail.com>
+Dolph Mathews <dolph.mathews@gmail.com>
+Dominik Dingel <dingel@linux.vnet.ibm.com>
+Dominik Finkbeiner <finkes93@gmail.com>
+Dominik Honnef <dominik@honnef.co>
+Don Kirkby <donkirkby@users.noreply.github.com>
+Don Kjer <don.kjer@gmail.com>
+Don Spaulding <donspauldingii@gmail.com>
+Donald Huang <don.hcd@gmail.com>
+Dong Chen <dongluo.chen@docker.com>
+Donghwa Kim <shanytt@gmail.com>
+Donovan Jones <git@gamma.net.nz>
+Doron Podoleanu <doronp@il.ibm.com>
+Doug Davis <dug@us.ibm.com>
+Doug MacEachern <dougm@vmware.com>
+Doug Tangren <d.tangren@gmail.com>
+Douglas Curtis <dougcurtis1@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
+Dražen Lučanin <kermit666@gmail.com>
+Drew Erny <drew.erny@docker.com>
+Drew Hubl <drew.hubl@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Ed Costello <epc@epcostello.com>
+Edmund Wagner <edmund-wagner@web.de>
+Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
+Eivin Giske Skaaren <eivinsn@axis.com>
+Eivind Uggedal <eivind@uggedal.com>
+Elan Ruusamäe <glen@pld-linux.org>
+Elango Sivanandam <elango.siva@docker.com>
+Elena Morozova <lelenanam@gmail.com>
+Eli Uriegas <eli.uriegas@docker.com>
+Elias Faxö <elias.faxo@tre.se>
+Elias Probst <mail@eliasprobst.eu>
+Elijah Zupancic <elijah@zupancic.name>
+eluck <mail@eluck.me>
+Elvir Kuric <elvirkuric@gmail.com>
+Emil Davtyan <emil2k@gmail.com>
+Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
+Emily Rose <emily@contactvibe.com>
+Emir Ozer <emirozer@yandex.com>
+Enguerran <engcolson@gmail.com>
+Eohyung Lee <liquidnuker@gmail.com>
+epeterso <epeterson@breakpoint-labs.com>
+Eric Barch <barch@tomesoftware.com>
+Eric Curtin <ericcurtin17@gmail.com>
+Eric G. Noriega <enoriega@vizuri.com>
+Eric Hanchrow <ehanchrow@ine.com>
+Eric Lee <thenorthsecedes@gmail.com>
+Eric Myhre <hash@exultant.us>
+Eric Paris <eparis@redhat.com>
+Eric Rafaloff <erafaloff@gmail.com>
+Eric Rosenberg <ehaydenr@gmail.com>
+Eric Sage <eric.david.sage@gmail.com>
+Eric Soderstrom <ericsoderstrom@gmail.com>
+Eric Yang <windfarer@gmail.com>
+Eric-Olivier Lamey <eo@lamey.me>
+Erica Windisch <erica@windisch.us>
+Erik Bray <erik.m.bray@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
+Erik St. Martin <alakriti@gmail.com>
+Erik Weathers <erikdw@gmail.com>
+Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
+Ethan Bell <ebgamer29@gmail.com>
+Euan Kemp <euan.kemp@coreos.com>
+Eugen Krizo <eugen.krizo@gmail.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
+Evan Allrich <evan@unguku.com>
+Evan Carmi <carmi@users.noreply.github.com>
+Evan Hazlett <ejhazlett@gmail.com>
+Evan Krall <krall@yelp.com>
+Evan Phoenix <evan@fallingsnow.net>
+Evan Wies <evan@neomantra.net>
+Evelyn Xu <evelynhsu21@gmail.com>
+Everett Toews <everett.toews@rackspace.com>
+Evgeny Shmarnev <shmarnev@gmail.com>
+Evgeny Vereshchagin <evvers@ya.ru>
+Ewa Czechowska <ewa@ai-traders.com>
+Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
+ezbercih <cem.ezberci@gmail.com>
+Ezra Silvera <ezra@il.ibm.com>
+Fabian Lauer <kontakt@softwareschmiede-saar.de>
+Fabian Raetz <fabian.raetz@gmail.com>
+Fabiano Rosas <farosas@br.ibm.com>
+Fabio Falci <fabiofalci@gmail.com>
+Fabio Kung <fabio.kung@gmail.com>
+Fabio Rapposelli <fabio@vmware.com>
+Fabio Rehm <fgrehm@gmail.com>
+Fabrizio Regini <freegenie@gmail.com>
+Fabrizio Soppelsa <fsoppelsa@mirantis.com>
+Faiz Khan <faizkhan00@gmail.com>
+falmp <chico.lopes@gmail.com>
+Fangming Fang <fangming.fang@arm.com>
+Fangyuan Gao <21551127@zju.edu.cn>
+fanjiyun <fan.jiyun@zte.com.cn>
+Fareed Dudhia <fareeddudhia@googlemail.com>
+Fathi Boudra <fathi.boudra@linaro.org>
+Federico Gimenez <fgimenez@coit.es>
+Felipe Oliveira <felipeweb.programador@gmail.com>
+Felix Abecassis <fabecassis@nvidia.com>
+Felix Geisendörfer <felix@debuggable.com>
+Felix Hupfeld <felix@quobyte.com>
+Felix Rabe <felix@rabe.io>
+Felix Ruess <felix.ruess@gmail.com>
+Felix Schindler <fschindler@weluse.de>
+Feng Yan <fy2462@gmail.com>
+Fengtu Wang <wangfengtu@huawei.com>
+Ferenc Szabo <pragmaticfrank@gmail.com>
+Fernando <fermayo@gmail.com>
+Fero Volar <alian@alian.info>
+Ferran Rodenas <frodenas@gmail.com>
+Filipe Brandenburger <filbranden@google.com>
+Filipe Oliveira <contato@fmoliveira.com.br>
+Flavio Castelli <fcastelli@suse.com>
+Flavio Crisciani <flavio.crisciani@docker.com>
+Florian <FWirtz@users.noreply.github.com>
+Florian Klein <florian.klein@free.fr>
+Florian Maier <marsmensch@users.noreply.github.com>
+Florian Noeding <noeding@adobe.com>
+Florian Weingarten <flo@hackvalue.de>
+Florin Asavoaie <florin.asavoaie@gmail.com>
+Florin Patan <florinpatan@gmail.com>
+fonglh <fonglh@gmail.com>
+Foysal Iqbal <foysal.iqbal.fb@gmail.com>
+Francesc Campoy <campoy@google.com>
+Francesco Mari <mari.francesco@gmail.com>
+Francis Chuang <francis.chuang@boostport.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+Francisco Souza <f@souza.cc>
+Frank Groeneveld <frank@ivaldi.nl>
+Frank Herrmann <fgh@4gh.tv>
+Frank Macreery <frank@macreery.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
+Fred Lifton <fred.lifton@docker.com>
+Frederick F. Kautz IV <fkautz@redhat.com>
+Frederik Loeffert <frederik@zitrusmedia.de>
+Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
+Freek Kalter <freek@kalteronline.org>
+Frieder Bluemle <frieder.bluemle@gmail.com>
+Félix Baylac-Jacqué <baylac.felix@gmail.com>
+Félix Cantournet <felix.cantournet@cloudwatt.com>
+Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
+Gabriel Linder <linder.gabriel@gmail.com>
+Gabriel Monroy <gabriel@opdemand.com>
+Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
+Gaetan de Villele <gdevillele@gmail.com>
+Galen Sampson <galen.sampson@gmail.com>
+Gang Qiao <qiaohai8866@gmail.com>
+Gareth Rushgrove <gareth@morethanseven.net>
+Garrett Barboza <garrett@garrettbarboza.com>
+Gary Schaetz <gary@schaetzkc.com>
+Gaurav <gaurav.gosec@gmail.com>
+gautam, prasanna <prasannagautam@gmail.com>
+Gaël PORTAY <gael.portay@savoirfairelinux.com>
+Genki Takiuchi <genki@s21g.com>
+GennadySpb <lipenkov@gmail.com>
+Geoffrey Bachelet <grosfrais@gmail.com>
+Geon Kim <geon0250@gmail.com>
+George Kontridze <george@bugsnag.com>
+George MacRorie <gmacr31@gmail.com>
+George Xie <georgexsh@gmail.com>
+Georgi Hristozov <georgi@forkbomb.nl>
+Gereon Frey <gereon.frey@dynport.de>
+German DZ <germ@ndz.com.ar>
+Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Gerwim Feiken <g.feiken@tfe.nl>
+Ghislain Bourgeois <ghislain.bourgeois@gmail.com>
+Giampaolo Mancini <giampaolo@trampolineup.com>
+Gianluca Borello <g.borello@gmail.com>
+Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
+gissehel <public-devgit-dantus@gissehel.org>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
+Glyn Normington <gnormington@gopivotal.com>
+GoBella <caili_welcome@163.com>
+Goffert van Gool <goffert@phusion.nl>
+Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
+Gosuke Miyashita <gosukenator@gmail.com>
+Gou Rao <gou@portworx.com>
+Govinda Fichtner <govinda.fichtner@googlemail.com>
+Grant Millar <grant@cylo.io>
+Grant Reaber <grant.reaber@gmail.com>
+Graydon Hoare <graydon@pobox.com>
+Greg Fausak <greg@tacodata.com>
+Greg Pflaum <gpflaum@users.noreply.github.com>
+Greg Stephens <greg@udon.org>
+Greg Thornton <xdissent@me.com>
+Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com>
+Guilhem Lettron <guilhem+github@lettron.fr>
+Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
+Guillaume J. Charmes <guillaume.charmes@docker.com>
+guoxiuyan <guoxiuyan@huawei.com>
+Guri <odg0318@gmail.com>
+Gurjeet Singh <gurjeet@singh.im>
+Guruprasad <lgp171188@gmail.com>
+Gustav Sinder <gustav.sinder@gmail.com>
+gwx296173 <gaojing3@huawei.com>
+Günter Zöchbauer <guenter@gzoechbauer.com>
+haikuoliu <haikuo@amazon.com>
+Hakan Özler <hakan.ozler@kodcu.com>
+Hans Kristian Flaatten <hans@starefossen.com>
+Hans Rødtang <hansrodtang@gmail.com>
+Hao Shu Wei <haosw@cn.ibm.com>
+Hao Zhang <21521210@zju.edu.cn>
+Harald Albers <github@albersweb.de>
+Harley Laue <losinggeneration@gmail.com>
+Harold Cooper <hrldcpr@gmail.com>
+Harry Zhang <harryz@hyper.sh>
+Harshal Patil <harshal.patil@in.ibm.com>
+Harshal Patil <harshalp@linux.vnet.ibm.com>
+He Simei <hesimei@zju.edu.cn>
+He Xiaoxi <tossmilestone@gmail.com>
+He Xin <he_xinworld@126.com>
+heartlock <21521209@zju.edu.cn>
+Hector Castro <hectcastro@gmail.com>
+Helen Xie <chenjg@harmonycloud.cn>
+Henning Sprang <henning.sprang@gmail.com>
+Hiroshi Hatake <hatake@clear-code.com>
+Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie@docker.com>
+Hong Xu <hong@topbug.net>
+Hongbin Lu <hongbin034@gmail.com>
+hsinko <21551195@zju.edu.cn>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huanzhong Zhang <zhanghuanzhong90@gmail.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Duncan <hugo@hugoduncan.org>
+Hugo Marisco <0x6875676f@gmail.com>
+Hunter Blanks <hunter@twilio.com>
+huqun <huqun@zju.edu.cn>
+Huu Nguyen <huu@prismskylabs.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
+Iago López Galeiras <iago@kinvolk.io>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bishop <ianbishop@pace7.com>
+Ian Bull <irbull@gmail.com>
+Ian Calvert <ianjcalvert@gmail.com>
+Ian Campbell <ian.campbell@docker.com>
+Ian Chen <ianre657@gmail.com>
+Ian Lee <IanLee1521@gmail.com>
+Ian Main <imain@redhat.com>
+Ian Philpot <ian.philpot@microsoft.com>
+Ian Truslove <ian.truslove@gmail.com>
+Iavael <iavaelooeyt@gmail.com>
+Icaro Seara <icaro.seara@gmail.com>
+Ignacio Capurro <icapurrofagian@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
+Igor Karpovich <i.karpovich@currencysolutions.com>
+Iliana Weller <iweller@amazon.com>
+Ilkka Laukkanen <ilkka@ilkka.io>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
+Ilya Gusev <mail@igusev.ru>
+Ilya Khlopotov <ilya.khlopotov@gmail.com>
+imre Fitos <imre.fitos+github@gmail.com>
+inglesp <peter.inglesby@gmail.com>
+Ingo Gottwald <in.gottwald@gmail.com>
+Isaac Dupree <antispam@idupree.com>
+Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isao Jonas <isao.jonas@gmail.com>
+Iskander Sharipov <quasilyte@gmail.com>
+Ivan Babrou <ibobrik@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
+Ivan Grcic <igrcic@gmail.com>
+Ivan Markin <sw@nogoegst.net>
+J Bruni <joaohbruni@yahoo.com.br>
+J. Nunn <jbnunn@gmail.com>
+Jack Danger Canty <jackdanger@squareup.com>
+Jack Laxson <jackjrabbit@gmail.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
+Jacob Tomlinson <jacob@tom.linson.uk>
+Jacob Vallejo <jakeev@amazon.com>
+Jacob Wen <jian.w.wen@oracle.com>
+Jaivish Kothari <janonymous.codevulture@gmail.com>
+Jake Champlin <jake.champlin.27@gmail.com>
+Jake Moshenko <jake@devtable.com>
+Jake Sanders <jsand@google.com>
+jakedt <jake@devtable.com>
+James Allen <jamesallen0108@gmail.com>
+James Carey <jecarey@us.ibm.com>
+James Carr <james.r.carr@gmail.com>
+James DeFelice <james.defelice@ishisystems.com>
+James Harrison Fisher <jameshfisher@gmail.com>
+James Kyburz <james.kyburz@gmail.com>
+James Kyle <james@jameskyle.org>
+James Lal <james@lightsofapollo.com>
+James Mills <prologic@shortcircuit.net.au>
+James Nesbitt <james.nesbitt@wunderkraut.com>
+James Nugent <james@jen20.com>
+James Turnbull <james@lovedthanlost.net>
+Jamie Hannaford <jamie@limetree.org>
+Jamshid Afshar <jafshar@yahoo.com>
+Jan Keromnes <janx@linux.com>
+Jan Koprowski <jan.koprowski@gmail.com>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jan-Gerd Tenberge <janten@gmail.com>
+Jan-Jaap Driessen <janjaapdriessen@gmail.com>
+Jana Radhakrishnan <mrjana@docker.com>
+Jannick Fahlbusch <git@jf-projects.de>
+Januar Wayong <januar@gmail.com>
+Jared Biel <jared.biel@bolderthinking.com>
+Jared Hocutt <jaredh@netapp.com>
+Jaroslaw Zabiello <hipertracker@gmail.com>
+jaseg <jaseg@jaseg.net>
+Jasmine Hegman <jasmine@jhegman.com>
+Jason Divock <jdivock@gmail.com>
+Jason Giedymin <jasong@apache.org>
+Jason Green <Jason.Green@AverInformatics.Com>
+Jason Hall <imjasonh@gmail.com>
+Jason Heiss <jheiss@aput.net>
+Jason Livesay <ithkuil@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jason Plum <jplum@devonit.com>
+Jason Shepherd <jason@jasonshepherd.net>
+Jason Smith <jasonrichardsmith@gmail.com>
+Jason Sommer <jsdirv@gmail.com>
+Jason Stangroome <jason@codeassassin.com>
+jaxgeller <jacksongeller@gmail.com>
+Jay <imjching@hotmail.com>
+Jay <teguhwpurwanto@gmail.com>
+Jay Kamat <github@jgkamat.33mail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Christophe Berthon <huygens@berthon.eu>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeeva S. Chelladhurai <sjeeva@gmail.com>
+Jeff Anderson <jeff@docker.com>
+Jeff Hajewski <jeff.hajewski@gmail.com>
+Jeff Johnston <jeff.johnston.mn@gmail.com>
+Jeff Lindsay <progrium@gmail.com>
+Jeff Mickey <j@codemac.net>
+Jeff Minard <jeff@creditkarma.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com>
+Jeff Silberman <jsilberm@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeffrey Morgan <jmorganca@gmail.com>
+Jeffrey van Gogh <jvg@google.com>
+Jenny Gebske <jennifer@gebske.de>
+Jeremy Chambers <jeremy@thehipbot.com>
+Jeremy Grosser <jeremy@synack.me>
+Jeremy Price <jprice.rhit@gmail.com>
+Jeremy Qian <vanpire110@163.com>
+Jeremy Unruh <jeremybunruh@gmail.com>
+Jeremy Yallop <yallop@docker.com>
+Jeroen Franse <jeroenfranse@gmail.com>
+Jeroen Jacobs <github@jeroenj.be>
+Jesse Dearing <jesse.dearing@gmail.com>
+Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <acidburn@microsoft.com>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
+Jhon Honce <jhonce@redhat.com>
+Ji.Zhilong <zhilongji@gmail.com>
+Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
+Jie Luo <luo612@zju.edu.cn>
+Jihyun Hwang <jhhwang@telcoware.com>
+Jilles Oldenbeuving <ojilles@gmail.com>
+Jim Alateras <jima@comware.com.au>
+Jim Galasyn <jim.galasyn@docker.com>
+Jim Minter <jminter@redhat.com>
+Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jimmy Puckett <jimmy.puckett@spinen.com>
+Jimmy Song <rootsongjc@gmail.com>
+jimmyxian <jimmyxian2004@yahoo.com.cn>
+Jinsoo Park <cellpjs@gmail.com>
+Jiri Popelka <jpopelka@redhat.com>
+Jiuyue Ma <majiuyue@huawei.com>
+Jiří Župka <jzupka@redhat.com>
+jjy <jiangjinyang@outlook.com>
+jmzwcn <jmzwcn@gmail.com>
+Joao Fernandes <joao.fernandes@docker.com>
+Joe Beda <joe.github@bedafamily.com>
+Joe Doliner <jdoliner@pachyderm.io>
+Joe Ferguson <joe@infosiftr.com>
+Joe Gordon <joe.gordon0@gmail.com>
+Joe Shaw <joe@joeshaw.org>
+Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
+Joel Hansson <joel.hansson@ecraft.com>
+Joel Wurtz <jwurtz@jolicode.com>
+Joey Geiger <jgeiger@gmail.com>
+Joey Geiger <jgeiger@users.noreply.github.com>
+Joey Gibson <joey@joeygibson.com>
+Joffrey F <joffrey@docker.com>
+Johan Euphrosine <proppy@google.com>
+Johan Rydberg <johan.rydberg@gmail.com>
+Johanan Lieberman <johanan.lieberman@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
+John Costa <john.costa@gmail.com>
+John Feminella <jxf@jxf.me>
+John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
+John Harris <john@johnharris.io>
+John Howard (VM) <John.Howard@microsoft.com>
+John Laswell <john.n.laswell@gmail.com>
+John Maguire <jmaguire@duosecurity.com>
+John Mulhausen <john@docker.com>
+John OBrien III <jobrieniii@yahoo.com>
+John Starks <jostarks@microsoft.com>
+John Stephens <johnstep@docker.com>
+John Tims <john.k.tims@gmail.com>
+John V. Martinez <jvmatl@gmail.com>
+John Warwick <jwarwick@gmail.com>
+John Willis <john.willis@docker.com>
+Jon Johnson <jonjohnson@google.com>
+Jon Surrell <jon.surrell@gmail.com>
+Jon Wedaman <jweede@gmail.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
+Jonathan Choy <jonathan.j.choy@gmail.com>
+Jonathan Dowland <jon+github@alcopop.org>
+Jonathan Lebon <jlebon@redhat.com>
+Jonathan Lomas <jonathan@floatinglomas.ca>
+Jonathan McCrohan <jmccrohan@gmail.com>
+Jonathan Mueller <j.mueller@apoveda.ch>
+Jonathan Pares <jonathanpa@users.noreply.github.com>
+Jonathan Rudenberg <jonathan@titanous.com>
+Jonathan Stoppani <jonathan.stoppani@divio.com>
+Jonh Wendell <jonh.wendell@redhat.com>
+Joni Sar <yoni@cocycles.com>
+Joost Cassee <joost@cassee.net>
+Jordan Arentsen <blissdev@gmail.com>
+Jordan Jennings <jjn2009@gmail.com>
+Jordan Sissel <jls@semicomplete.com>
+Jorge Marin <chipironcin@users.noreply.github.com>
+Jorit Kleine-Möllhoff <joppich@bricknet.de>
+Jose Diaz-Gonzalez <jose@seatgeek.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Joseph Hager <ajhager@gmail.com>
+Joseph Kern <jkern@semafour.net>
+Joseph Rothrock <rothrock@rothrock.org>
+Josh <jokajak@gmail.com>
+Josh Bodah <jb3689@yahoo.com>
+Josh Bonczkowski <josh.bonczkowski@gmail.com>
+Josh Chorlton <jchorlton@gmail.com>
+Josh Eveleth <joshe@opendns.com>
+Josh Hawn <josh.hawn@docker.com>
+Josh Horwitz <horwitz@addthis.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
+Josh Soref <jsoref@gmail.com>
+Josh Wilson <josh.wilson@fivestars.com>
+Josiah Kiehl <jkiehl@riotgames.com>
+José Tomás Albornoz <jojo@eljojo.net>
+Joyce Jang <mail@joycejang.com>
+JP <jpellerin@leapfrogonline.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
+Julien Barbier <write0@gmail.com>
+Julien Bisconti <veggiemonk@users.noreply.github.com>
+Julien Bordellier <julienbordellier@gmail.com>
+Julien Dubois <julien.dubois@gmail.com>
+Julien Kassar <github@kassisol.com>
+Julien Maitrehenry <julien.maitrehenry@me.com>
+Julien Pervillé <julien.perville@perfect-memory.com>
+Julio Montes <imc.coder@gmail.com>
+Jun-Ru Chang <jrjang@gmail.com>
+Jussi Nummelin <jussi.nummelin@gmail.com>
+Justas Brazauskas <brazauskasjustas@gmail.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Force <justin.force@gmail.com>
+Justin Menga <justin.menga@gmail.com>
+Justin Plock <jplock@users.noreply.github.com>
+Justin Simonelis <justin.p.simonelis@gmail.com>
+Justin Terry <juterry@microsoft.com>
+Justyn Temme <justyntemme@gmail.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@docker.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+K. Heller <pestophagous@gmail.com>
+Kai Blin <kai@samba.org>
+Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
+Kamil Domański <kamil@domanski.co>
+Kamjar Gerami <kami.gerami@gmail.com>
+Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
+Kara Alexandra <kalexandra@us.ibm.com>
+Karan Lyons <karan@karanlyons.com>
+Kareem Khazem <karkhaz@karkhaz.com>
+kargakis <kargakis@users.noreply.github.com>
+Karl Grzeszczak <karlgrz@gmail.com>
+Karol Duleba <mr.fuxi@gmail.com>
+Karthik Karanth <karanth.karthik@gmail.com>
+Karthik Nayak <Karthik.188@gmail.com>
+Kasper Fabæch Brandt <poizan@poizan.dk>
+Kate Heddleston <kate.heddleston@gmail.com>
+Katie McLaughlin <katie@glasnt.com>
+Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
+Katrina Owen <katrina.owen@gmail.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
+Kay Yan <kay.yan@daocloud.io>
+kayrus <kay.diam@gmail.com>
+Kazuhiro Sera <seratch@gmail.com>
+Ke Li <kel@splunk.com>
+Ke Xu <leonhartx.k@gmail.com>
+Kei Ohmura <ohmura.kei@gmail.com>
+Keith Hudgins <greenman@greenman.org>
+Keli Hu <dev@keli.hu>
+Ken Cochrane <kencochrane@gmail.com>
+Ken Herner <kherner@progress.com>
+Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Ken Reese <krrgithub@gmail.com>
+Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
+Kenjiro Nakayama <nakayamakenjiro@gmail.com>
+Kent Johnson <kentoj@gmail.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
+Kevin Burke <kev@inburke.com>
+Kevin Clark <kevin.clark@gmail.com>
+Kevin Feyrer <kevin.feyrer@btinternet.com>
+Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Jing Qiu <kevin@idempotent.ca>
+Kevin Kern <kaiwentan@harmonycloud.cn>
+Kevin Menard <kevin@nirvdrum.com>
+Kevin Meredith <kevin.m.meredith@gmail.com>
+Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
+Kevin Richardson <kevin@kevinrichardson.co>
+Kevin Shi <kshi@andrew.cmu.edu>
+Kevin Wallace <kevin@pentabarf.net>
+Kevin Yap <me@kevinyap.ca>
+Keyvan Fatehi <keyvanfatehi@gmail.com>
+kies <lleelm@gmail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+Kim Eik <kim@heldig.org>
+Kimbro Staken <kstaken@kstaken.com>
+Kir Kolyshkin <kolyshkin@gmail.com>
+Kiran Gangadharan <kiran.daredevil@gmail.com>
+Kirill SIbirev <l0kix2@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
+Koichi Shiraishi <k@zchee.io>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
+Konstantin Gribov <grossws@gmail.com>
+Konstantin L <sw.double@gmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+Krasi Georgiev <krasi@vip-consult.solutions>
+Krasimir Georgiev <support@vip-consult.co.uk>
+Kris-Mikael Krister <krismikael@protonmail.com>
+Kristian Haugene <kristian.haugene@capgemini.com>
+Kristina Zabunova <triara.xiii@gmail.com>
+Kun Zhang <zkazure@gmail.com>
+Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
+Kunal Tyagi <tyagi.kunal@live.com>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+Kyle Linden <linden.kyle@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
+Lai Jiangshan <jiangshanlai@gmail.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
+Lalatendu Mohanty <lmohanty@redhat.com>
+Lance Chen <cyen0312@gmail.com>
+Lance Kinley <lkinley@loyaltymethods.com>
+Lars Butler <Lars.Butler@gmail.com>
+Lars Kellogg-Stedman <lars@redhat.com>
+Lars R. Damerow <lars@pixar.com>
+Lars-Magnus Skog <ralphtheninja@riseup.net>
+Laszlo Meszaros <lacienator@gmail.com>
+Laura Frank <ljfrank@gmail.com>
+Laurent Erignoux <lerignoux@gmail.com>
+Laurie Voss <github@seldo.com>
+Leandro Siqueira <leandro.siqueira@gmail.com>
+Lee Chao <932819864@qq.com>
+Lee, Meng-Han <sunrisedm4@gmail.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Lei Gong <lgong@alauda.io>
+Lei Jitang <leijitang@huawei.com>
+Len Weincier <len@cloudafrica.net>
+Lennie <github@consolejunkie.net>
+Leo Gallucci <elgalu3@gmail.com>
+Leszek Kowalski <github@leszekkowalski.pl>
+Levi Blackstone <levi.blackstone@rackspace.com>
+Levi Gross <levi@levigross.com>
+Lewis Daly <lewisdaly@me.com>
+Lewis Marshall <lewis@lmars.net>
+Lewis Peckover <lew+github@lew.io>
+Li Yi <denverdino@gmail.com>
+Liam Macgillavry <liam@kumina.nl>
+Liana Lo <liana.lixia@gmail.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
+Liang-Chi Hsieh <viirya@gmail.com>
+Liao Qingwei <liaoqingwei@huawei.com>
+Lily Guo <lily.guo@docker.com>
+limsy <seongyeol37@gmail.com>
+Lin Lu <doraalin@163.com>
+LingFaKe <lingfake@huawei.com>
+Linus Heckemann <lheckemann@twig-world.com>
+Liran Tal <liran.tal@gmail.com>
+Liron Levin <liron@twistlock.com>
+Liu Bo <bo.li.liu@oracle.com>
+Liu Hua <sdu.liu@huawei.com>
+liwenqi <vikilwq@zju.edu.cn>
+lixiaobing10051267 <li.xiaobing1@zte.com.cn>
+Liz Zhang <lizzha@microsoft.com>
+LIZAO LI <lzlarryli@gmail.com>
+Lizzie Dixon <_@lizzie.io>
+Lloyd Dewolf <foolswisdom@gmail.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
+longliqiang88 <394564827@qq.com>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
+Lorenzo Fontana <lo@linux.com>
+Lotus Fenn <fenn.lotus@gmail.com>
+Louis Opter <kalessin@kalessin.fr>
+Luca Favatella <luca.favatella@erlang-solutions.com>
+Luca Marturana <lucamarturana@gmail.com>
+Luca Orlandi <luca.orlandi@gmail.com>
+Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
+Lucas Chan <lucas-github@lucaschan.com>
+Lucas Chi <lucas@teacherspayteachers.com>
+Lucas Molas <lmolas@fundacionsadosky.org.ar>
+Luciano Mores <leslau@gmail.com>
+Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
+Luiz Svoboda <luizek@gmail.com>
+Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
+lukaspustina <lukas.pustina@centerdevice.com>
+Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
+Luke Marsden <me@lukemarsden.net>
+Lyn <energylyn@zju.edu.cn>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+Lénaïc Huard <lhuard@amadeus.com>
+Ma Müller <mueller-ma@users.noreply.github.com>
+Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
+Mabin <bin.ma@huawei.com>
+Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
+Madhav Puri <madhav.puri@gmail.com>
+Madhu Venugopal <madhu@socketplane.io>
+Mageee <fangpuyi@foxmail.com>
+Mahesh Tiyyagura <tmahesh@gmail.com>
+malnick <malnick@gmail..com>
+Malte Janduda <mail@janduda.net>
+Manfred Touron <m@42.am>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
+Manjunath A Kumatagi <mkumatag@in.ibm.com>
+Mansi Nahar <mmn4185@rit.edu>
+Manuel Meurer <manuel@krautcomputing.com>
+Manuel Rüger <manuel@rueg.eu>
+Manuel Woelker <github@manuel.woelker.org>
+mapk0y <mapk0y@gmail.com>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
+Marcel Edmund Franke <marcel.edmund.franke@gmail.com>
+Marcelo Horacio Fortino <info@fortinux.com>
+Marcelo Salazar <chelosalazar@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
+Marcus Cobden <mcobden@cisco.com>
+Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Linke <marcus.linke@gmx.de>
+Marcus Martins <marcus@docker.com>
+Marcus Ramberg <marcus@nordaaker.com>
+Marek Goldmann <marek.goldmann@gmail.com>
+Marian Marinov <mm@yuhu.biz>
+Marianna Tessel <mtesselh@gmail.com>
+Mario Loriedo <mario.loriedo@gmail.com>
+Marius Gundersen <me@mariusgundersen.net>
+Marius Sturm <marius@graylog.com>
+Marius Voila <marius.voila@gmail.com>
+Mark Allen <mrallen1@yahoo.com>
+Mark McGranaghan <mmcgrana@gmail.com>
+Mark McKinstry <mmckinst@umich.edu>
+Mark Milstein <mark@epiloque.com>
+Mark Oates <fl0yd@me.com>
+Mark Parker <godefroi@users.noreply.github.com>
+Mark West <markewest@gmail.com>
+Markan Patel <mpatel678@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
+Markus Fix <lispmeister@gmail.com>
+Markus Kortlang <hyp3rdino@googlemail.com>
+Martijn Dwars <ikben@martijndwars.nl>
+Martijn van Oosterhout <kleptog@svana.org>
+Martin Honermeyer <maze@strahlungsfrei.de>
+Martin Kelly <martin@surround.io>
+Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
+Martin Muzatko <martin@happy-css.com>
+Martin Redmond <redmond.martin@gmail.com>
+Mary Anthony <mary.anthony@docker.com>
+Masahito Zembutsu <zembutsu@users.noreply.github.com>
+Masato Ohba <over.rye@gmail.com>
+Masayuki Morita <minamijoyo@gmail.com>
+Mason Malone <mason.malone@gmail.com>
+Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
+Mathieu Champlon <mathieu.champlon@docker.com>
+Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
+Mathieu Parent <math.parent@gmail.com>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matt Bentley <matt.bentley@docker.com>
+Matt Haggard <haggardii@gmail.com>
+Matt Hoyle <matt@deployable.co>
+Matt McCormick <matt.mccormick@kitware.com>
+Matt Moore <mattmoor@google.com>
+Matt Richardson <matt@redgumtech.com.au>
+Matt Rickard <mrick@google.com>
+Matt Robenolt <matt@ydekproductions.com>
+Matt Schurenko <matt.schurenko@gmail.com>
+Matt Williams <mattyw@me.com>
+Matthew Heon <mheon@redhat.com>
+Matthew Lapworth <matthewl@bit-shift.net>
+Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mosesohn <raytrac3r@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
+Matthias Klumpp <matthias@tenstral.net>
+Matthias Kühnle <git.nivoc@neverbox.com>
+Matthias Rampke <mr@soundcloud.com>
+Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
+Mauricio Garavaglia <mauricio@medallia.com>
+mauriyouth <mauriyouth@gmail.com>
+Max Shytikov <mshytikov@gmail.com>
+Maxim Fedchyshyn <sevmax@gmail.com>
+Maxim Ivanov <ivanov.maxim@gmail.com>
+Maxim Kulkin <mkulkin@mirantis.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
+Meaglith Ma <genedna@gmail.com>
+meejah <meejah@meejah.ca>
+Megan Kostick <mkostick@us.ibm.com>
+Mehul Kar <mehul.kar@gmail.com>
+Mei ChunTao <mei.chuntao@zte.com.cn>
+Mengdi Gao <usrgdd@gmail.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
+mgniu <mgniu@dataman-inc.com>
+Micah Zoltu <micah@newrelic.com>
+Michael A. Smith <michael@smith-li.com>
+Michael Bridgen <mikeb@squaremobius.net>
+Michael Brown <michael@netdirect.ca>
+Michael Chiang <mchiang@docker.com>
+Michael Crosby <michael@docker.com>
+Michael Currie <mcurrie@bruceforceresearch.com>
+Michael Friis <friism@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Michael Grauer <michael.grauer@kitware.com>
+Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Michael Hudson-Doyle <michael.hudson@canonical.com>
+Michael Huettermann <michael@huettermann.net>
+Michael Irwin <mikesir87@gmail.com>
+Michael Käufl <docker@c.michael-kaeufl.de>
+Michael Neale <michael.neale@gmail.com>
+Michael Nussbaum <michael.nussbaum@getbraintree.com>
+Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
+Michael Spetsiotis <michael_spets@hotmail.com>
+Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michael West <mwest@mdsol.com>
+Michal Fojtik <mfojtik@redhat.com>
+Michal Gebauer <mishak@mishak.net>
+Michal Jemala <michal.jemala@gmail.com>
+Michal Minář <miminar@redhat.com>
+Michal Wieczorek <wieczorek-michal@wp.pl>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michał Czeraszkiewicz <czerasz@gmail.com>
+Michał Gryko <github@odkurzacz.org>
+Michiel de Jong <michiel@unhosted.org>
+Mickaël Fortunato <morsi.morsicus@gmail.com>
+Mickaël Remars <mickael@remars.com>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Miguel Morales <mimoralea@gmail.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
+Mike Brown <brownwm@us.ibm.com>
+Mike Casas <mkcsas0@gmail.com>
+Mike Chelen <michael.chelen@gmail.com>
+Mike Danese <mikedanese@google.com>
+Mike Dillon <mike@embody.org>
+Mike Dougherty <mike.dougherty@docker.com>
+Mike Estes <mike.estes@logos.com>
+Mike Gaffney <mike@uberu.com>
+Mike Goelzer <mike.goelzer@docker.com>
+Mike Leone <mleone896@gmail.com>
+Mike Lundy <mike@fluffypenguin.org>
+Mike MacCana <mike.maccana@gmail.com>
+Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
+mikelinjie <294893458@qq.com>
+Mikhail Sobolev <mss@mawhrin.net>
+Miklos Szegedi <miklos.szegedi@cloudera.com>
+Milind Chawre <milindchawre@gmail.com>
+Miloslav Trmač <mitr@redhat.com>
+mingqing <limingqing@cyou-inc.com>
+Mingzhen Feng <fmzhen@zju.edu.cn>
+Misty Stanley-Jones <misty@docker.com>
+Mitch Capper <mitch.capper@gmail.com>
+Mizuki Urushida <z11111001011@gmail.com>
+mlarcher <github@ringabell.org>
+Mohammad Banikazemi <mb@us.ibm.com>
+Mohammed Aaqib Ansari <maaquib@gmail.com>
+Mohit Soni <mosoni@ebay.com>
+Moorthy RS <rsmoorthy@gmail.com>
+Morgan Bauer <mbauer@us.ibm.com>
+Morgante Pell <morgante.pell@morgante.net>
+Morgy93 <thomas@ulfertsprygoda.de>
+Morten Siebuhr <sbhr@sbhr.dk>
+Morton Fox <github@qslw.com>
+Moysés Borges <moysesb@gmail.com>
+mrfly <mr.wrfly@gmail.com>
+Mrunal Patel <mrunalp@gmail.com>
+Muayyad Alsadi <alsadi@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
+Muthukumar R <muthur@gmail.com>
+Máximo Cuadros <mcuadros@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nace Oroz <orkica@gmail.com>
+Nahum Shalman <nshalman@omniti.com>
+Nakul Pathak <nakulpathak3@hotmail.com>
+Nalin Dahyabhai <nalin@redhat.com>
+Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
+Natalie Parker <nparker@omnifone.com>
+Natanael Copa <natanael.copa@docker.com>
+Nate Brennand <nate.brennand@clever.com>
+Nate Eagleson <nate@nateeag.com>
+Nate Jones <nate@endot.org>
+Nathan Hsieh <hsieh.nathan@gmail.com>
+Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
+Nathan McCauley <nathan.mccauley@docker.com>
+Nathan Williams <nathan@teamtreehouse.com>
+Naveed Jamil <naveed.jamil@tenpearls.com>
+Neal McBurnett <neal@mcburnett.org>
+Neil Horman <nhorman@tuxdriver.com>
+Neil Peterson <neilpeterson@outlook.com>
+Nelson Chen <crazysim@gmail.com>
+Neyazul Haque <nuhaque@gmail.com>
+Nghia Tran <nghia@google.com>
+Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
+Nick DeCoursin <n.decoursin@foodpanda.com>
+Nick Irvine <nfirvine@nfirvine.com>
+Nick Neisen <nwneisen@gmail.com>
+Nick Parker <nikaios@gmail.com>
+Nick Payne <nick@kurai.co.uk>
+Nick Russo <nicholasjamesrusso@gmail.com>
+Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
+Nick Stinemates <nick@stinemates.org>
+NickrenREN <yuquan.ren@easystack.cn>
+Nicola Kabar <nicolaka@gmail.com>
+Nicolas Borboën <ponsfrilus@gmail.com>
+Nicolas De Loof <nicolas.deloof@gmail.com>
+Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
+Nicolas Kaiser <nikai@nikai.net>
+Nicolas Sterchele <sterchele.nicolas@gmail.com>
+Nicolas V Castet <nvcastet@us.ibm.com>
+Nicolás Hock Isaza <nhocki@gmail.com>
+Nigel Poulton <nigelpoulton@hotmail.com>
+Nik Nyby <nikolas@gnu.org>
+Nikhil Chawla <chawlanikhil24@gmail.com>
+NikolaMandic <mn080202@gmail.com>
+Nikolas Garofil <nikolas.garofil@uantwerpen.be>
+Nikolay Milovanov <nmil@itransformers.net>
+Nirmal Mehta <nirmalkmehta@gmail.com>
+Nishant Totla <nishanttotla@gmail.com>
+NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
+Noah Meyerhans <nmeyerha@amazon.com>
+Noah Treuhaft <noah.treuhaft@docker.com>
+NobodyOnSE <ich@sektor.selfip.com>
+noducks <onemannoducks@gmail.com>
+Nolan Darilek <nolan@thewordnerd.info>
+nponeccop <andy.melnikov@gmail.com>
+Nuutti Kotivuori <naked@iki.fi>
+nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
+objectified <objectified@gmail.com>
+Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
+Ohad Schneider <ohadschn@users.noreply.github.com>
+ohmystack <jun.jiang02@ele.me>
+Ole Reifschneider <mail@ole-reifschneider.de>
+Oliver Neal <ItsVeryWindy@users.noreply.github.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
+Olle Jonsson <olle.jonsson@gmail.com>
+Oriol Francès <oriolfa@gmail.com>
+Oskar Niburski <oskarniburski@gmail.com>
+Otto Kekäläinen <otto@seravo.fi>
+Ouyang Liduo <oyld0210@163.com>
+Ovidio Mallo <ovidio.mallo@gmail.com>
+Panagiotis Moustafellos <pmoust@elastic.co>
+Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal <pascalgn@users.noreply.github.com>
+Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
+Patrick Böänziger <patrick.baenziger@bsi-software.com>
+Patrick Devine <patrick.devine@docker.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Stapleton <github@gdi2290.com>
+Patrik Cyvoct <patrik@ptrk.io>
+pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
+paul <paul@inkling.com>
+Paul Annesley <paul@annesley.cc>
+Paul Bellamy <paul.a.bellamy@gmail.com>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
+Paul Furtado <pfurtado@hubspot.com>
+Paul Hammond <paul@paulhammond.org>
+Paul Jimenez <pj@place.org>
+Paul Kehrer <paul.l.kehrer@gmail.com>
+Paul Lietar <paul@lietar.net>
+Paul Liljenberg <liljenberg.paul@gmail.com>
+Paul Morie <pmorie@gmail.com>
+Paul Nasrat <pnasrat@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Paulo Ribeiro <paigr.io@gmail.com>
+Pavel Lobashov <ShockwaveNN@gmail.com>
+Pavel Pletenev <cpp.create@gmail.com>
+Pavel Pospisil <pospispa@gmail.com>
+Pavel Sutyrin <pavel.sutyrin@gmail.com>
+Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
+Pavlos Ratis <dastergon@gentoo.org>
+Pavol Vargovcik <pallly.vargovcik@gmail.com>
+Pawel Konczalski <mail@konczalski.de>
+Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
+Peggy Li <peggyli.224@gmail.com>
+Pei Su <sillyousu@gmail.com>
+Peng Tao <bergwolf@gmail.com>
+Penghan Wang <ph.wang@daocloud.io>
+Per Weijnitz <per.weijnitz@gmail.com>
+perhapszzy@sina.com <perhapszzy@sina.com>
+Peter Bourgon <peter@bourgon.org>
+Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Bücker <peter.buecker@pressrelations.de>
+Peter Choi <phkchoi89@gmail.com>
+Peter Dave Hello <hsu@peterdavehello.org>
+Peter Edge <peter.edge@gmail.com>
+Peter Ericson <pdericson@gmail.com>
+Peter Esbensen <pkesbensen@gmail.com>
+Peter Jaffe <pjaffe@nevo.com>
+Peter Malmgren <ptmalmgren@gmail.com>
+Peter Salvatore <peter@psftw.com>
+Peter Volpe <petervo@redhat.com>
+Peter Waller <p@pwaller.net>
+Petr Švihlík <svihlik.petr@gmail.com>
+Phil <underscorephil@gmail.com>
+Phil Estes <estesp@linux.vnet.ibm.com>
+Phil Spitler <pspitler@gmail.com>
+Philip Alexander Etling <paetling@gmail.com>
+Philip Monroe <phil@philmonroe.com>
+Philipp Gillé <philipp.gille@gmail.com>
+Philipp Wahala <philipp.wahala@gmail.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
+Phillip Alexander <git@phillipalexander.io>
+phineas <phin@phineas.io>
+pidster <pid@pidster.com>
+Piergiuliano Bossi <pgbossi@gmail.com>
+Pierre <py@poujade.org>
+Pierre Carrier <pierre@meteor.com>
+Pierre Dal-Pra <dalpra.pierre@gmail.com>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
+Piotr Bogdan <ppbogdan@gmail.com>
+pixelistik <pixelistik@users.noreply.github.com>
+Porjo <porjo38@yahoo.com.au>
+Poul Kjeldager Sørensen <pks@s-innovations.net>
+Pradeep Chhetri <pradeep@indix.com>
+Pradip Dhara <pradipd@microsoft.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Pratik Karki <prertik@outlook.com>
+Prayag Verma <prayag.verma@gmail.com>
+Priya Wadhwa <priyawadhwa@google.com>
+Projjol Banerji <probaner23@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
+Pure White <daniel48@126.com>
+pysqz <randomq@126.com>
+Qiang Huang <h.huangqiang@huawei.com>
+Qinglan Peng <qinglanpeng@zju.edu.cn>
+qudongfang <qudongfang@gmail.com>
+Quentin Brossard <qbrossard@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
+Quentin Tayssier <qtayssier@gmail.com>
+r0n22 <cameron.regan@gmail.com>
+Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
+Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Raghuram Devarakonda <draghuram@gmail.com>
+Raja Sami <raja.sami@tenpearls.com>
+Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
+Ralf Sippl <ralf.sippl@gmail.com>
+Ralle <spam@rasmusa.net>
+Ralph Bean <rbean@redhat.com>
+Ramkumar Ramachandra <artagnon@gmail.com>
+Ramon Brooker <rbrooker@aetherealmind.com>
+Ramon van Alteren <ramon@vanalteren.nl>
+Ray Tsang <rayt@google.com>
+ReadmeCritic <frankensteinbot@gmail.com>
+Recursive Madman <recursive.madman@gmx.de>
+Reficul <xuzhenglun@gmail.com>
+Regan McCooey <rmccooey27@aol.com>
+Remi Rampin <remirampin@gmail.com>
+Remy Suen <remy.suen@gmail.com>
+Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
+Renaud Gaubert <rgaubert@nvidia.com>
+Rhys Hiltner <rhys@twitch.tv>
+Ri Xu <xuri.me@gmail.com>
+Ricardo N Feliciano <FelicianoTech@gmail.com>
+Rich Moyse <rich@moyse.us>
+Rich Seymour <rseymour@gmail.com>
+Richard <richard.scothern@gmail.com>
+Richard Burnison <rburnison@ebay.com>
+Richard Harvey <richard@squarecows.com>
+Richard Mathie <richard.mathie@amey.co.uk>
+Richard Metzler <richard@paadee.com>
+Richard Scothern <richard.scothern@gmail.com>
+Richo Healey <richo@psych0tik.net>
+Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Rick Wieman <git@rickw.nl>
+Rik Nijessen <rik@keefo.nl>
+Riku Voipio <riku.voipio@linaro.org>
+Riley Guerin <rileytg.dev@gmail.com>
+Ritesh H Shukla <sritesh@vmware.com>
+Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
+Rob Vesse <rvesse@dotnetrdf.org>
+Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
+Robert Obryk <robryk@gmail.com>
+Robert Schneider <mail@shakeme.info>
+Robert Stern <lexandro2000@gmail.com>
+Robert Terhaar <rterhaar@atlanticdynamic.com>
+Robert Wallis <smilingrob@gmail.com>
+Roberto G. Hashioka <roberto.hashioka@docker.com>
+Roberto Muñoz Fernández <robertomf@gmail.com>
+Robin Naundorf <r.naundorf@fh-muenster.de>
+Robin Schneider <ypid@riseup.net>
+Robin Speekenbrink <robin@kingsquare.nl>
+robpc <rpcann@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Rodrigo Vaz <rodrigo.vaz@gmail.com>
+Roel Van Nyen <roel.vannyen@gmail.com>
+Roger Peppe <rogpeppe@gmail.com>
+Rohit Jnagal <jnagal@google.com>
+Rohit Kadam <rohit.d.kadam@gmail.com>
+Rojin George <rojingeorge@huawei.com>
+Roland Huß <roland@jolokia.org>
+Roland Kammerer <roland.kammerer@linbit.com>
+Roland Moriz <rmoriz@users.noreply.github.com>
+Roma Sokolov <sokolov.r.v@gmail.com>
+Roman Dudin <katrmr@gmail.com>
+Roman Strashkin <roman.strashkin@gmail.com>
+Ron Smits <ron.smits@gmail.com>
+Ron Williams <ron.a.williams@gmail.com>
+root <docker-dummy@example.com>
+root <root@lxdebmas.marist.edu>
+root <root@ubuntu-14.04-amd64-vbox>
+root <root@webm215.cluster016.ha.ovh.net>
+Rory Hunter <roryhunter2@gmail.com>
+Rory McCune <raesene@gmail.com>
+Ross Boucher <rboucher@gmail.com>
+Rovanion Luckey <rovanion.luckey@gmail.com>
+Royce Remer <royceremer@gmail.com>
+Rozhnov Alexandr <nox73@ya.ru>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Rui Lopes <rgl@ruilopes.com>
+Runshen Zhu <runshen.zhu@gmail.com>
+Russ Magee <rmagee@gmail.com>
+Ryan Abrams <rdabrams@gmail.com>
+Ryan Anderson <anderson.ryanc@gmail.com>
+Ryan Aslett <github@mixologic.com>
+Ryan Belgrave <rmb1993@gmail.com>
+Ryan Detzel <ryan.detzel@gmail.com>
+Ryan Fowler <rwfowler@gmail.com>
+Ryan Liu <ryanlyy@me.com>
+Ryan McLaughlin <rmclaughlin@insidesales.com>
+Ryan O'Donnell <odonnellryanc@gmail.com>
+Ryan Seto <ryanseto@yak.net>
+Ryan Simmen <ryan.simmen@gmail.com>
+Ryan Stelly <ryan.stelly@live.com>
+Ryan Thomas <rthomas@atlassian.com>
+Ryan Trauntvein <rtrauntvein@novacoast.com>
+Ryan Wallner <ryan.wallner@clusterhq.com>
+Ryan Zhang <ryan.zhang@docker.com>
+ryancooper7 <ryan.cooper7@gmail.com>
+RyanDeng <sheldon.d1018@gmail.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
+s. rannou <mxs@sbrk.org>
+s00318865 <sunyuan3@huawei.com>
+Sabin Basyal <sabin.basyal@gmail.com>
+Sachin Joshi <sachin_jayant_joshi@hotmail.com>
+Sagar Hani <sagarhani33@gmail.com>
+Sainath Grandhi <sainath.grandhi@intel.com>
+Sakeven Jiang <jc5930@sina.cn>
+Salahuddin Khan <salah@docker.com>
+Sally O'Malley <somalley@redhat.com>
+Sam Abed <sam.abed@gmail.com>
+Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
+Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Neirinck <sam@samneirinck.com>
+Sam Reis <sreis@atlassian.com>
+Sam Rijs <srijs@airpost.net>
+Sambuddha Basu <sambuddhabasu1@gmail.com>
+Sami Wagiaalla <swagiaal@redhat.com>
+Samuel Andaya <samuel@andaya.net>
+Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
+Samuel Karp <skarp@amazon.com>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
+Sandeep Bansal <sabansal@microsoft.com>
+Sankar சங்கர் <sankar.curiosity@gmail.com>
+Sanket Saurav <sanketsaurav@gmail.com>
+Santhosh Manohar <santhosh@docker.com>
+sapphiredev <se.imas.kr@gmail.com>
+Sargun Dhillon <sargun@netflix.com>
+Sascha Andres <sascha.andres@outlook.com>
+Satnam Singh <satnam@raintown.org>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
+Satoshi Tagomori <tagomoris@gmail.com>
+Scott Bessler <scottbessler@gmail.com>
+Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Stamp <scottstamp851@gmail.com>
+Scott Walls <sawalls@umich.edu>
+sdreyesg <sdreyesg@gmail.com>
+Sean Christopherson <sean.j.christopherson@intel.com>
+Sean Cronin <seancron@gmail.com>
+Sean Lee <seanlee@tw.ibm.com>
+Sean McIntyre <s.mcintyre@xverba.ca>
+Sean OMeara <sean@chef.io>
+Sean P. Kane <skane@newrelic.com>
+Sean Rodman <srodman7689@gmail.com>
+Sebastiaan van Steenis <mail@superseb.nl>
+Sebastiaan van Stijn <github@gone.nl>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+Senthil Kumaran <senthil@uthcode.com>
+SeongJae Park <sj38.park@gmail.com>
+Seongyeol Lim <seongyeol37@gmail.com>
+Serge Hallyn <serge.hallyn@ubuntu.com>
+Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
+Sergey Evstifeev <sergey.evstifeev@gmail.com>
+Sergii Kabashniuk <skabashnyuk@codenvy.com>
+Serhat Gülçiçek <serhat25@gmail.com>
+SeungUkLee <lsy931106@gmail.com>
+Sevki Hasirci <s@sevki.org>
+Shane Canon <scanon@lbl.gov>
+Shane da Silva <shane@dasilva.io>
+Shaun Kaasten <shaunk@gmail.com>
+shaunol <shaunol@gmail.com>
+Shawn Landden <shawn@churchofgit.com>
+Shawn Siefkas <shawn.siefkas@meredith.com>
+shawnhe <shawnhe@shawnhedeMacBook-Pro.local>
+Shayne Wang <shaynexwang@gmail.com>
+Shekhar Gulati <shekhargulati84@gmail.com>
+Sheng Yang <sheng@yasker.org>
+Shengbo Song <thomassong@tencent.com>
+Shev Yan <yandong_8212@163.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Shijiang Wei <mountkin@gmail.com>
+Shijun Qin <qinshijun16@mails.ucas.ac.cn>
+Shishir Mahajan <shishir.mahajan@redhat.com>
+Shoubhik Bose <sbose78@gmail.com>
+Shourya Sarcar <shourya.sarcar@gmail.com>
+shuai-z <zs.broccoli@gmail.com>
+Shukui Yang <yangshukui@huawei.com>
+Shuwei Hao <haosw@cn.ibm.com>
+Sian Lerk Lau <kiawin@gmail.com>
+Sidhartha Mani <sidharthamn@gmail.com>
+sidharthamani <sid@rancher.com>
+Silas Sewell <silas@sewell.org>
+Silvan Jegen <s.jegen@gmail.com>
+Simei He <hesimei@zju.edu.cn>
+Simon Eskildsen <sirup@sirupsen.com>
+Simon Ferquel <simon.ferquel@docker.com>
+Simon Leinen <simon.leinen@gmail.com>
+Simon Menke <simon.menke@gmail.com>
+Simon Taranto <simon.taranto@gmail.com>
+Simon Vikstrom <pullreq@devsn.se>
+Sindhu S <sindhus@live.in>
+Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+Solganik Alexander <solganik@gmail.com>
+Solomon Hykes <solomon@docker.com>
+Song Gao <song@gao.io>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+Soulou <leo@unbekandt.eu>
+Spencer Brown <spencer@spencerbrown.org>
+Spencer Smith <robertspencersmith@gmail.com>
+Sridatta Thatipamala <sthatipamala@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
+Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
+Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
+Steeve Morin <steeve.morin@gmail.com>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+Stefan J. Wernli <swernli@microsoft.com>
+Stefan Praszalowicz <stefan@greplin.com>
+Stefan S. <tronicum@user.github.com>
+Stefan Scherer <scherer_stefan@icloud.com>
+Stefan Staudenmeyer <doerte@instana.com>
+Stefan Weil <sw@weilnetz.de>
+Stephan Spindler <shutefan@gmail.com>
+Stephen Crosby <stevecrozz@gmail.com>
+Stephen Day <stephen.day@docker.com>
+Stephen Drake <stephen@xenolith.net>
+Stephen Rust <srust@blockbridge.com>
+Steve Desmond <steve@vtsv.ca>
+Steve Dougherty <steve@asksteved.com>
+Steve Durrheimer <s.durrheimer@gmail.com>
+Steve Francia <steve.francia@gmail.com>
+Steve Koch <stevekochscience@gmail.com>
+Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Erenst <stevenerenst@gmail.com>
+Steven Hartland <steven.hartland@multiplay.co.uk>
+Steven Iveson <sjiveson@outlook.com>
+Steven Merrill <steven.merrill@gmail.com>
+Steven Richards <steven@axiomzen.co>
+Steven Taylor <steven.taylor@me.com>
+Subhajit Ghosh <isubuz.g@gmail.com>
+Sujith Haridasan <sujith.h@gmail.com>
+Sun Gengze <690388648@qq.com>
+Sun Jianbo <wonderflow.sun@gmail.com>
+Sunny Gogoi <indiasuny000@gmail.com>
+Suryakumar Sudar <surya.trunks@gmail.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Swapnil Daingade <swapnil.daingade@gmail.com>
+Sylvain Baubeau <sbaubeau@redhat.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+Sébastien <sebastien@yoozio.com>
+Sébastien HOUZÉ <cto@verylastroom.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
+Tabakhase <mail@tabakhase.com>
+Tadej Janež <tadej.j@nez.si>
+TAGOMORI Satoshi <tagomoris@gmail.com>
+tang0th <tang0th@gmx.com>
+Tangi Colin <tangicolin@gmail.com>
+Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
+Taylan Isikdemir <taylani@google.com>
+Taylor Jones <monitorjbl@gmail.com>
+Ted M. Young <tedyoung@gmail.com>
+Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejaswini Duggaraju <naduggar@microsoft.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
+terryding77 <550147740@qq.com>
+tgic <farmer1992@gmail.com>
+Thatcher Peskens <thatcher@docker.com>
+theadactyl <thea.lamkin@gmail.com>
+Thell 'Bo' Fowler <thell@tbfowler.name>
+Thermionix <bond711@gmail.com>
+Thijs Terlouw <thijsterlouw@gmail.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
+Thomas Gazagnaire <thomas@gazagnaire.org>
+Thomas Grainger <tagrain@gmail.com>
+Thomas Hansen <thomas.hansen@gmail.com>
+Thomas Leonard <thomas.leonard@docker.com>
+Thomas Léveil <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
+Thomas Riccardi <riccardi@systran.fr>
+Thomas Schroeter <thomas@cliqz.com>
+Thomas Sjögren <konstruktoid@users.noreply.github.com>
+Thomas Swift <tgs242@gmail.com>
+Thomas Tanaka <thomas.tanaka@oracle.com>
+Thomas Texier <sharkone@en-mousse.org>
+Ti Zhou <tizhou1986@gmail.com>
+Tianon Gravi <admwiggin@gmail.com>
+Tianyi Wang <capkurmagati@gmail.com>
+Tibor Vass <teabee89@gmail.com>
+Tiffany Jernigan <tiffany.f.j@gmail.com>
+Tiffany Low <tiffany@box.com>
+Tim Bart <tim@fewagainstmany.com>
+Tim Bosse <taim@bosboot.org>
+Tim Dettrick <t.dettrick@uq.edu.au>
+Tim Düsterhus <tim@bastelstu.be>
+Tim Hockin <thockin@google.com>
+Tim Potter <tpot@hpe.com>
+Tim Ruffles <oi@truffles.me.uk>
+Tim Smith <timbot@google.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Tim Wang <timwangdev@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
+Tim Zju <21651152@zju.edu.cn>
+timfeirg <kkcocogogo@gmail.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
+Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Bradtke <webwurst@gmail.com>
+Tobias Gesellchen <tobias@gesellix.de>
+Tobias Klauser <tklauser@distanz.ch>
+Tobias Munk <schmunk@usrbin.de>
+Tobias Schmidt <ts@soundcloud.com>
+Tobias Schwab <tobias.schwab@dynport.de>
+Todd Crane <todd@toddcrane.com>
+Todd Lunter <tlunter@gmail.com>
+Todd Whiteman <todd.whiteman@joyent.com>
+Toli Kuznets <toli@docker.com>
+Tom Barlow <tomwbarlow@gmail.com>
+Tom Booth <tombooth@gmail.com>
+Tom Denham <tom@tomdee.co.uk>
+Tom Fotherby <tom+github@peopleperhour.com>
+Tom Howe <tom.howe@enstratius.com>
+Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom Sweeney <tsweeney@redhat.com>
+Tom Wilkie <tom.wilkie@gmail.com>
+Tom X. Tobin <tomxtobin@tomxtobin.com>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tomáš Hrčka <thrcka@redhat.com>
+Tonny Xu <tonny.xu@gmail.com>
+Tony Abboud <tdabboud@hotmail.com>
+Tony Daws <tony@daws.ca>
+Tony Miller <mcfiredrill@gmail.com>
+toogley <toogley@mailbox.org>
+Torstein Husebø <torstein@huseboe.net>
+Tõnis Tiigi <tonistiigi@gmail.com>
+tpng <benny.tpng@gmail.com>
+tracylihui <793912329@qq.com>
+Trapier Marshall <trapier.marshall@docker.com>
+Travis Cline <travis.cline@gmail.com>
+Travis Thieman <travis.thieman@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
+Trevor <trevinwoodstock@gmail.com>
+Trevor Pounds <trevor.pounds@gmail.com>
+Trevor Sullivan <pcgeek86@gmail.com>
+Trishna Guha <trishnaguha17@gmail.com>
+Tristan Carel <tristan@cogniteev.com>
+Troy Denton <trdenton@gmail.com>
+Tycho Andersen <tycho@docker.com>
+Tyler Brock <tyler.brock@gmail.com>
+Tyler Brown <tylers.pile@gmail.com>
+Tzu-Jung Lee <roylee17@gmail.com>
+uhayate <uhayate.gong@daocloud.io>
+Ulysse Carion <ulyssecarion@gmail.com>
+Umesh Yadav <umesh4257@gmail.com>
+Utz Bacher <utz.bacher@de.ibm.com>
+vagrant <vagrant@ubuntu-14.04-amd64-vbox>
+Vaidas Jablonskis <jablonskis@gmail.com>
+vanderliang <lansheng@meili-inc.com>
+Veres Lajos <vlajos@gmail.com>
+Victor Algaze <valgaze@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Costan <costan@gmail.com>
+Victor I. Wood <viw@t2am.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Victor Marmol <vmarmol@google.com>
+Victor Palma <palma.victor@gmail.com>
+Victor Vieux <victor.vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
+Vijaya Kumar K <vijayak@caviumnetworks.com>
+Viktor Stanchev <me@viktorstanchev.com>
+Viktor Vojnovski <viktor.vojnovski@amadeus.com>
+VinayRaghavanKS <raghavan.vinay@gmail.com>
+Vincent Batts <vbatts@redhat.com>
+Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Demeester <vincent.demeester@docker.com>
+Vincent Giersch <vincent.giersch@ovh.net>
+Vincent Mayers <vincent.mayers@inbloom.org>
+Vincent Woo <me@vincentwoo.com>
+Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
+Vishnu Kannan <vishnuk@google.com>
+Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
+Vitor Monteiro <vmrmonteiro@gmail.com>
+Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
+Vladimir Bulyga <xx@ccxx.cc>
+Vladimir Kirillov <proger@wilab.org.ua>
+Vladimir Pouzanov <farcaller@google.com>
+Vladimir Rutsky <altsysrq@gmail.com>
+Vladimir Varankin <nek.narqo+git@gmail.com>
+VladimirAus <v_roudakov@yahoo.com>
+Vlastimil Zeman <vlastimil.zeman@diffblue.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
+waitingkuo <waitingkuo0527@gmail.com>
+Walter Leibbrandt <github@wrl.co.za>
+Walter Stanish <walter@pratyeka.org>
+Wang Chao <chao.wang@ucloud.cn>
+Wang Guoliang <liangcszzu@163.com>
+Wang Jie <wangjie5@chinaskycloud.com>
+Wang Long <long.wanglong@huawei.com>
+Wang Ping <present.wp@icloud.com>
+Wang Xing <hzwangxing@corp.netease.com>
+Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Ward Vandewege <ward@jhvc.com>
+WarheadsSE <max@warheads.net>
+Wassim Dhif <wassimdhif@gmail.com>
+Wayne Chang <wayne@neverfear.org>
+Wayne Song <wsong@docker.com>
+Weerasak Chongnguluam <singpor@gmail.com>
+Wei Wu <wuwei4455@gmail.com>
+Wei-Ting Kuo <waitingkuo0527@gmail.com>
+weipeng <weipeng@tuscloud.io>
+weiyan <weiyan3@huawei.com>
+Weiyang Zhu <cnresonant@gmail.com>
+Wen Cheng Ma <wenchma@cn.ibm.com>
+Wendel Fleming <wfleming@usc.edu>
+Wenjun Tang <tangwj2@lenovo.com>
+Wenkai Yin <yinw@vmware.com>
+Wentao Zhang <zhangwentao234@huawei.com>
+Wenxuan Zhao <viz@linux.com>
+Wenyu You <21551128@zju.edu.cn>
+Wenzhi Liang <wenzhi.liang@gmail.com>
+Wes Morgan <cap10morgan@gmail.com>
+Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
+Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
+willhf <willhf@gmail.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Hubbs <w.d.hubbs@gmail.com>
+William Martin <wmartin@pivotal.io>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+WiseTrem <shepelyov.g@gmail.com>
+Wolfgang Powisch <powo@powo.priv.at>
+Wonjun Kim <wonjun.kim@navercorp.com>
+xamyzhao <x.amy.zhao@gmail.com>
+Xianglin Gao <xlgao@zju.edu.cn>
+Xianlu Bird <xianlubird@gmail.com>
+XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
+Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
+xiekeyang <xiekeyang@huawei.com>
+Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
+Xinbo Weng <xihuanbo_0521@zju.edu.cn>
+Xinzi Zhou <imdreamrunner@gmail.com>
+Xiuming Chen <cc@cxm.cc>
+Xuecong Liao <satorulogic@gmail.com>
+xuzhaokui <cynicholas@gmail.com>
+Yadnyawalkya Tale <ytale@redhat.com>
+Yahya <ya7yaz@gmail.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yamasaki Masahide <masahide.y@gmail.com>
+Yan Feng <yanfeng2@huawei.com>
+Yang Bai <hamo.by@gmail.com>
+Yang Pengfei <yangpengfei4@huawei.com>
+yangchenliang <yangchenliang@huawei.com>
+Yanqiang Miao <miao.yanqiang@zte.com.cn>
+Yao Zaiyong <yaozaiyong@hotmail.com>
+Yassine Tijani <yasstij11@gmail.com>
+Yasunori Mahata <nori@mahata.net>
+Yazhong Liu <yorkiefixer@gmail.com>
+Yestin Sun <sunyi0804@gmail.com>
+Yi EungJun <eungjun.yi@navercorp.com>
+Yibai Zhang <xm1994@gmail.com>
+Yihang Ho <hoyihang5@gmail.com>
+Ying Li <ying.li@docker.com>
+Yohei Ueda <yohei@jp.ibm.com>
+Yong Tang <yong.tang.github@outlook.com>
+Yongzhi Pan <panyongzhi@gmail.com>
+Yosef Fertel <yfertel@gmail.com>
+You-Sheng Yang (楊有勝) <vicamo@gmail.com>
+Youcef YEKHLEF <yyekhlef@gmail.com>
+Yu Changchun <yuchangchun1@huawei.com>
+Yu Chengxia <yuchengxia@huawei.com>
+Yu Peng <yu.peng36@zte.com.cn>
+Yu-Ju Hong <yjhong@google.com>
+Yuan Sun <sunyuan3@huawei.com>
+Yuanhong Peng <pengyuanhong@huawei.com>
+Yuhao Fang <fangyuhao@gmail.com>
+Yuichiro Kaneko <spiketeika@gmail.com>
+Yunxiang Huang <hyxqshk@vip.qq.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+Yves Junqueira <yves.junqueira@gmail.com>
+Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
+Zachary Jaffee <zjaffee@us.ibm.com>
+Zain Memon <zain@inzain.net>
+Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
+Zefan Li <lizefan@huawei.com>
+Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
+Zhang Kun <zkazure@gmail.com>
+Zhang Wei <zhangwei555@huawei.com>
+Zhang Wentao <zhangwentao234@huawei.com>
+ZhangHang <stevezhang2014@gmail.com>
+zhangxianwei <xianwei.zw@alibaba-inc.com>
+Zhenan Ye <21551168@zju.edu.cn>
+zhenghenghuo <zhenghenghuo@zju.edu.cn>
+Zhenkun Bi <bi.zhenkun@zte.com.cn>
+Zhou Hao <zhouhao@cn.fujitsu.com>
+Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
+Zhu Kunjia <zhu.kunjia@zte.com.cn>
+Zhuoyun Wei <wzyboy@wzyboy.org>
+Zilin Du <zilin.du@gmail.com>
+zimbatm <zimbatm@zimbatm.com>
+Ziming Dong <bnudzm@foxmail.com>
+ZJUshuaizhou <21551191@zju.edu.cn>
+zmarouf <zeid.marouf@gmail.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
+Zou Yu <zouyu7@huawei.com>
+zqh <zqhxuyuan@gmail.com>
+Zuhayr Elahi <elahi.zuhayr@gmail.com>
+Zunayed Ali <zunayed@gmail.com>
+Álex González <agonzalezro@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+Átila Camurça Alves <camurca.home@gmail.com>
+尹吉峰 <jifeng.yin@gmail.com>
+徐俊杰 <paco.xu@daocloud.io>
+慕陶 <jihui.xjh@alibaba-inc.com>
+搏通 <yufeng.pyf@alibaba-inc.com>
+黄艳红00139573 <huang.yanhong@zte.com.cn>
diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md
deleted file mode 100644
index 534fd97db..000000000
--- a/vendor/github.com/docker/docker/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-The Moby Project
-================
-
-![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
-
-Moby is an open-source project created by Docker to enable and accelerate software containerization.
-
-It provides a "Lego set" of toolkit components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts and professionals to experiment and exchange ideas.
-Components include container build tools, a container registry, orchestration tools, a runtime and more, and these can be used as building blocks in conjunction with other tools and projects.
-
-## Principles
-
-Moby is an open project guided by strong principles, aiming to be modular, flexible and without too strong an opinion on user experience.
-It is open to the community to help set its direction.
-
-- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
-- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
-- Usable security: Moby provides secure defaults without compromising usability.
-- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
-They are not necessarily intended as end user tools but as components aimed at developers.
-Documentation and UX is aimed at developers not end users.
-
-## Audience
-
-The Moby Project is intended for engineers, integrators and enthusiasts looking to modify, hack, fix, experiment, invent and build systems based on containers.
-It is not for people looking for a commercially supported system, but for people who want to work and learn with open source code.
-
-## Relationship with Docker
-
-The components and tools in the Moby Project are initially the open source components that Docker and the community have built for the Docker Project.
-New projects can be added if they fit with the community goals. Docker is committed to using Moby as the upstream for the Docker Product.
-However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.
-
-The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
-The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
-
------
-
-Legal
-=====
-
-*Brought to you courtesy of our legal counsel. For more context,
-please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
-
-Use and transfer of Moby may be subject to certain restrictions by the
-United States and other governments.
-
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-Licensing
-=========
-Moby is licensed under the Apache License, Version 2.0. See
-[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
-license text.
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
new file mode 100644
index 000000000..f07a02737
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml
@@ -0,0 +1,12 @@
+
+layout:
+ models:
+ - name: definition
+ source: asset:model
+ target: "{{ joinFilePath .Target .ModelPackage }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
+ operations:
+ - name: handler
+ source: asset:serverOperation
+ target: "{{ joinFilePath .Target .APIPackage .Package }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
new file mode 100644
index 000000000..8652c368c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -0,0 +1,10390 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.40"
+info:
+ title: "Docker Engine API"
+ version: "1.40"
+ x-logo:
+ url: "https://docs.docker.com/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release, so API calls are versioned to
+ ensure that clients don't break. To lock to a specific version of the API,
+ you prefix the URL with its version, for example, call `/v1.30/info` to use
+ the v1.30 version of the `/info` endpoint. If the API version specified in
+ the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+ is returned.
+
+ If you omit the version-prefix, the current version of the API (v1.40) is used.
+ For example, calling `/info` is the same as calling `/v1.40/info`. Using the
+ API without a version-prefix is deprecated and will be removed in a future release.
+
+ Engine releases in the near future should support this version of the API,
+ so your client will continue to work even if it is talking to a newer Engine.
+
+ The API uses an open schema model, which means the server may add extra properties
+ to responses. Likewise, the server will ignore any extra query parameters and
+ request body properties. When you write clients, you need to ignore additional
+ properties in responses to ensure they do not break when talking to newer
+ daemons.
+
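To make the versioning and error conventions above concrete, here is a minimal Go sketch of a client that pins the version prefix and decodes the documented error body. The Unix socket path, the pseudo-host `unix`, and the `errorEnvelope` type are illustrative assumptions, not part of the vendored package.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// errorEnvelope mirrors the {"message": "..."} body described above.
type errorEnvelope struct {
	Message string `json:"message"`
}

func main() {
	// Talk to the local daemon over its Unix socket (assumed default path).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// Pin the API version by prefixing the path, as described above.
	resp, err := client.Get("http://unix/v1.40/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		// Failures carry a JSON body in the documented format.
		var e errorEnvelope
		_ = json.NewDecoder(resp.Body).Decode(&e)
		fmt.Println("API error:", resp.StatusCode, e.Message)
		return
	}
	fmt.Println("status:", resp.Status)
}
```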
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as an `X-Registry-Auth` header containing a Base64 encoded (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "email": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
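Building that header in a client can be sketched in Go as follows. The `authConfig` struct, the push URL, and the choice of URL-safe base64 are illustrative assumptions for this sketch rather than the vendored client code.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// authConfig mirrors the credential structure shown above.
type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email"`
	Serveraddress string `json:"serveraddress"`
}

// newPushRequest builds a request for an image-push style endpoint and
// attaches the credentials as the X-Registry-Auth header.
func newPushRequest(url string, auth authConfig) (*http.Request, error) {
	payload, err := json.Marshal(auth)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		return nil, err
	}
	// URL-safe base64 is assumed here; the description above only says
	// "Base64 encoded".
	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(payload))
	return req, nil
}

func main() {
	req, err := newPushRequest(
		"http://unix/v1.40/images/example/push", // hypothetical endpoint URL
		authConfig{Username: "user", Password: "secret", Serveraddress: "registry.example.com"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("X-Registry-Auth"))
}
```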
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image"
+ x-displayName: "Images"
+ - name: "Network"
+ x-displayName: "Networks"
+ description: |
+ Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.
+ - name: "Volume"
+ x-displayName: "Volumes"
+ description: |
+ Create and manage persistent storage that can be attached to containers.
+ - name: "Exec"
+ x-displayName: "Exec"
+ description: |
+ Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.
+
+ To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
+ # Swarm things
+ - name: "Swarm"
+ x-displayName: "Swarm"
+ description: |
+ Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.
+ - name: "Node"
+ x-displayName: "Nodes"
+ description: |
+ Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Service"
+ x-displayName: "Services"
+ description: |
+ Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Task"
+ x-displayName: "Tasks"
+ description: |
+ A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Secret"
+ x-displayName: "Secrets"
+ description: |
+ Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.
+ - name: "Config"
+ x-displayName: "Configs"
+ description: |
+ Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.
+ # System things
+ - name: "Plugin"
+ x-displayName: "Plugins"
+ - name: "System"
+ x-displayName: "System"
+
+definitions:
+ Port:
+ type: "object"
+ description: "An open port on a container"
+ required: [PrivatePort, Type]
+ properties:
+ IP:
+ type: "string"
+ format: "ip-address"
+ description: "Host IP address that the container's port is mapped to"
+ PrivatePort:
+ type: "integer"
+ format: "uint16"
+ x-nullable: false
+ description: "Port on the container"
+ PublicPort:
+ type: "integer"
+ format: "uint16"
+ description: "Port exposed on the host"
+ Type:
+ type: "string"
+ x-nullable: false
+ enum: ["tcp", "udp", "sctp"]
+ example:
+ PrivatePort: 8080
+ PublicPort: 80
+ Type: "tcp"
+
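A client decoding the `Port` definition above might use a Go shape like the following; the struct is illustrative only and is not the vendored `types` package.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Port mirrors the schema above: PrivatePort and Type are required,
// IP and PublicPort only appear for published ports.
type Port struct {
	IP          string `json:"IP,omitempty"`
	PrivatePort uint16 `json:"PrivatePort"`
	PublicPort  uint16 `json:"PublicPort,omitempty"`
	Type        string `json:"Type"` // "tcp", "udp" or "sctp"
}

func main() {
	raw := `{"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"}`
	var p Port
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%d/%s -> %d\n", p.PrivatePort, p.Type, p.PublicPort)
}
```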
+ MountPoint:
+ type: "object"
+ description: "A mount point inside a container"
+ properties:
+ Type:
+ type: "string"
+ Name:
+ type: "string"
+ Source:
+ type: "string"
+ Destination:
+ type: "string"
+ Driver:
+ type: "string"
+ Mode:
+ type: "string"
+ RW:
+ type: "boolean"
+ Propagation:
+ type: "string"
+
+ DeviceMapping:
+ type: "object"
+ description: "A device mapping between the host and container"
+ properties:
+ PathOnHost:
+ type: "string"
+ PathInContainer:
+ type: "string"
+ CgroupPermissions:
+ type: "string"
+ example:
+ PathOnHost: "/dev/deviceName"
+ PathInContainer: "/dev/deviceName"
+ CgroupPermissions: "mrw"
+
+ ThrottleDevice:
+ type: "object"
+ properties:
+ Path:
+ description: "Device path"
+ type: "string"
+ Rate:
+ description: "Rate"
+ type: "integer"
+ format: "int64"
+ minimum: 0
+
+ Mount:
+ type: "object"
+ properties:
+ Target:
+ description: "Container path."
+ type: "string"
+ Source:
+ description: "Mount source (e.g. a volume name, a host path)."
+ type: "string"
+ Type:
+ description: |
+ The mount type. Available types:
+
+ - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
+ - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
+ - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
+ - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
+ type: "string"
+ enum:
+ - "bind"
+ - "volume"
+ - "tmpfs"
+ - "npipe"
+ ReadOnly:
+ description: "Whether the mount should be read-only."
+ type: "boolean"
+ Consistency:
+ description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
+ type: "string"
+ BindOptions:
+ description: "Optional configuration for the `bind` type."
+ type: "object"
+ properties:
+ Propagation:
+ description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
+ type: "string"
+ enum:
+ - "private"
+ - "rprivate"
+ - "shared"
+ - "rshared"
+ - "slave"
+ - "rslave"
+ NonRecursive:
+ description: "Disable recursive bind mount."
+ type: "boolean"
+ default: false
+ VolumeOptions:
+ description: "Optional configuration for the `volume` type."
+ type: "object"
+ properties:
+ NoCopy:
+ description: "Populate volume with data from the target."
+ type: "boolean"
+ default: false
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ DriverConfig:
+ description: "Map of driver specific options"
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the driver to use to create the volume."
+ type: "string"
+ Options:
+ description: "key/value map of driver specific options."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TmpfsOptions:
+ description: "Optional configuration for the `tmpfs` type."
+ type: "object"
+ properties:
+ SizeBytes:
+ description: "The size for the tmpfs mount in bytes."
+ type: "integer"
+ format: "int64"
+ Mode:
+ description: "The permission mode for the tmpfs mount in an integer."
+ type: "integer"
+
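The mount types above differ mainly in how `Source` is interpreted (host path for `bind`, volume name for `volume`, absent for `tmpfs`). A hedged Go sketch of the JSON each form produces under this schema; the struct and the paths are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mount mirrors the subset of the Mount schema above that this sketch uses.
type mount struct {
	Type     string `json:"Type"`
	Source   string `json:"Source,omitempty"`
	Target   string `json:"Target"`
	ReadOnly bool   `json:"ReadOnly,omitempty"`
}

func main() {
	mounts := []mount{
		// bind: the host path must already exist when the container is created.
		{Type: "bind", Source: "/srv/app/config", Target: "/etc/app", ReadOnly: true},
		// volume: created (or reused) by name; not removed with the container.
		{Type: "volume", Source: "app-data", Target: "/var/lib/app"},
		// tmpfs: no source may be given.
		{Type: "tmpfs", Target: "/run/app"},
	}
	out, err := json.MarshalIndent(mounts, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```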
+ RestartPolicy:
+ description: |
+ The behavior to apply when the container exits. The default is not to restart.
+
+ An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ description: |
+ - Empty string means not to restart
+ - `always` Always restart
+ - `unless-stopped` Restart always except when the user has manually stopped the container
+ - `on-failure` Restart only when the container exit code is non-zero
+ enum:
+ - ""
+ - "always"
+ - "unless-stopped"
+ - "on-failure"
+ MaximumRetryCount:
+ type: "integer"
+ description: "If `on-failure` is used, the number of times to retry before giving up"
+
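A minimal sketch of an `on-failure` policy under this schema, together with the documented back-off (100ms, doubled before each restart). The `restartPolicy` struct is illustrative and the loop only prints the delays; it is not the daemon's scheduling code.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// restartPolicy mirrors the schema above.
type restartPolicy struct {
	Name              string `json:"Name"`
	MaximumRetryCount int    `json:"MaximumRetryCount,omitempty"`
}

func main() {
	// Retry up to 5 times on a non-zero exit code.
	p := restartPolicy{Name: "on-failure", MaximumRetryCount: 5}
	out, _ := json.Marshal(p)
	fmt.Println(string(out))

	// The documented back-off: start at 100ms, double before each restart.
	delay := 100 * time.Millisecond
	for i := 0; i < p.MaximumRetryCount; i++ {
		fmt.Printf("restart %d after %v\n", i+1, delay)
		delay *= 2
	}
}
```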
+ Resources:
+ description: "A container's resources (cgroups config, ulimits, etc)"
+ type: "object"
+ properties:
+ # Applicable to all platforms
+ CpuShares:
+ description: "An integer value representing this container's relative CPU weight versus other containers."
+ type: "integer"
+ Memory:
+ description: "Memory limit in bytes."
+ type: "integer"
+ format: "int64"
+ default: 0
+ # Applicable to UNIX platforms
+ CgroupParent:
+ description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist."
+ type: "string"
+ BlkioWeight:
+ description: "Block IO weight (relative weight)."
+ type: "integer"
+ minimum: 0
+ maximum: 1000
+ BlkioWeightDevice:
+ description: |
+ Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Path:
+ type: "string"
+ Weight:
+ type: "integer"
+ minimum: 0
+ BlkioDeviceReadBps:
+ description: |
+ Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteBps:
+ description: |
+ Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceReadIOps:
+ description: |
+ Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteIOps:
+ description: |
+ Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ CpuPeriod:
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ format: "int64"
+ CpuQuota:
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ format: "int64"
+ CpuRealtimePeriod:
+      description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time to real-time tasks."
+ type: "integer"
+ format: "int64"
+ CpuRealtimeRuntime:
+      description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time to real-time tasks."
+ type: "integer"
+ format: "int64"
+ CpusetCpus:
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)"
+ type: "string"
+ example: "0-3"
+ CpusetMems:
+ description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems."
+ type: "string"
+ Devices:
+ description: "A list of devices to add to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceMapping"
+ DeviceCgroupRules:
+ description: "a list of cgroup rules to apply to the container"
+ type: "array"
+ items:
+ type: "string"
+ example: "c 13:* rwm"
+ DiskQuota:
+ description: "Disk limit (in bytes)."
+ type: "integer"
+ format: "int64"
+ KernelMemory:
+ description: "Kernel memory limit in bytes."
+ type: "integer"
+ format: "int64"
+ example: 209715200
+ KernelMemoryTCP:
+ description: "Hard limit for kernel TCP buffer memory (in bytes)."
+ type: "integer"
+ format: "int64"
+ MemoryReservation:
+ description: "Memory soft limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemorySwap:
+ description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap."
+ type: "integer"
+ format: "int64"
+ MemorySwappiness:
+ description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100."
+ type: "integer"
+ format: "int64"
+ minimum: 0
+ maximum: 100
+ NanoCPUs:
+ description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+ type: "integer"
+ format: "int64"
+ OomKillDisable:
+ description: "Disable OOM Killer for the container."
+ type: "boolean"
+ Init:
+ description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used."
+ type: "boolean"
+ x-nullable: true
+ PidsLimit:
+ description: |
+ Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ Ulimits:
+ description: |
+        A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ # Applicable to Windows
+ CpuCount:
+ description: |
+ The number of usable CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ CpuPercent:
+ description: |
+ The usable percentage of the available CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ IOMaximumIOps:
+ description: "Maximum IOps for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+ IOMaximumBandwidth:
+ description: "Maximum IO in bytes per second for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+
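The units in `Resources` are easy to get wrong: `Memory` is plain bytes and `NanoCPUs` is billionths of a CPU. A small illustrative Go sketch covering just those two fields (the struct and values are assumptions for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// resources carries only the two fields used in this sketch.
type resources struct {
	Memory   int64 `json:"Memory"`   // bytes
	NanoCPUs int64 `json:"NanoCPUs"` // units of 1e-9 CPUs
}

func main() {
	// 512 MiB of memory and 1.5 CPUs.
	r := resources{
		Memory:   512 * 1024 * 1024,
		NanoCPUs: 1500000000,
	}
	out, _ := json.Marshal(r)
	fmt.Println(string(out))
}
```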
+ ResourceObject:
+ description: "An object describing the resources which can be advertised by a node and requested by a task"
+ type: "object"
+ properties:
+ NanoCPUs:
+ type: "integer"
+ format: "int64"
+ example: 4000000000
+ MemoryBytes:
+ type: "integer"
+ format: "int64"
+ example: 8272408576
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+
+ GenericResources:
+ description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)"
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NamedResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "string"
+ DiscreteResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "integer"
+ format: "int64"
+ example:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ HealthConfig:
+ description: "A test to perform to check that the container is healthy."
+ type: "object"
+ properties:
+ Test:
+ description: |
+ The test to perform. Possible values are:
+
+ - `[]` inherit healthcheck from image or parent image
+ - `["NONE"]` disable healthcheck
+ - `["CMD", args...]` exec arguments directly
+ - `["CMD-SHELL", command]` run command with system's default shell
+ type: "array"
+ items:
+ type: "string"
+ Interval:
+ description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Timeout:
+ description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Retries:
+ description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit."
+ type: "integer"
+ StartPeriod:
+ description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+
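All `HealthConfig` durations above are nanoseconds, which is a common stumbling block. A hedged Go sketch that fills them via `time.Duration`; the struct and the health command are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// healthConfig mirrors the schema above; all durations are in nanoseconds.
type healthConfig struct {
	Test        []string `json:"Test"`
	Interval    int64    `json:"Interval"`
	Timeout     int64    `json:"Timeout"`
	Retries     int      `json:"Retries"`
	StartPeriod int64    `json:"StartPeriod"`
}

func main() {
	hc := healthConfig{
		// Run the check through the container's default shell.
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    int64(30 * time.Second), // nanoseconds; must be 0 or >= 1ms
		Timeout:     int64(5 * time.Second),
		Retries:     3,
		StartPeriod: int64(10 * time.Second),
	}
	out, _ := json.Marshal(hc)
	fmt.Println(string(out))
}
```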
+ HostConfig:
+ description: "Container configuration that depends on the host we are running on"
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ # Applicable to all platforms
+ Binds:
+ type: "array"
+ description: |
+ A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+
+ - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
+ items:
+ type: "string"
+ ContainerIDFile:
+ type: "string"
+ description: "Path to a file where the container ID is written"
+ LogConfig:
+ type: "object"
+ description: "The logging configuration for this container"
+ properties:
+ Type:
+ type: "string"
+ enum:
+ - "json-file"
+ - "syslog"
+ - "journald"
+ - "gelf"
+ - "fluentd"
+ - "awslogs"
+ - "splunk"
+ - "etwlogs"
+ - "none"
+ Config:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ NetworkMode:
+ type: "string"
+ description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+            as a custom network's name to which this container should connect.
+ PortBindings:
+ $ref: "#/definitions/PortMap"
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ AutoRemove:
+ type: "boolean"
+ description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set."
+ VolumeDriver:
+ type: "string"
+ description: "Driver that this container uses to mount volumes."
+ VolumesFrom:
+ type: "array"
+ description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`."
+ items:
+ type: "string"
+ Mounts:
+ description: "Specification for mounts to be added to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+
+ # Applicable to UNIX platforms
+ Capabilities:
+ type: "array"
+ description: |
+            A list of kernel capabilities to be available for the container (this overrides the default set).
+
+            Conflicts with options 'CapAdd' and 'CapDrop'
+ items:
+ type: "string"
+ CapAdd:
+ type: "array"
+ description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'"
+ items:
+ type: "string"
+ CapDrop:
+ type: "array"
+ description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'"
+ items:
+ type: "string"
+ Dns:
+ type: "array"
+ description: "A list of DNS servers for the container to use."
+ items:
+ type: "string"
+ DnsOptions:
+ type: "array"
+ description: "A list of DNS options."
+ items:
+ type: "string"
+ DnsSearch:
+ type: "array"
+ description: "A list of DNS search domains."
+ items:
+ type: "string"
+ ExtraHosts:
+ type: "array"
+ description: |
+ A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+ items:
+ type: "string"
+ GroupAdd:
+ type: "array"
+ description: "A list of additional groups that the container process will run as."
+ items:
+ type: "string"
+ IpcMode:
+ type: "string"
+ description: |
+ IPC sharing mode for the container. Possible values are:
+
+ - `"none"`: own private IPC namespace, with /dev/shm not mounted
+ - `"private"`: own private IPC namespace
+ - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
+ - `"container:<name|id>"`: join another (shareable) container's IPC namespace
+ - `"host"`: use the host system's IPC namespace
+
+ If not specified, daemon default is used, which can either be `"private"`
+ or `"shareable"`, depending on daemon version and configuration.
+ Cgroup:
+ type: "string"
+ description: "Cgroup to use for the container."
+ Links:
+ type: "array"
+ description: "A list of links for the container in the form `container_name:alias`."
+ items:
+ type: "string"
+ OomScoreAdj:
+ type: "integer"
+ description: "An integer value containing the score given to the container in order to tune OOM killer preferences."
+ example: 500
+ PidMode:
+ type: "string"
+ description: |
+ Set the PID (Process) Namespace mode for the container. It can be either:
+
+ - `"container:<name|id>"`: joins another container's PID namespace
+ - `"host"`: use the host's PID namespace inside the container
+ Privileged:
+ type: "boolean"
+ description: "Gives the container full access to the host."
+ PublishAllPorts:
+ type: "boolean"
+ description: |
+ Allocates an ephemeral host port for all of a container's
+ exposed ports.
+
+ Ports are de-allocated when the container stops and allocated when the container starts.
+ The allocated port might be changed when restarting the container.
+
+ The port is selected from the ephemeral port range that depends on the kernel.
+ For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
+ ReadonlyRootfs:
+ type: "boolean"
+ description: "Mount the container's root filesystem as read only."
+ SecurityOpt:
+ type: "array"
+ description: "A list of string values to customize labels for MLS
+ systems, such as SELinux."
+ items:
+ type: "string"
+ StorageOpt:
+ type: "object"
+ description: |
+ Storage driver options for this container, in the form `{"size": "120G"}`.
+ additionalProperties:
+ type: "string"
+ Tmpfs:
+ type: "object"
+ description: |
+ A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+ additionalProperties:
+ type: "string"
+ UTSMode:
+ type: "string"
+ description: "UTS namespace to use for the container."
+ UsernsMode:
+ type: "string"
+ description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled."
+ ShmSize:
+ type: "integer"
+ description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB."
+ minimum: 0
+ Sysctls:
+ type: "object"
+ description: |
+ A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}`
+ additionalProperties:
+ type: "string"
+ Runtime:
+ type: "string"
+ description: "Runtime to use with this container."
+ # Applicable to Windows
+ ConsoleSize:
+ type: "array"
+ description: "Initial console size, as an `[height, width]` array. (Windows only)"
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ Isolation:
+ type: "string"
+ description: "Isolation technology of the container. (Windows only)"
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ MaskedPaths:
+ type: "array"
+ description: "The list of paths to be masked inside the container (this overrides the default set of paths)"
+ items:
+ type: "string"
+ ReadonlyPaths:
+ type: "array"
+ description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)"
+ items:
+ type: "string"
+
+ ContainerConfig:
+ description: "Configuration for a container that is portable between hosts"
+ type: "object"
+ properties:
+ Hostname:
+ description: "The hostname to use for the container, as a valid RFC 1123 hostname."
+ type: "string"
+ Domainname:
+ description: "The domain name to use for the container."
+ type: "string"
+ User:
+ description: "The user that commands are run as inside the container."
+ type: "string"
+ AttachStdin:
+ description: "Whether to attach to `stdin`."
+ type: "boolean"
+ default: false
+ AttachStdout:
+ description: "Whether to attach to `stdout`."
+ type: "boolean"
+ default: true
+ AttachStderr:
+ description: "Whether to attach to `stderr`."
+ type: "boolean"
+ default: true
+ ExposedPorts:
+ description: |
+ An object mapping ports to an empty object in the form:
+
+ `{"<port>/<tcp|udp|sctp>": {}}`
+ type: "object"
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
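+ # Illustrative sketch only, not part of the upstream spec:
+ #
+ #   "ExposedPorts": { "80/tcp": {}, "443/tcp": {}, "53/udp": {} }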
+ Tty:
+ description: "Attach standard streams to a TTY, including `stdin` if it is not closed."
+ type: "boolean"
+ default: false
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ default: false
+ StdinOnce:
+ description: "Close `stdin` after one attached client disconnects"
+ type: "boolean"
+ default: false
+ Env:
+ description: |
+ A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than having an empty value.
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ description: "Command to run specified as a string or an array of strings."
+ type: "array"
+ items:
+ type: "string"
+ Healthcheck:
+ $ref: "#/definitions/HealthConfig"
+ ArgsEscaped:
+ description: "Command is already escaped (Windows only)"
+ type: "boolean"
+ Image:
+ description: "The name of the image to use when creating the container"
+ type: "string"
+ Volumes:
+ description: "An object mapping mount point paths inside the container to empty objects."
+ type: "object"
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ WorkingDir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ Entrypoint:
+ description: |
+ The entry point for the container as a string or an array of strings.
+
+ If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+ type: "array"
+ items:
+ type: "string"
+ NetworkDisabled:
+ description: "Disable networking for the container."
+ type: "boolean"
+ MacAddress:
+ description: "MAC address of the container."
+ type: "string"
+ OnBuild:
+ description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`."
+ type: "array"
+ items:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ StopSignal:
+ description: "Signal to stop a container as a string or unsigned integer."
+ type: "string"
+ default: "SIGTERM"
+ StopTimeout:
+ description: "Timeout to stop a container in seconds."
+ type: "integer"
+ default: 10
+ Shell:
+ description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell."
+ type: "array"
+ items:
+ type: "string"
+
+ NetworkSettings:
+ description: "NetworkSettings exposes the network settings in the API"
+ type: "object"
+ properties:
+ Bridge:
+ description: Name of the network's bridge (for example, `docker0`).
+ type: "string"
+ example: "docker0"
+ SandboxID:
+ description: SandboxID uniquely represents a container's network stack.
+ type: "string"
+ example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
+ HairpinMode:
+ description: |
+ Indicates if hairpin NAT should be enabled on the virtual interface.
+ type: "boolean"
+ example: false
+ LinkLocalIPv6Address:
+ description: IPv6 unicast address using the link-local prefix.
+ type: "string"
+ example: "fe80::42:acff:fe11:1"
+ LinkLocalIPv6PrefixLen:
+ description: Prefix length of the IPv6 unicast address.
+ type: "integer"
+ example: "64"
+ Ports:
+ $ref: "#/definitions/PortMap"
+ SandboxKey:
+ description: SandboxKey identifies the sandbox
+ type: "string"
+ example: "/var/run/docker/netns/8ab54b426c38"
+
+ # TODO is SecondaryIPAddresses actually used?
+ SecondaryIPAddresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO is SecondaryIPv6Addresses actually used?
+ SecondaryIPv6Addresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO properties below are part of DefaultNetworkSettings, which is
+ # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
+ EndpointID:
+ description: |
+ EndpointID uniquely represents a service endpoint in a Sandbox.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.1"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 64
+ IPAddress:
+ description: |
+ IPv4 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address for this network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8:2::100"
+ MacAddress:
+ description: |
+ MAC address for the container on the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Networks:
+ description: |
+ Information about all networks that the container is connected to.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+
+ Address:
+ description: Address represents an IPv4 or IPv6 IP address.
+ type: "object"
+ properties:
+ Addr:
+ description: IP address.
+ type: "string"
+ PrefixLen:
+ description: Mask length of the IP address.
+ type: "integer"
+
+ PortMap:
+ description: |
+ PortMap describes the mapping of container ports to host ports, using the
+ container's port-number and protocol as key in the format `<port>/<protocol>`,
+ for example, `80/udp`.
+
+ If a container's port is mapped for multiple protocols, separate entries
+ are added to the mapping table.
+ type: "object"
+ additionalProperties:
+ type: "array"
+ items:
+ $ref: "#/definitions/PortBinding"
+ example:
+ "443/tcp":
+ - HostIp: "127.0.0.1"
+ HostPort: "4443"
+ "80/tcp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ - HostIp: "0.0.0.0"
+ HostPort: "8080"
+ "80/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ "53/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "53"
+ "2377/tcp": null
+
+ PortBinding:
+ description: |
+ PortBinding represents a binding between a host IP address and a host
+ port.
+ type: "object"
+ x-nullable: true
+ properties:
+ HostIp:
+ description: "Host IP address that the container's port is mapped to."
+ type: "string"
+ example: "127.0.0.1"
+ HostPort:
+ description: "Host port number that the container's port is mapped to."
+ type: "string"
+ example: "4443"
+
+ GraphDriverData:
+ description: "Information about a container's graph driver."
+ type: "object"
+ required: [Name, Data]
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ Data:
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+
+ Image:
+ type: "object"
+ required:
+ - Id
+ - Parent
+ - Comment
+ - Created
+ - Container
+ - DockerVersion
+ - Author
+ - Architecture
+ - Os
+ - Size
+ - VirtualSize
+ - GraphDriver
+ - RootFS
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ RepoTags:
+ type: "array"
+ items:
+ type: "string"
+ RepoDigests:
+ type: "array"
+ items:
+ type: "string"
+ Parent:
+ type: "string"
+ x-nullable: false
+ Comment:
+ type: "string"
+ x-nullable: false
+ Created:
+ type: "string"
+ x-nullable: false
+ Container:
+ type: "string"
+ x-nullable: false
+ ContainerConfig:
+ $ref: "#/definitions/ContainerConfig"
+ DockerVersion:
+ type: "string"
+ x-nullable: false
+ Author:
+ type: "string"
+ x-nullable: false
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ Architecture:
+ type: "string"
+ x-nullable: false
+ Os:
+ type: "string"
+ x-nullable: false
+ OsVersion:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ VirtualSize:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ GraphDriver:
+ $ref: "#/definitions/GraphDriverData"
+ RootFS:
+ type: "object"
+ required: [Type]
+ properties:
+ Type:
+ type: "string"
+ x-nullable: false
+ Layers:
+ type: "array"
+ items:
+ type: "string"
+ BaseLayer:
+ type: "string"
+ Metadata:
+ type: "object"
+ properties:
+ LastTagTime:
+ type: "string"
+ format: "dateTime"
+
+ ImageSummary:
+ type: "object"
+ required:
+ - Id
+ - ParentId
+ - RepoTags
+ - RepoDigests
+ - Created
+ - Size
+ - SharedSize
+ - VirtualSize
+ - Labels
+ - Containers
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ ParentId:
+ type: "string"
+ x-nullable: false
+ RepoTags:
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ RepoDigests:
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ Created:
+ type: "integer"
+ x-nullable: false
+ Size:
+ type: "integer"
+ x-nullable: false
+ SharedSize:
+ type: "integer"
+ x-nullable: false
+ VirtualSize:
+ type: "integer"
+ x-nullable: false
+ Labels:
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ Containers:
+ x-nullable: false
+ type: "integer"
+
+ AuthConfig:
+ type: "object"
+ properties:
+ username:
+ type: "string"
+ password:
+ type: "string"
+ email:
+ type: "string"
+ serveraddress:
+ type: "string"
+ example:
+ username: "hannibal"
+ password: "xxxx"
+ serveraddress: "https://index.docker.io/v1/"
+
+ ProcessConfig:
+ type: "object"
+ properties:
+ privileged:
+ type: "boolean"
+ user:
+ type: "string"
+ tty:
+ type: "boolean"
+ entrypoint:
+ type: "string"
+ arguments:
+ type: "array"
+ items:
+ type: "string"
+
+ Volume:
+ type: "object"
+ required: [Name, Driver, Mountpoint, Labels, Scope, Options]
+ properties:
+ Name:
+ type: "string"
+ description: "Name of the volume."
+ x-nullable: false
+ Driver:
+ type: "string"
+ description: "Name of the volume driver used by the volume."
+ x-nullable: false
+ Mountpoint:
+ type: "string"
+ description: "Mount path of the volume on the host."
+ x-nullable: false
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ description: "Date/Time the volume was created."
+ Status:
+ type: "object"
+ description: |
+ Low-level details about the volume, provided by the volume driver.
+ Details are returned as a map with key/value pairs:
+ `{"key":"value","key2":"value2"}`.
+
+ The `Status` field is optional, and is omitted if the volume driver
+ does not support this feature.
+ additionalProperties:
+ type: "object"
+ Labels:
+ type: "object"
+ description: "User-defined key/value metadata."
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ Scope:
+ type: "string"
+ description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level."
+ default: "local"
+ x-nullable: false
+ enum: ["local", "global"]
+ Options:
+ type: "object"
+ description: "The driver specific options used when creating the volume."
+ additionalProperties:
+ type: "string"
+ UsageData:
+ type: "object"
+ x-nullable: true
+ required: [Size, RefCount]
+ description: |
+ Usage details about the volume. This information is used by the
+ `GET /system/df` endpoint, and omitted in other endpoints.
+ properties:
+ Size:
+ type: "integer"
+ default: -1
+ description: |
+ Amount of disk space used by the volume (in bytes). This information
+ is only available for volumes created with the `"local"` volume
+ driver. For volumes created with other volume drivers, this field
+ is set to `-1` ("not available")
+ x-nullable: false
+ RefCount:
+ type: "integer"
+ default: -1
+ description: |
+ The number of containers referencing this volume. This field
+ is set to `-1` if the reference-count is not available.
+ x-nullable: false
+
+ example:
+ Name: "tardis"
+ Driver: "custom"
+ Mountpoint: "/var/lib/docker/volumes/tardis"
+ Status:
+ hello: "world"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope: "local"
+ CreatedAt: "2016-06-07T20:31:11.853781916Z"
+
+ Network:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Id:
+ type: "string"
+ Created:
+ type: "string"
+ format: "dateTime"
+ Scope:
+ type: "string"
+ Driver:
+ type: "string"
+ EnableIPv6:
+ type: "boolean"
+ IPAM:
+ $ref: "#/definitions/IPAM"
+ Internal:
+ type: "boolean"
+ Attachable:
+ type: "boolean"
+ Ingress:
+ type: "boolean"
+ Containers:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/NetworkContainer"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "net01"
+ Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ Created: "2016-10-19T04:33:30.360899459Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.19.0.0/16"
+ Gateway: "172.19.0.1"
+ Options:
+ foo: "bar"
+ Internal: false
+ Attachable: false
+ Ingress: false
+ Containers:
+ 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+ Name: "test"
+ EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress: "02:42:ac:13:00:02"
+ IPv4Address: "172.19.0.2/16"
+ IPv6Address: ""
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ IPAM:
+ type: "object"
+ properties:
+ Driver:
+ description: "Name of the IPAM driver to use."
+ type: "string"
+ default: "default"
+ Config:
+ description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`"
+ type: "array"
+ items:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Options:
+ description: "Driver-specific options, specified as a map."
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ NetworkContainer:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ EndpointID:
+ type: "string"
+ MacAddress:
+ type: "string"
+ IPv4Address:
+ type: "string"
+ IPv6Address:
+ type: "string"
+
+ BuildInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ stream:
+ type: "string"
+ error:
+ type: "string"
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+ aux:
+ $ref: "#/definitions/ImageID"
+
+ BuildCache:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Parent:
+ type: "string"
+ Type:
+ type: "string"
+ Description:
+ type: "string"
+ InUse:
+ type: "boolean"
+ Shared:
+ type: "boolean"
+ Size:
+ type: "integer"
+ CreatedAt:
+ type: "integer"
+ LastUsedAt:
+ type: "integer"
+ x-nullable: true
+ UsageCount:
+ type: "integer"
+
+ ImageID:
+ type: "object"
+ description: "Image ID or Digest"
+ properties:
+ ID:
+ type: "string"
+ example:
+ ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
+
+ CreateImageInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ error:
+ type: "string"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ PushImageInfo:
+ type: "object"
+ properties:
+ error:
+ type: "string"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ ErrorDetail:
+ type: "object"
+ properties:
+ code:
+ type: "integer"
+ message:
+ type: "string"
+
+ ProgressDetail:
+ type: "object"
+ properties:
+ current:
+ type: "integer"
+ total:
+ type: "integer"
+
+ ErrorResponse:
+ description: "Represents an error."
+ type: "object"
+ required: ["message"]
+ properties:
+ message:
+ description: "The error message."
+ type: "string"
+ x-nullable: false
+ example:
+ message: "Something went wrong."
+
+ IdResponse:
+ description: "Response to an API call that returns just an Id"
+ type: "object"
+ required: ["Id"]
+ properties:
+ Id:
+ description: "The id of the newly created object."
+ type: "string"
+ x-nullable: false
+
+ EndpointSettings:
+ description: "Configuration for a network endpoint."
+ type: "object"
+ properties:
+ # Configurations
+ IPAMConfig:
+ $ref: "#/definitions/EndpointIPAMConfig"
+ Links:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "server_x"
+ - "server_y"
+
+ # Operational data
+ NetworkID:
+ description: |
+ Unique ID of the network.
+ type: "string"
+ example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+ EndpointID:
+ description: |
+ Unique ID for the service endpoint in a Sandbox.
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for this network.
+ type: "string"
+ example: "172.17.0.1"
+ IPAddress:
+ description: |
+ IPv4 address.
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address.
+ type: "string"
+ example: "2001:db8:2::100"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address.
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+ type: "integer"
+ format: "int64"
+ example: 64
+ MacAddress:
+ description: |
+ MAC address for the endpoint on this network.
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ DriverOpts:
+ description: |
+ DriverOpts is a mapping of driver options and values. These options
+ are passed directly to the driver and are driver specific.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+
+ EndpointIPAMConfig:
+ description: |
+ EndpointIPAMConfig represents an endpoint's IPAM configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ IPv4Address:
+ type: "string"
+ example: "172.20.30.33"
+ IPv6Address:
+ type: "string"
+ example: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "169.254.34.68"
+ - "fe80::3468"
+
+ PluginMount:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Source, Destination, Type, Options]
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "some-mount"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "This is a mount that's used by the plugin."
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Source:
+ type: "string"
+ example: "/var/lib/docker/plugins/"
+ Destination:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/state"
+ Type:
+ type: "string"
+ x-nullable: false
+ example: "bind"
+ Options:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "rbind"
+ - "rw"
+
+ PluginDevice:
+ type: "object"
+ required: [Name, Description, Settable, Path]
+ x-nullable: false
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ Description:
+ type: "string"
+ x-nullable: false
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Path:
+ type: "string"
+ example: "/dev/fuse"
+
+ PluginEnv:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ Description:
+ x-nullable: false
+ type: "string"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "string"
+
+ PluginInterfaceType:
+ type: "object"
+ x-nullable: false
+ required: [Prefix, Capability, Version]
+ properties:
+ Prefix:
+ type: "string"
+ x-nullable: false
+ Capability:
+ type: "string"
+ x-nullable: false
+ Version:
+ type: "string"
+ x-nullable: false
+
+ Plugin:
+ description: "A plugin for the Engine API"
+ type: "object"
+ required: [Settings, Enabled, Config, Name]
+ properties:
+ Id:
+ type: "string"
+ example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "tiborvass/sample-volume-plugin"
+ Enabled:
+ description: "True if the plugin is running. False if the plugin is not running, only installed."
+ type: "boolean"
+ x-nullable: false
+ example: true
+ Settings:
+ description: "Settings that can be modified by users."
+ type: "object"
+ x-nullable: false
+ required: [Args, Devices, Env, Mounts]
+ properties:
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "DEBUG=0"
+ Args:
+ type: "array"
+ items:
+ type: "string"
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PluginReference:
+ description: "plugin remote reference used to push/pull the plugin"
+ type: "string"
+ x-nullable: false
+ example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
+ Config:
+ description: "The config of a plugin."
+ type: "object"
+ x-nullable: false
+ required:
+ - Description
+ - Documentation
+ - Interface
+ - Entrypoint
+ - WorkDir
+ - Network
+ - Linux
+ - PidHost
+ - PropagatedMount
+ - IpcHost
+ - Mounts
+ - Env
+ - Args
+ properties:
+ DockerVersion:
+ description: "Docker Version used to create the plugin"
+ type: "string"
+ x-nullable: false
+ example: "17.06.0-ce"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "A sample volume plugin for Docker"
+ Documentation:
+ type: "string"
+ x-nullable: false
+ example: "https://docs.docker.com/engine/extend/plugins/"
+ Interface:
+ description: "The interface between Docker and the plugin"
+ x-nullable: false
+ type: "object"
+ required: [Types, Socket]
+ properties:
+ Types:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginInterfaceType"
+ example:
+ - "docker.volumedriver/1.0"
+ Socket:
+ type: "string"
+ x-nullable: false
+ example: "plugins.sock"
+ ProtocolScheme:
+ type: "string"
+ example: "some.protocol/v1.0"
+ description: "Protocol to use for clients connecting to the plugin."
+ enum:
+ - ""
+ - "moby.plugins.http/v1"
+ Entrypoint:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/usr/bin/sample-volume-plugin"
+ - "/data"
+ WorkDir:
+ type: "string"
+ x-nullable: false
+ example: "/bin/"
+ User:
+ type: "object"
+ x-nullable: false
+ properties:
+ UID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ GID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ Network:
+ type: "object"
+ x-nullable: false
+ required: [Type]
+ properties:
+ Type:
+ x-nullable: false
+ type: "string"
+ example: "host"
+ Linux:
+ type: "object"
+ x-nullable: false
+ required: [Capabilities, AllowAllDevices, Devices]
+ properties:
+ Capabilities:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYSLOG"
+ AllowAllDevices:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PropagatedMount:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/volumes"
+ IpcHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ PidHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginEnv"
+ example:
+ - Name: "DEBUG"
+ Description: "If set, prints debug messages"
+ Settable: null
+ Value: "0"
+ Args:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ example: "args"
+ Description:
+ x-nullable: false
+ type: "string"
+ example: "command line arguments"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ rootfs:
+ type: "object"
+ properties:
+ type:
+ type: "string"
+ example: "layers"
+ diff_ids:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
+ - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+
+ ObjectVersion:
+ description: |
+ The version number of the object such as node, service, etc. This is needed to avoid conflicting writes.
+ The client must send the version number along with the modified specification when updating these objects.
+ This approach ensures safe concurrency and determinism in that the change on the object
+ may not be applied if the version number has changed from the last read. In other words,
+ if two update requests specify the same base version, only one of the requests can succeed.
+ As a result, two separate update requests that happen at the same time will not
+ unintentionally overwrite each other.
+ type: "object"
+ properties:
+ Index:
+ type: "integer"
+ format: "uint64"
+ example: 373531
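+ # Usage note (an assumption, not stated in this definition): update endpoints
+ # such as `POST /nodes/{id}/update` expect the last-read `Index` to be passed
+ # back as the `version` query parameter, e.g. `?version=373531`.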
+
+ NodeSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "Name for the node."
+ type: "string"
+ example: "my-node"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Role:
+ description: "Role of the node."
+ type: "string"
+ enum:
+ - "worker"
+ - "manager"
+ example: "manager"
+ Availability:
+ description: "Availability of the node."
+ type: "string"
+ enum:
+ - "active"
+ - "pause"
+ - "drain"
+ example: "active"
+ example:
+ Availability: "active"
+ Name: "node-name"
+ Role: "manager"
+ Labels:
+ foo: "bar"
+
+ Node:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "24ifsmvkjbyhk"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the node was added to the swarm in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the node was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/NodeSpec"
+ Description:
+ $ref: "#/definitions/NodeDescription"
+ Status:
+ $ref: "#/definitions/NodeStatus"
+ ManagerStatus:
+ $ref: "#/definitions/ManagerStatus"
+
+ NodeDescription:
+ description: |
+ NodeDescription encapsulates the properties of the Node as reported by the
+ agent.
+ type: "object"
+ properties:
+ Hostname:
+ type: "string"
+ example: "bf3067039e47"
+ Platform:
+ $ref: "#/definitions/Platform"
+ Resources:
+ $ref: "#/definitions/ResourceObject"
+ Engine:
+ $ref: "#/definitions/EngineDescription"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+
+ Platform:
+ description: |
+ Platform represents the platform (Arch/OS).
+ type: "object"
+ properties:
+ Architecture:
+ description: |
+ Architecture represents the hardware architecture (for example,
+ `x86_64`).
+ type: "string"
+ example: "x86_64"
+ OS:
+ description: |
+ OS represents the Operating System (for example, `linux` or `windows`).
+ type: "string"
+ example: "linux"
+
+ EngineDescription:
+ description: "EngineDescription provides information about an engine."
+ type: "object"
+ properties:
+ EngineVersion:
+ type: "string"
+ example: "17.06.0"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ foo: "bar"
+ Plugins:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Type:
+ type: "string"
+ Name:
+ type: "string"
+ example:
+ - Type: "Log"
+ Name: "awslogs"
+ - Type: "Log"
+ Name: "fluentd"
+ - Type: "Log"
+ Name: "gcplogs"
+ - Type: "Log"
+ Name: "gelf"
+ - Type: "Log"
+ Name: "journald"
+ - Type: "Log"
+ Name: "json-file"
+ - Type: "Log"
+ Name: "logentries"
+ - Type: "Log"
+ Name: "splunk"
+ - Type: "Log"
+ Name: "syslog"
+ - Type: "Network"
+ Name: "bridge"
+ - Type: "Network"
+ Name: "host"
+ - Type: "Network"
+ Name: "ipvlan"
+ - Type: "Network"
+ Name: "macvlan"
+ - Type: "Network"
+ Name: "null"
+ - Type: "Network"
+ Name: "overlay"
+ - Type: "Volume"
+ Name: "local"
+ - Type: "Volume"
+ Name: "localhost:5000/vieux/sshfs:latest"
+ - Type: "Volume"
+ Name: "vieux/sshfs:latest"
+
+ TLSInfo:
+ description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate"
+ type: "object"
+ properties:
+ TrustRoot:
+ description: "The root CA certificate(s) that are used to validate leaf TLS certificates"
+ type: "string"
+ CertIssuerSubject:
+ description: "The base64-url-safe-encoded raw subject bytes of the issuer"
+ type: "string"
+ CertIssuerPublicKey:
+ description: "The base64-url-safe-encoded raw public key bytes of the issuer"
+ type: "string"
+ example:
+ TrustRoot: |
+ -----BEGIN CERTIFICATE-----
+ MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
+ EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
+ MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+ A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
+ 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+ Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
+ PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
+ pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
+ -----END CERTIFICATE-----
+ CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
+ CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
+
+ NodeStatus:
+ description: |
+ NodeStatus represents the status of a node.
+
+ It provides the current status of the node, as seen by the manager.
+ type: "object"
+ properties:
+ State:
+ $ref: "#/definitions/NodeState"
+ Message:
+ type: "string"
+ example: ""
+ Addr:
+ description: "IP address of the node."
+ type: "string"
+ example: "172.17.0.2"
+
+ NodeState:
+ description: "NodeState represents the state of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "down"
+ - "ready"
+ - "disconnected"
+ example: "ready"
+
+ ManagerStatus:
+ description: |
+ ManagerStatus represents the status of a manager.
+
+ It provides the current status of a node's manager component, if the node
+ is a manager.
+ x-nullable: true
+ type: "object"
+ properties:
+ Leader:
+ type: "boolean"
+ default: false
+ example: true
+ Reachability:
+ $ref: "#/definitions/Reachability"
+ Addr:
+ description: |
+ The IP address and port at which the manager is reachable.
+ type: "string"
+ example: "10.0.0.46:2377"
+
+ Reachability:
+ description: "Reachability represents the reachability of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "unreachable"
+ - "reachable"
+ example: "reachable"
+
+ SwarmSpec:
+ description: "User modifiable swarm configuration."
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the swarm."
+ type: "string"
+ example: "default"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.corp.type: "production"
+ com.example.corp.department: "engineering"
+ Orchestration:
+ description: "Orchestration configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ TaskHistoryRetentionLimit:
+ description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks."
+ type: "integer"
+ format: "int64"
+ example: 10
+ Raft:
+ description: "Raft configuration."
+ type: "object"
+ properties:
+ SnapshotInterval:
+ description: "The number of log entries between snapshots."
+ type: "integer"
+ format: "uint64"
+ example: 10000
+ KeepOldSnapshots:
+ description: "The number of snapshots to keep beyond the current snapshot."
+ type: "integer"
+ format: "uint64"
+ LogEntriesForSlowFollowers:
+ description: "The number of log entries to keep around to sync up slow followers after a snapshot is created."
+ type: "integer"
+ format: "uint64"
+ example: 500
+ ElectionTick:
+ description: |
+ The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`.
+
+ A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 3
+ HeartbeatTick:
+ description: |
+ The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers.
+
+ A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 1
+ Dispatcher:
+ description: "Dispatcher configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ HeartbeatPeriod:
+ description: "The delay for an agent to send a heartbeat to the dispatcher."
+ type: "integer"
+ format: "int64"
+ example: 5000000000
+ CAConfig:
+ description: "CA configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ NodeCertExpiry:
+ description: "The duration node certificates are issued for."
+ type: "integer"
+ format: "int64"
+ example: 7776000000000000
+ ExternalCAs:
+ description: "Configuration for forwarding signing requests to an external certificate authority."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Protocol:
+ description: "Protocol for communication with the external CA (currently only `cfssl` is supported)."
+ type: "string"
+ enum:
+ - "cfssl"
+ default: "cfssl"
+ URL:
+ description: "URL where certificate signing requests should be sent."
+ type: "string"
+ Options:
+ description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ CACert:
+ description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)."
+ type: "string"
+ SigningCACert:
+ description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format."
+ type: "string"
+ SigningCAKey:
+ description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format."
+ type: "string"
+ ForceRotate:
+ description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`"
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: "If set, generate a key and use it to lock data stored on the managers."
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
+ ClusterInfo:
+ description: |
+ ClusterInfo represents information about the swarm as is returned by the
+ "/info" endpoint. Join-tokens are not included.
+ x-nullable: true
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the swarm."
+ type: "string"
+ example: "abajmipo7b4xz5ip2nrla6b11"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the swarm was initialised in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the swarm was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+ RootRotationInProgress:
+ description: "Whether there is currently a root CA rotation in progress for the swarm"
+ type: "boolean"
+ example: false
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+ If no port is set or is set to 0, the default port (4789) is used.
+ type: "integer"
+ format: "uint32"
+ default: 4789
+ example: 4789
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global scope networks.
+ type: "array"
+ items:
+ type: "string"
+ format: "CIDR"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created from the default subnet pool
+ type: "integer"
+ format: "uint32"
+ maximum: 29
+ default: 24
+ example: 24
+
+ JoinTokens:
+ description: |
+ JoinTokens contains the tokens workers and managers need to join the swarm.
+ type: "object"
+ properties:
+ Worker:
+ description: |
+ The token workers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
+ Manager:
+ description: |
+ The token managers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+
+ Swarm:
+ type: "object"
+ allOf:
+ - $ref: "#/definitions/ClusterInfo"
+ - type: "object"
+ properties:
+ JoinTokens:
+ $ref: "#/definitions/JoinTokens"
+
+ TaskSpec:
+ description: "User modifiable task configuration."
+ type: "object"
+ properties:
+ PluginSpec:
+ type: "object"
+ description: |
+ Plugin spec for the service. *(Experimental release only.)*
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ properties:
+ Name:
+ description: "The name or 'alias' to use for the plugin."
+ type: "string"
+ Remote:
+ description: "The plugin image reference to use."
+ type: "string"
+ Disabled:
+ description: "Disable the plugin once scheduled."
+ type: "boolean"
+ PluginPrivilege:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ ContainerSpec:
+ type: "object"
+ description: |
+ Container spec for the service.
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ properties:
+ Image:
+ description: "The image name to use for the container"
+ type: "string"
+ Labels:
+ description: "User-defined key/value data."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Command:
+ description: "The command to be run in the image."
+ type: "array"
+ items:
+ type: "string"
+ Args:
+ description: "Arguments to the command."
+ type: "array"
+ items:
+ type: "string"
+ Hostname:
+ description: "The hostname to use for the container, as a valid RFC 1123 hostname."
+ type: "string"
+ Env:
+ description: "A list of environment variables in the form `VAR=value`."
+ type: "array"
+ items:
+ type: "string"
+ Dir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ User:
+ description: "The user inside the container."
+ type: "string"
+ Groups:
+ type: "array"
+ description: "A list of additional groups that the container process will run as."
+ items:
+ type: "string"
+ Privileges:
+ type: "object"
+ description: "Security options for the container"
+ properties:
+ CredentialSpec:
+ type: "object"
+ description: "CredentialSpec for managed service account (Windows only)"
+ properties:
+ Config:
+ type: "string"
+ example: "0bt9dmxjvjiqermk6xrop3ekq"
+ description: |
+ Load credential spec from a Swarm Config with the given ID.
+ The specified config must also be present in the Configs field with the Runtime property set.
+
+ <p><br /></p>
+
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ File:
+ type: "string"
+ example: "spec.json"
+ description: |
+ Load credential spec from this file. The file is read by the daemon, and must be present in the
+ `CredentialSpecs` subdirectory in the docker data directory, which defaults to
+ `C:\ProgramData\Docker\` on Windows.
+
+ For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
+
+ <p><br /></p>
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ Registry:
+ type: "string"
+ description: |
+ Load credential spec from this value in the Windows registry. The specified registry value must be
+ located in:
+
+ `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
+
+ <p><br /></p>
+
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ SELinuxContext:
+ type: "object"
+ description: "SELinux labels of the container"
+ properties:
+ Disable:
+ type: "boolean"
+ description: "Disable SELinux"
+ User:
+ type: "string"
+ description: "SELinux user label"
+ Role:
+ type: "string"
+ description: "SELinux role label"
+ Type:
+ type: "string"
+ description: "SELinux type label"
+ Level:
+ type: "string"
+ description: "SELinux level label"
+ TTY:
+ description: "Whether a pseudo-TTY should be allocated."
+ type: "boolean"
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ ReadOnly:
+ description: "Mount the container's root filesystem as read only."
+ type: "boolean"
+ Mounts:
+ description: "Specification for mounts to be added to containers created as part of the service."
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+ StopSignal:
+ description: "Signal to stop the container."
+ type: "string"
+ StopGracePeriod:
+ description: "Amount of time to wait for the container to terminate before forcefully killing it."
+ type: "integer"
+ format: "int64"
+ HealthCheck:
+ $ref: "#/definitions/HealthConfig"
+ Hosts:
+ type: "array"
+ description: |
+ A list of hostname/IP mappings to add to the container's `hosts`
+ file. The format of extra hosts is specified in the
+ [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
+ man page:
+
+ IP_address canonical_hostname [aliases...]
+ items:
+ type: "string"
+ DNSConfig:
+ description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)."
+ type: "object"
+ properties:
+ Nameservers:
+ description: "The IP addresses of the name servers."
+ type: "array"
+ items:
+ type: "string"
+ Search:
+ description: "A search list for host-name lookup."
+ type: "array"
+ items:
+ type: "string"
+ Options:
+ description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)."
+ type: "array"
+ items:
+ type: "string"
+ Secrets:
+ description: "Secrets contains references to zero or more secrets that will be exposed to the service."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: "File represents a specific target that is backed by a file."
+ type: "object"
+ properties:
+ Name:
+ description: "Name represents the final filename in the filesystem."
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ SecretID:
+ description: "SecretID represents the ID of the specific secret that we're referencing."
+ type: "string"
+ SecretName:
+ description: |
+ SecretName is the name of the secret that this references, but this is just provided for
+ lookup/display purposes. The secret in the reference will be identified by its ID.
+ type: "string"
+ Configs:
+ description: "Configs contains references to zero or more configs that will be exposed to the service."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: |
+ File represents a specific target that is backed by a file.
+
+ <p><br /></p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+ type: "object"
+ properties:
+ Name:
+ description: "Name represents the final filename in the filesystem."
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ Runtime:
+ description: |
+ Runtime represents a target that is not mounted into the container but is used by the task
+
+ <p><br /></p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+ type: "object"
+ ConfigID:
+ description: "ConfigID represents the ID of the specific config that we're referencing."
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references, but this is just provided for
+ lookup/display purposes. The config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: "Isolation technology of the containers running the service. (Windows only)"
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ Init:
+ description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used."
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+ Set kernel namespaced parameters (sysctls) in the container.
+ The Sysctls option on services accepts the same sysctls as are
+ supported on containers. Note that while the same sysctls are
+ supported, no guarantees or checks are made about their
+ suitability for a clustered environment, and it's up to the user
+ to determine whether a given sysctl will work properly in a
+ Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: "Resource requirements which apply to each individual container created as part of the service."
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resources limits."
+ $ref: "#/definitions/ResourceObject"
+ Reservation:
+ description: "Define resources reservation."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: "Specification for the restart policy which applies to containers created as part of this service."
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)."
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)."
+ type: "integer"
+ format: "int64"
+ default: 0
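+ # Illustrative sketch only, not part of the upstream spec: restart on failure,
+ # waiting 5s (expressed in nanoseconds) between attempts, at most 3 times. The
+ # values are assumptions.
+ #
+ #   "RestartPolicy": { "Condition": "on-failure", "Delay": 5000000000, "MaxAttempts": 3 }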
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: "An array of constraints."
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ Preferences:
+ description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: "label descriptor, such as engine.labels.az"
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+            description: "Maximum number of replicas per node (default value is 0, which is unlimited)"
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: "A counter that triggers an update even if no relevant parameters have been changed."
+ type: "integer"
+ Runtime:
+ description: "Runtime is the type of runtime specified for the task executor."
+ type: "string"
+ Networks:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Target:
+ type: "string"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ LogDriver:
+        description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ type: "object"
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ type: "object"
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ example:
+ ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ AssignedGenericResources:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ ServiceSpec:
+ description: "User modifiable configuration for a service."
+ properties:
+ Name:
+ description: "Name of the service."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TaskTemplate:
+ $ref: "#/definitions/TaskSpec"
+ Mode:
+ description: "Scheduling mode for the service."
+ type: "object"
+ properties:
+ Replicated:
+ type: "object"
+ properties:
+ Replicas:
+ type: "integer"
+ format: "int64"
+ Global:
+ type: "object"
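+      # Illustrative example only (not part of the schema): a service that keeps
+      # three replicas running uses Replicated mode, while Global mode (an empty
+      # object) runs one task on each active node:
+      #
+      #   Mode:
+      #     Replicated:
+      #       Replicas: 3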
+ UpdateConfig:
+ description: "Specification for the update strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)."
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between updates, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: "Action to take if an updated task fails to run, or stops running during the update."
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ - "rollback"
+ Monitor:
+ description: "Amount of time to monitor each updated task for failures, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1."
+ type: "number"
+ default: 0
+ Order:
+ description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
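+      # Illustrative example only (not part of the schema): update one task at a
+      # time, wait 10s between updates, monitor each updated task for 15s, and
+      # pause the rollout if more than 10% of the updated tasks fail:
+      #
+      #   UpdateConfig:
+      #     Parallelism: 1
+      #     Delay: 10000000000
+      #     FailureAction: "pause"
+      #     Monitor: 15000000000
+      #     MaxFailureRatio: 0.1
+      #     Order: "stop-first"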
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)."
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between rollback iterations, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+            description: "Action to take if a rolled back task fails to run, or stops running during the rollback."
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: "Amount of time to monitor each rolled back task for failures, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1."
+ type: "number"
+ default: 0
+ Order:
+ description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: "Array of network names or IDs to attach the service to."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Target:
+ type: "string"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ - "sctp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+ PublishMode:
+ description: |
+ The mode in which port is published.
+
+ <p><br /></p>
+
+          - "ingress" makes the target port accessible on every node,
+            regardless of whether there is a task for the service running on
+            that node or not.
+          - "host" bypasses the routing mesh and publishes the port directly on
+            the swarm node where that service is running.
+
+ type: "string"
+ enum:
+ - "ingress"
+ - "host"
+ default: "ingress"
+ example: "ingress"
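+    # Illustrative example only (not part of the schema): publish container port
+    # 80 on port 8080 of every swarm node through the routing mesh:
+    #
+    #   { "Protocol": "tcp", "TargetPort": 80, "PublishedPort": 8080, "PublishMode": "ingress" }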
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: "The mode of resolution to use for internal load balancing
+ between tasks."
+ type: "string"
+ enum:
+ - "vip"
+ - "dnsrr"
+ default: "vip"
+ Ports:
+ description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used."
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ Service:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ServiceSpec"
+ Endpoint:
+ type: "object"
+ properties:
+ Spec:
+ $ref: "#/definitions/EndpointSpec"
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+ VirtualIPs:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NetworkID:
+ type: "string"
+ Addr:
+ type: "string"
+ UpdateStatus:
+ description: "The status of a service update."
+ type: "object"
+ properties:
+ State:
+ type: "string"
+ enum:
+ - "updating"
+ - "paused"
+ - "completed"
+ StartedAt:
+ type: "string"
+ format: "dateTime"
+ CompletedAt:
+ type: "string"
+ format: "dateTime"
+ Message:
+ type: "string"
+ example:
+ ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Version:
+ Index: 19
+ CreatedAt: "2016-06-07T21:05:51.880065305Z"
+ UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+ Spec:
+ Name: "hopeful_cori"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Endpoint:
+ Spec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ VirtualIPs:
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.2/16"
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.3/16"
+
+ ImageDeleteResponseItem:
+ type: "object"
+ properties:
+ Untagged:
+ description: "The image ID of an image that was untagged"
+ type: "string"
+ Deleted:
+ description: "The image ID of an image that was deleted"
+ type: "string"
+
+ ServiceUpdateResponse:
+ type: "object"
+ properties:
+ Warnings:
+ description: "Optional warning messages"
+ type: "array"
+ items:
+ type: "string"
+ example:
+      Warnings:
+        - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+ ContainerSummary:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Id:
+ description: "The ID of this container"
+ type: "string"
+ x-go-name: "ID"
+ Names:
+ description: "The names that this container has been given"
+ type: "array"
+ items:
+ type: "string"
+ Image:
+ description: "The name of the image used when creating this container"
+ type: "string"
+ ImageID:
+ description: "The ID of the image that this container was created from"
+ type: "string"
+ Command:
+ description: "Command to run when starting the container"
+ type: "string"
+ Created:
+ description: "When the container was created"
+ type: "integer"
+ format: "int64"
+ Ports:
+ description: "The ports exposed by this container"
+ type: "array"
+ items:
+ $ref: "#/definitions/Port"
+ SizeRw:
+ description: "The size of files that have been created or changed by this container"
+ type: "integer"
+ format: "int64"
+ SizeRootFs:
+ description: "The total size of all the files in this container"
+ type: "integer"
+ format: "int64"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ State:
+ description: "The state of this container (e.g. `Exited`)"
+ type: "string"
+ Status:
+ description: "Additional human-readable status of this container (e.g. `Exit 0`)"
+ type: "string"
+ HostConfig:
+ type: "object"
+ properties:
+ NetworkMode:
+ type: "string"
+ NetworkSettings:
+ description: "A summary of the container's network settings"
+ type: "object"
+ properties:
+ Networks:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+
+ Driver:
+ description: "Driver represents a driver (network, logging, secrets)."
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ description: "Name of the driver."
+ type: "string"
+ x-nullable: false
+ example: "some-driver"
+ Options:
+ description: "Key/value map of driver-specific options."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ OptionA: "value for driver-specific option A"
+ OptionB: "value for driver-specific option B"
+
+ SecretSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the secret."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ data to store as secret.
+
+ This field is only used to _create_ a secret, and is not returned by
+ other endpoints.
+ type: "string"
+ example: ""
+ Driver:
+ description: "Name of the secrets driver used to fetch the secret's value from an external secret store"
+ $ref: "#/definitions/Driver"
+ Templating:
+ description: |
+ Templating driver, if applicable
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
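+    # Illustrative example only (not part of the schema): the Data field holds
+    # the base64-url-safe-encoded payload; for instance, the string "my-secret"
+    # is sent as "bXktc2VjcmV0".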
+
+ Secret:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ $ref: "#/definitions/SecretSpec"
+
+ ConfigSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the config."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ config data.
+ type: "string"
+ Templating:
+ description: |
+ Templating driver, if applicable
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
+
+ Config:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ConfigSpec"
+
+ SystemInfo:
+ type: "object"
+ properties:
+ ID:
+ description: |
+ Unique identifier of the daemon.
+
+ <p><br /></p>
+
+ > **Note**: The format of the ID itself is not part of the API, and
+ > should not be considered stable.
+ type: "string"
+ example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
+ Containers:
+ description: "Total number of containers on the host."
+ type: "integer"
+ example: 14
+ ContainersRunning:
+ description: |
+ Number of containers with status `"running"`.
+ type: "integer"
+ example: 3
+ ContainersPaused:
+ description: |
+ Number of containers with status `"paused"`.
+ type: "integer"
+ example: 1
+ ContainersStopped:
+ description: |
+ Number of containers with status `"stopped"`.
+ type: "integer"
+ example: 10
+ Images:
+ description: |
+ Total number of images on the host.
+
+ Both _tagged_ and _untagged_ (dangling) images are counted.
+ type: "integer"
+ example: 508
+ Driver:
+ description: "Name of the storage driver in use."
+ type: "string"
+ example: "overlay2"
+ DriverStatus:
+ description: |
+ Information specific to the storage driver, provided as
+ "label" / "value" pairs.
+
+ This information is provided by the storage driver, and formatted
+ in a way consistent with the output of `docker info` on the command
+ line.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["Backing Filesystem", "extfs"]
+ - ["Supports d_type", "true"]
+ - ["Native Overlay Diff", "true"]
+ DockerRootDir:
+ description: |
+ Root directory of persistent Docker state.
+
+ Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
+ on Windows.
+ type: "string"
+ example: "/var/lib/docker"
+ SystemStatus:
+ description: |
+ Status information about this node (standalone Swarm API).
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field is only propagated
+ > by the Swarm standalone API, and is empty (`null`) when using
+ > built-in swarm mode.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["Role", "primary"]
+ - ["State", "Healthy"]
+ - ["Strategy", "spread"]
+ - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"]
+ - ["Nodes", "2"]
+ - [" swarm-agent-00", "192.168.99.102:2376"]
+ - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"]
+ - [" └ Status", "Healthy"]
+ - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"]
+ - [" └ Reserved CPUs", "0 / 1"]
+ - [" └ Reserved Memory", "0 B / 1.021 GiB"]
+ - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
+ - [" └ UpdatedAt", "2017-08-09T10:03:46Z"]
+ - [" └ ServerVersion", "17.06.0-ce"]
+ - [" swarm-manager", "192.168.99.101:2376"]
+ - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"]
+ - [" └ Status", "Healthy"]
+ - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"]
+ - [" └ Reserved CPUs", "0 / 1"]
+ - [" └ Reserved Memory", "0 B / 1.021 GiB"]
+ - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
+ - [" └ UpdatedAt", "2017-08-09T10:04:11Z"]
+ - [" └ ServerVersion", "17.06.0-ce"]
+ Plugins:
+ $ref: "#/definitions/PluginsInfo"
+ MemoryLimit:
+ description: "Indicates if the host has memory limit support enabled."
+ type: "boolean"
+ example: true
+ SwapLimit:
+ description: "Indicates if the host has memory swap limit support enabled."
+ type: "boolean"
+ example: true
+ KernelMemory:
+ description: "Indicates if the host has kernel memory limit support enabled."
+ type: "boolean"
+ example: true
+ CpuCfsPeriod:
+        description: "Indicates if CPU CFS (Completely Fair Scheduler) period is supported by the host."
+ type: "boolean"
+ example: true
+ CpuCfsQuota:
+        description: "Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by the host."
+ type: "boolean"
+ example: true
+ CPUShares:
+ description: "Indicates if CPU Shares limiting is supported by the host."
+ type: "boolean"
+ example: true
+ CPUSet:
+ description: |
+ Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
+
+ See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
+ type: "boolean"
+ example: true
+ PidsLimit:
+ description: "Indicates if the host kernel has PID limit support enabled."
+ type: "boolean"
+ example: true
+ OomKillDisable:
+ description: "Indicates if OOM killer disable is supported on the host."
+ type: "boolean"
+ IPv4Forwarding:
+        description: "Indicates if IPv4 forwarding is enabled."
+ type: "boolean"
+ example: true
+ BridgeNfIptables:
+ description: "Indicates if `bridge-nf-call-iptables` is available on the host."
+ type: "boolean"
+ example: true
+ BridgeNfIp6tables:
+ description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
+ type: "boolean"
+ example: true
+ Debug:
+ description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled."
+ type: "boolean"
+ example: true
+ NFd:
+ description: |
+          The total number of file descriptors in use by the daemon process.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 64
+ NGoroutines:
+ description: |
+ The number of goroutines that currently exist.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 174
+ SystemTime:
+ description: |
+          Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+          format with nanosecond precision.
+ type: "string"
+ example: "2017-08-08T20:28:29.06202363Z"
+ LoggingDriver:
+ description: |
+ The logging driver to use as a default for new containers.
+ type: "string"
+ CgroupDriver:
+ description: |
+ The driver to use for managing cgroups.
+ type: "string"
+ enum: ["cgroupfs", "systemd"]
+ default: "cgroupfs"
+ example: "cgroupfs"
+ NEventsListener:
+ description: "Number of event listeners subscribed."
+ type: "integer"
+ example: 30
+ KernelVersion:
+ description: |
+ Kernel version of the host.
+
+          On Linux, this information is obtained from `uname`. On Windows this
+ information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
+ registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
+ type: "string"
+ example: "4.9.38-moby"
+ OperatingSystem:
+ description: |
+ Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
+ or "Windows Server 2016 Datacenter"
+ type: "string"
+ example: "Alpine Linux v3.5"
+ OSType:
+ description: |
+ Generic type of the operating system of the host, as returned by the
+ Go runtime (`GOOS`).
+
+ Currently returned values are "linux" and "windows". A full list of
+ possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
+ type: "string"
+ example: "linux"
+ Architecture:
+ description: |
+ Hardware architecture of the host, as returned by the Go runtime
+ (`GOARCH`).
+
+ A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
+ type: "string"
+ example: "x86_64"
+ NCPU:
+ description: |
+ The number of logical CPUs usable by the daemon.
+
+ The number of available CPUs is checked by querying the operating
+ system when the daemon starts. Changes to operating system CPU
+ allocation after the daemon is started are not reflected.
+ type: "integer"
+ example: 4
+ MemTotal:
+ description: |
+          Total amount of physical memory available on the host, in bytes.
+ type: "integer"
+ format: "int64"
+ example: 2095882240
+
+ IndexServerAddress:
+ description: |
+ Address / URL of the index server that is used for image search,
+ and as a default for user authentication for Docker Hub and Docker Cloud.
+ default: "https://index.docker.io/v1/"
+ type: "string"
+ example: "https://index.docker.io/v1/"
+ RegistryConfig:
+ $ref: "#/definitions/RegistryServiceConfig"
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+ HttpProxy:
+ description: |
+ HTTP-proxy configured for the daemon. This value is obtained from the
+ [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
+ are masked in the API response.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
+ HttpsProxy:
+ description: |
+ HTTPS-proxy configured for the daemon. This value is obtained from the
+ [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
+ are masked in the API response.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
+ NoProxy:
+ description: |
+ Comma-separated list of domain extensions for which no proxy should be
+ used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
+ environment variable.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "*.local, 169.254/16"
+ Name:
+ description: "Hostname of the host."
+ type: "string"
+ example: "node5.corp.example.com"
+ Labels:
+ description: |
+ User-defined labels (key/value metadata) as set on the daemon.
+
+ <p><br /></p>
+
+ > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
+ > set through the daemon configuration, and _node_ labels, set from a
+ > manager node in the Swarm. Node labels are not included in this
+ > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
+ > on a manager node in the Swarm.
+ type: "array"
+ items:
+ type: "string"
+ example: ["storage=ssd", "production"]
+ ExperimentalBuild:
+ description: |
+ Indicates if experimental features are enabled on the daemon.
+ type: "boolean"
+ example: true
+ ServerVersion:
+ description: |
+ Version string of the daemon.
+
+ > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
+ > returns the Swarm version instead of the daemon version, for example
+ > `swarm/1.2.8`.
+ type: "string"
+ example: "17.06.0-ce"
+ ClusterStore:
+ description: |
+ URL of the distributed storage backend.
+
+
+ The storage backend is used for multihost networking (to store
+ network and endpoint information) and by the node discovery mechanism.
+
+ <p><br /></p>
+
+ > **Note**: This field is only propagated when using standalone Swarm
+ > mode, and overlay networking using an external k/v store. Overlay
+ > networks with Swarm mode enabled use the built-in raft store, and
+ > this field will be empty.
+ type: "string"
+ example: "consul://consul.corp.example.com:8600/some/path"
+ ClusterAdvertise:
+ description: |
+ The network endpoint that the Engine advertises for the purpose of
+ node discovery. ClusterAdvertise is a `host:port` combination on which
+ the daemon is reachable by other hosts.
+
+ <p><br /></p>
+
+ > **Note**: This field is only propagated when using standalone Swarm
+ > mode, and overlay networking using an external k/v store. Overlay
+ > networks with Swarm mode enabled use the built-in raft store, and
+ > this field will be empty.
+ type: "string"
+ example: "node5.corp.example.com:8000"
+ Runtimes:
+ description: |
+ List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtimes configured on the daemon. Keys hold the "name" used to
+ reference the runtime.
+
+ The Docker daemon relies on an OCI compliant runtime (invoked via the
+ `containerd` daemon) as its interface to the Linux kernel namespaces,
+ cgroups, and SELinux.
+
+          The default runtime is `runc`, and is automatically configured. Additional
+ runtimes can be configured by the user and will be listed here.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/Runtime"
+ default:
+ runc:
+ path: "runc"
+ example:
+ runc:
+ path: "runc"
+ runc-master:
+ path: "/go/bin/runc"
+ custom:
+ path: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs: ["--debug", "--systemd-cgroup=false"]
+ DefaultRuntime:
+ description: |
+ Name of the default OCI runtime that is used when starting containers.
+
+ The default can be overridden per-container at create time.
+ type: "string"
+ default: "runc"
+ example: "runc"
+ Swarm:
+ $ref: "#/definitions/SwarmInfo"
+ LiveRestoreEnabled:
+ description: |
+ Indicates if live restore is enabled.
+
+          If enabled, containers are kept running when the daemon is shut down,
+          and the daemon re-attaches to them on start if running containers are
+          detected.
+ type: "boolean"
+ default: false
+ example: false
+ Isolation:
+ description: |
+ Represents the isolation technology to use as a default for containers.
+ The supported values are platform-specific.
+
+          If no isolation value is specified on daemon start, on Windows client
+          the default is `hyperv`, and on Windows server the default is `process`.
+
+ This option is currently not used on other platforms.
+ default: "default"
+ type: "string"
+ enum:
+ - "default"
+ - "hyperv"
+ - "process"
+ InitBinary:
+ description: |
+          Name and, optionally, path of the `docker-init` binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "docker-init"
+ ContainerdCommit:
+ $ref: "#/definitions/Commit"
+ RuncCommit:
+ $ref: "#/definitions/Commit"
+ InitCommit:
+ $ref: "#/definitions/Commit"
+ SecurityOptions:
+ description: |
+ List of security features that are enabled on the daemon, such as
+ apparmor, seccomp, SELinux, and user-namespaces (userns).
+
+ Additional configuration options for each security feature may
+ be present, and are included as a comma-separated list of key/value
+ pairs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "name=apparmor"
+ - "name=seccomp,profile=default"
+ - "name=selinux"
+ - "name=userns"
+ ProductLicense:
+ description: |
+ Reports a summary of the product license on the daemon.
+
+          If a commercial license has been applied to the daemon, information
+          such as the number of nodes and the expiration date is included.
+ type: "string"
+ example: "Community Engine"
+ Warnings:
+ description: |
+ List of warnings / informational messages about missing features, or
+ issues related to the daemon configuration.
+
+ These messages can be printed by the client as information to the user.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "WARNING: No memory limit support"
+ - "WARNING: bridge-nf-call-iptables is disabled"
+ - "WARNING: bridge-nf-call-ip6tables is disabled"
+
+
+ # PluginsInfo is a temp struct holding Plugins name
+ # registered with docker daemon. It is used by Info struct
+ PluginsInfo:
+ description: |
+ Available plugins per type.
+
+ <p><br /></p>
+
+ > **Note**: Only unmanaged (V1) plugins are included in this list.
+ > V1 plugins are "lazily" loaded, and are not returned in this list
+ > if there is no resource using the plugin.
+ type: "object"
+ properties:
+ Volume:
+        description: "Names of available volume-drivers, and volume-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["local"]
+ Network:
+ description: "Names of available network-drivers, and network-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+ Authorization:
+ description: "Names of available authorization plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["img-authz-plugin", "hbm"]
+ Log:
+ description: "Names of available logging-drivers, and logging-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]
+
+
+ RegistryServiceConfig:
+ description: |
+ RegistryServiceConfig stores daemon registry services configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ AllowNondistributableArtifactsCIDRs:
+ description: |
+ List of IP ranges to which nondistributable artifacts can be pushed,
+          using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
+
+ Some images (for example, Windows base images) contain artifacts
+ whose distribution is restricted by license. When these images are
+ pushed to a registry, restricted artifacts are not included.
+
+          This configuration overrides this behavior, and enables the daemon to
+ push nondistributable artifacts to all registries whose resolved IP
+ address is within the subnet described by the CIDR syntax.
+
+ This option is useful when pushing images containing
+ nondistributable artifacts to a registry on an air-gapped network so
+ hosts on that network can pull the images without connecting to
+ another server.
+
+ > **Warning**: Nondistributable artifacts typically have restrictions
+ > on how and where they can be distributed and shared. Only use this
+ > feature to push artifacts to private registries and ensure that you
+ > are in compliance with any terms that cover redistributing
+ > nondistributable artifacts.
+
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
+ AllowNondistributableArtifactsHostnames:
+ description: |
+ List of registry hostnames to which nondistributable artifacts can be
+ pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
+
+ Some images (for example, Windows base images) contain artifacts
+ whose distribution is restricted by license. When these images are
+ pushed to a registry, restricted artifacts are not included.
+
+          This configuration overrides this behavior for the specified
+ registries.
+
+ This option is useful when pushing images containing
+ nondistributable artifacts to a registry on an air-gapped network so
+ hosts on that network can pull the images without connecting to
+ another server.
+
+ > **Warning**: Nondistributable artifacts typically have restrictions
+ > on how and where they can be distributed and shared. Only use this
+ > feature to push artifacts to private registries and ensure that you
+ > are in compliance with any terms that cover redistributing
+ > nondistributable artifacts.
+ type: "array"
+ items:
+ type: "string"
+ example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
+ InsecureRegistryCIDRs:
+ description: |
+ List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+ accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+ from unknown CAs) communication.
+
+ By default, local registries (`127.0.0.0/8`) are configured as
+ insecure. All other registries are secure. Communicating with an
+ insecure registry is not possible if the daemon assumes that registry
+ is secure.
+
+          This configuration overrides this behavior, and enables insecure
+          communication with registries whose resolved IP address is within
+          the subnet described by the CIDR syntax.
+
+ Registries can also be marked insecure by hostname. Those registries
+ are listed under `IndexConfigs` and have their `Secure` field set to
+ `false`.
+
+ > **Warning**: Using this option can be useful when running a local
+ > registry, but introduces security vulnerabilities. This option
+ > should therefore ONLY be used for testing purposes. For increased
+ > security, users should add their CA to their system's list of trusted
+ > CAs instead of enabling this option.
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
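+      # Illustrative example only (not part of the schema): with the default value
+      # ["::1/128", "127.0.0.0/8"] shown above, a local registry such as
+      # "127.0.0.1:5000" falls inside 127.0.0.0/8 and is treated as insecure
+      # (compare the IndexConfigs example below, where its Secure field is false).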
+ IndexConfigs:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/IndexInfo"
+ example:
+ "127.0.0.1:5000":
+ "Name": "127.0.0.1:5000"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "[2001:db8:a0b:12f0::1]:80":
+ "Name": "[2001:db8:a0b:12f0::1]:80"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "docker.io":
+ Name: "docker.io"
+ Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+ Secure: true
+ Official: true
+ "registry.internal.corp.example.com:3000":
+ Name: "registry.internal.corp.example.com:3000"
+ Mirrors: []
+ Secure: false
+ Official: false
+ Mirrors:
+ description: |
+ List of registry URLs that act as a mirror for the official
+ (`docker.io`) registry.
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://[2001:db8:a0b:12f0::1]/"
+
+ IndexInfo:
+ description:
+ IndexInfo contains information about a registry.
+ type: "object"
+ x-nullable: true
+ properties:
+ Name:
+ description: |
+ Name of the registry, such as "docker.io".
+ type: "string"
+ example: "docker.io"
+ Mirrors:
+ description: |
+ List of mirrors, expressed as URIs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://registry-2.docker.io/"
+ - "https://registry-3.docker.io/"
+ Secure:
+ description: |
+          Indicates if the registry is secure, i.e. not included in the list
+          of insecure registries.
+
+ If `false`, the registry is insecure. Insecure registries accept
+ un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+ unknown CAs) communication.
+
+ > **Warning**: Insecure registries can be useful when running a local
+ > registry. However, because its use creates security vulnerabilities
+ > it should ONLY be enabled for testing purposes. For increased
+ > security, users should add their CA to their system's list of
+ > trusted CAs instead of enabling this option.
+ type: "boolean"
+ example: true
+ Official:
+ description: |
+ Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+ type: "boolean"
+ example: true
+
+ Runtime:
+ description: |
+ Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtime.
+
+ The runtime is invoked by the daemon via the `containerd` daemon. OCI
+ runtimes act as an interface to the Linux kernel namespaces, cgroups,
+ and SELinux.
+ type: "object"
+ properties:
+ path:
+ description: |
+          Name and, optionally, path of the OCI executable binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs:
+ description: |
+ List of command-line arguments to pass to the runtime when invoked.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["--debug", "--systemd-cgroup=false"]
+
+ Commit:
+ description: |
+ Commit holds the Git-commit (SHA1) that a binary was built from, as
+      reported in the version-string of external tools, such as `containerd`
+      or `runC`.
+ type: "object"
+ properties:
+ ID:
+ description: "Actual commit ID of external tool."
+ type: "string"
+ example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+ Expected:
+ description: |
+ Commit ID of external tool expected by dockerd as set at build time.
+ type: "string"
+ example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
+
+ SwarmInfo:
+ description: |
+      Represents generic information about the swarm.
+ type: "object"
+ properties:
+ NodeID:
+        description: "Unique identifier for this node in the swarm."
+ type: "string"
+ default: ""
+ example: "k67qz4598weg5unwwffg6z1m1"
+ NodeAddr:
+ description: |
+ IP address at which this node can be reached by other nodes in the
+ swarm.
+ type: "string"
+ default: ""
+ example: "10.0.0.46"
+ LocalNodeState:
+ $ref: "#/definitions/LocalNodeState"
+ ControlAvailable:
+ type: "boolean"
+ default: false
+ example: true
+ Error:
+ type: "string"
+ default: ""
+ RemoteManagers:
+ description: |
+          List of IDs and addresses of other managers in the swarm.
+ type: "array"
+ default: null
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PeerNode"
+ example:
+ - NodeID: "71izy0goik036k48jg985xnds"
+ Addr: "10.0.0.158:2377"
+ - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+ Addr: "10.0.0.159:2377"
+ - NodeID: "k67qz4598weg5unwwffg6z1m1"
+ Addr: "10.0.0.46:2377"
+ Nodes:
+ description: "Total number of nodes in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 4
+ Managers:
+ description: "Total number of managers in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 3
+ Cluster:
+ $ref: "#/definitions/ClusterInfo"
+
+ LocalNodeState:
+ description: "Current local status of this node."
+ type: "string"
+ default: ""
+ enum:
+ - ""
+ - "inactive"
+ - "pending"
+ - "active"
+ - "error"
+ - "locked"
+ example: "active"
+
+ PeerNode:
+ description: "Represents a peer-node in the swarm"
+ properties:
+ NodeID:
+        description: "Unique identifier for this node in the swarm."
+ type: "string"
+ Addr:
+ description: |
+ IP address and ports at which this node can be reached.
+ type: "string"
+
+paths:
+ /containers/json:
+ get:
+ summary: "List containers"
+ description: |
+ Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).
+
+ Note that it uses a different, smaller representation of a container than inspecting a single container. For example,
+        the list of linked containers is not propagated.
+ operationId: "ContainerList"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "all"
+ in: "query"
+          description: "Return all containers. By default, only running containers are shown."
+ type: "boolean"
+ default: false
+ - name: "limit"
+ in: "query"
+ description: "Return this number of most recently created containers, including non-running ones."
+ type: "integer"
+ - name: "size"
+ in: "query"
+          description: "Return the size of the container as the fields `SizeRw` and `SizeRootFs`."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters:
+
+ - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+ - `before`=(`<container id>` or `<container name>`)
+ - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `exited=<int>` containers with exit code of `<int>`
+ - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+ - `id=<ID>` a container's ID
+ - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+ - `is-task=`(`true`|`false`)
+ - `label=key` or `label="key=value"` of a container label
+ - `name=<name>` a container's name
+ - `network`=(`<network id>` or `<network name>`)
+ - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `since`=(`<container id>` or `<container name>`)
+ - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+ - `volume`=(`<volume name>` or `<mount point destination>`)
+ type: "string"
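+        # Illustrative request only (not part of the spec): list only paused
+        # containers by URL-encoding the JSON filters value, for example
+        #
+        #   GET /containers/json?all=true&filters=%7B%22status%22%3A%5B%22paused%22%5D%7D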
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ContainerSummary"
+ examples:
+ application/json:
+ - Id: "8dfafdbc3a40"
+ Names:
+ - "/boring_feynman"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 1"
+ Created: 1367854155
+ State: "Exited"
+ Status: "Exit 0"
+ Ports:
+ - PrivatePort: 2222
+ PublicPort: 3333
+ Type: "tcp"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:02"
+ Mounts:
+ - Name: "fac362...80535"
+ Source: "/data"
+ Destination: "/data"
+ Driver: "local"
+ Mode: "ro,Z"
+ RW: false
+ Propagation: ""
+ - Id: "9cd87474be90"
+ Names:
+ - "/coolName"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 222222"
+ Created: 1367854155
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.8"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:08"
+ Mounts: []
+ - Id: "3176a2479c92"
+ Names:
+ - "/sleepy_dog"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 3333333333333333"
+ Created: 1367854154
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.6"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:06"
+ Mounts: []
+ - Id: "4cb07b47f9fb"
+ Names:
+ - "/running_cat"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 444444444444444444444444444444444"
+ Created: 1367854152
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.5"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:05"
+ Mounts: []
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/create:
+ post:
+ summary: "Create a container"
+ operationId: "ContainerCreate"
+ consumes:
+ - "application/json"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`."
+ type: "string"
+ pattern: "/?[a-zA-Z0-9_-]+"
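+          # Illustrative request only (not part of the spec), sent together with
+          # a JSON body as described below:
+          #
+          #   POST /containers/create?name=my-web-server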
+ - name: "body"
+ in: "body"
+ description: "Container to create"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ContainerConfig"
+ - type: "object"
+ properties:
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ NetworkingConfig:
+ description: "This container's networking configuration."
+ type: "object"
+ properties:
+ EndpointsConfig:
+ description: "A mapping of network name to endpoint configuration for that network."
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ Hostname: ""
+ Domainname: ""
+ User: ""
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ Tty: false
+ OpenStdin: false
+ StdinOnce: false
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ Cmd:
+ - "date"
+ Entrypoint: ""
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ NetworkDisabled: false
+ MacAddress: "12:34:56:78:9a:bc"
+ ExposedPorts:
+ 22/tcp: {}
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ HostConfig:
+ Binds:
+ - "/tmp:/tmp"
+ Links:
+ - "redis3:redis"
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ KernelMemory: 0
+ NanoCPUs: 500000
+ CpuPercent: 80
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpuQuota: 50000
+ CpusetCpus: "0,1"
+ CpusetMems: "0,1"
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 300
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ MemorySwappiness: 60
+ OomKillDisable: false
+ OomScoreAdj: 500
+ PidMode: ""
+ PidsLimit: 0
+ PortBindings:
+ 22/tcp:
+ - HostPort: "11022"
+ PublishAllPorts: false
+ Privileged: false
+ ReadonlyRootfs: false
+ Dns:
+ - "8.8.8.8"
+ DnsOptions:
+ - ""
+ DnsSearch:
+ - ""
+ VolumesFrom:
+ - "parent"
+ - "other:ro"
+ CapAdd:
+ - "NET_ADMIN"
+ CapDrop:
+ - "MKNOD"
+ GroupAdd:
+ - "newgroup"
+ RestartPolicy:
+ Name: ""
+ MaximumRetryCount: 0
+ AutoRemove: true
+ NetworkMode: "bridge"
+ Devices: []
+ Ulimits:
+ - {}
+ LogConfig:
+ Type: "json-file"
+ Config: {}
+ SecurityOpt: []
+ StorageOpt: {}
+ CgroupParent: ""
+ VolumeDriver: ""
+ ShmSize: 67108864
+ NetworkingConfig:
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+
+ required: true
+ responses:
+ 201:
+ description: "Container created successfully"
+ schema:
+ type: "object"
+ title: "ContainerCreateResponse"
+ description: "OK response to ContainerCreate operation"
+ required: [Id, Warnings]
+ properties:
+ Id:
+ description: "The ID of the created container"
+ type: "string"
+ x-nullable: false
+ Warnings:
+ description: "Warnings encountered when creating the container"
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Id: "e90e34656806"
+ Warnings: []
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/{id}/json:
+ get:
+ summary: "Inspect a container"
+ description: "Return low-level information about a container."
+ operationId: "ContainerInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "ContainerInspectResponse"
+ properties:
+ Id:
+ description: "The ID of the container"
+ type: "string"
+ Created:
+ description: "The time the container was created"
+ type: "string"
+ Path:
+ description: "The path to the command being run"
+ type: "string"
+ Args:
+ description: "The arguments to the command being run"
+ type: "array"
+ items:
+ type: "string"
+ State:
+ description: "The state of the container."
+ type: "object"
+ properties:
+ Status:
+ description: |
+ The status of the container. For example, `"running"` or `"exited"`.
+ type: "string"
+ enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
+ Running:
+ description: |
+ Whether this container is running.
+
+ Note that a running container can be _paused_. The `Running` and `Paused`
+ booleans are not mutually exclusive:
+
+ When pausing a container (on Linux), the cgroups freezer is used to suspend
+ all processes in the container. Freezing the process requires the process to
+ be running. As a result, paused containers are both `Running` _and_ `Paused`.
+
+ Use the `Status` field instead to determine if a container's state is "running".
+ type: "boolean"
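+                  # Illustrative example only (not part of the schema): a container
+                  # that has been paused reports
+                  #   "Status": "paused", "Running": true, "Paused": true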
+ Paused:
+ description: "Whether this container is paused."
+ type: "boolean"
+ Restarting:
+ description: "Whether this container is restarting."
+ type: "boolean"
+ OOMKilled:
+ description: "Whether this container has been killed because it ran out of memory."
+ type: "boolean"
+ Dead:
+ type: "boolean"
+ Pid:
+ description: "The process ID of this container"
+ type: "integer"
+ ExitCode:
+ description: "The last exit code of this container"
+ type: "integer"
+ Error:
+ type: "string"
+ StartedAt:
+ description: "The time when this container was last started."
+ type: "string"
+ FinishedAt:
+ description: "The time when this container last exited."
+ type: "string"
+ Image:
+ description: "The container's image"
+ type: "string"
+ ResolvConfPath:
+ type: "string"
+ HostnamePath:
+ type: "string"
+ HostsPath:
+ type: "string"
+ LogPath:
+ type: "string"
+ Node:
+ description: "TODO"
+ type: "object"
+ Name:
+ type: "string"
+ RestartCount:
+ type: "integer"
+ Driver:
+ type: "string"
+ MountLabel:
+ type: "string"
+ ProcessLabel:
+ type: "string"
+ AppArmorProfile:
+ type: "string"
+ ExecIDs:
+ description: "IDs of exec instances that are running in the container."
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ GraphDriver:
+ $ref: "#/definitions/GraphDriverData"
+ SizeRw:
+ description: "The size of files that have been created or changed by this container."
+ type: "integer"
+ format: "int64"
+ SizeRootFs:
+ description: "The total size of all the files in this container."
+ type: "integer"
+ format: "int64"
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ NetworkSettings:
+ $ref: "#/definitions/NetworkSettings"
+ examples:
+ application/json:
+ AppArmorProfile: ""
+ Args:
+ - "-c"
+ - "exit 9"
+ Config:
+ AttachStderr: true
+ AttachStdin: false
+ AttachStdout: true
+ Cmd:
+ - "/bin/sh"
+ - "-c"
+ - "exit 9"
+ Domainname: ""
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Hostname: "ba033ac44011"
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ MacAddress: ""
+ NetworkDisabled: false
+ OpenStdin: false
+ StdinOnce: false
+ Tty: false
+ User: ""
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ Created: "2015-01-06T15:47:31.485331387Z"
+ Driver: "devicemapper"
+ ExecIDs:
+ - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+ - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+ HostConfig:
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 0
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ ContainerIDFile: ""
+ CpusetCpus: ""
+ CpusetMems: ""
+ CpuPercent: 80
+ CpuShares: 0
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ Devices: []
+ IpcMode: ""
+ LxcConf: []
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ KernelMemory: 0
+ OomKillDisable: false
+ OomScoreAdj: 500
+ NetworkMode: "bridge"
+ PidMode: ""
+ PortBindings: {}
+ Privileged: false
+ ReadonlyRootfs: false
+ PublishAllPorts: false
+ RestartPolicy:
+ MaximumRetryCount: 2
+ Name: "on-failure"
+ LogConfig:
+ Type: "json-file"
+ Sysctls:
+ net.ipv4.ip_forward: "1"
+ Ulimits:
+ - {}
+ VolumeDriver: ""
+ ShmSize: 67108864
+ HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname"
+ HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts"
+ LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log"
+ Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39"
+ Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2"
+ MountLabel: ""
+ Name: "/boring_euclid"
+ NetworkSettings:
+ Bridge: ""
+ SandboxID: ""
+ HairpinMode: false
+ LinkLocalIPv6Address: ""
+ LinkLocalIPv6PrefixLen: 0
+ SandboxKey: ""
+ EndpointID: ""
+ Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ IPAddress: ""
+ IPPrefixLen: 0
+ IPv6Gateway: ""
+ MacAddress: ""
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Path: "/bin/sh"
+ ProcessLabel: ""
+ ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf"
+ RestartCount: 1
+ State:
+ Error: ""
+ ExitCode: 9
+ FinishedAt: "2015-01-06T15:47:32.080254511Z"
+ OOMKilled: false
+ Dead: false
+ Paused: false
+ Pid: 0
+ Restarting: false
+ Running: true
+ StartedAt: "2015-01-06T15:47:32.072697474Z"
+ Status: "running"
+ Mounts:
+ - Name: "fac362...80535"
+ Source: "/data"
+ Destination: "/data"
+ Driver: "local"
+ Mode: "ro,Z"
+ RW: false
+ Propagation: ""
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "size"
+ in: "query"
+ type: "boolean"
+ default: false
+ description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
+ tags: ["Container"]
+ /containers/{id}/top:
+ get:
+ summary: "List processes running inside a container"
+ description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows."
+ operationId: "ContainerTop"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "ContainerTopResponse"
+ description: "OK response to ContainerTop operation"
+ properties:
+ Titles:
+ description: "The ps column titles"
+ type: "array"
+ items:
+ type: "string"
+ Processes:
+ description: "Each process running in the container, where each is process is an array of values corresponding to the titles"
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Titles:
+ - "UID"
+ - "PID"
+ - "PPID"
+ - "C"
+ - "STIME"
+ - "TTY"
+ - "TIME"
+ - "CMD"
+ Processes:
+ -
+ - "root"
+ - "13642"
+ - "882"
+ - "0"
+ - "17:03"
+ - "pts/0"
+ - "00:00:00"
+ - "/bin/bash"
+ -
+ - "root"
+ - "13735"
+ - "13642"
+ - "0"
+ - "17:06"
+ - "pts/0"
+ - "00:00:00"
+ - "sleep 10"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "ps_args"
+ in: "query"
+ description: "The arguments to pass to `ps`. For example, `aux`"
+ type: "string"
+ default: "-ef"
+ tags: ["Container"]
+ /containers/{id}/logs:
+ get:
+ summary: "Get container logs"
+ description: |
+ Get `stdout` and `stderr` logs from a container.
+
+ Note: This endpoint works only for containers with the `json-file` or `journald` logging driver.
+ operationId: "ContainerLogs"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "until"
+ in: "query"
+ description: "Only return logs before this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Container"]
+ /containers/{id}/changes:
+ get:
+ summary: "Get changes on a container’s filesystem"
+ description: |
+ Returns which files in a container's filesystem have been added, deleted,
+ or modified. The `Kind` of modification can be one of:
+
+ - `0`: Modified
+ - `1`: Added
+ - `2`: Deleted
+ operationId: "ContainerChanges"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The list of changes"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: "ContainerChangeResponseItem"
+ title: "ContainerChangeResponseItem"
+ description: "change item in response to ContainerChanges operation"
+ required: [Path, Kind]
+ properties:
+ Path:
+ description: "Path to file that has changed"
+ type: "string"
+ x-nullable: false
+ Kind:
+ description: "Kind of change"
+ type: "integer"
+ format: "uint8"
+ enum: [0, 1, 2]
+ x-nullable: false
+ examples:
+ application/json:
+ - Path: "/dev"
+ Kind: 0
+ - Path: "/dev/kmsg"
+ Kind: 1
+ - Path: "/test"
+ Kind: 1
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/export:
+ get:
+ summary: "Export a container"
+ description: "Export the contents of a container as a tarball."
+ operationId: "ContainerExport"
+ produces:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stats:
+ get:
+ summary: "Get container stats based on resource usage"
+ description: |
+ This endpoint returns a live stream of a container’s resource usage
+ statistics.
+
+ The `precpu_stats` is the CPU statistic of the *previous* read, and is
+ used to calculate the CPU usage percentage. It is not an exact copy
+ of the `cpu_stats` field.
+
+ If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
+ nil then for compatibility with older daemons the length of the
+ corresponding `cpu_usage.percpu_usage` array should be used.
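+
+ For example, a commonly used client-side calculation (a minimal sketch, not
+ part of the API itself; the parameters are assumed to mirror the JSON fields
+ shown in the example response) is:
+
+ ```go
+ // cpuPercent computes the CPU usage percentage for one stats sample, using
+ // the delta between cpu_stats and precpu_stats. onlineCPUs should fall back
+ // to len(cpu_usage.percpu_usage) when online_cpus is nil.
+ func cpuPercent(totalUsage, preTotalUsage, systemUsage, preSystemUsage uint64, onlineCPUs int) float64 {
+     cpuDelta := float64(totalUsage) - float64(preTotalUsage)
+     systemDelta := float64(systemUsage) - float64(preSystemUsage)
+     if cpuDelta <= 0 || systemDelta <= 0 {
+         return 0
+     }
+     return (cpuDelta / systemDelta) * float64(onlineCPUs) * 100.0
+ }
+ ```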
+ operationId: "ContainerStats"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ examples:
+ application/json:
+ read: "2015-01-08T22:57:31.547920715Z"
+ pids_stats:
+ current: 3
+ networks:
+ eth0:
+ rx_bytes: 5338
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 36
+ tx_bytes: 648
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 8
+ eth5:
+ rx_bytes: 4641
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 26
+ tx_bytes: 690
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 9
+ memory_stats:
+ stats:
+ total_pgmajfault: 0
+ cache: 0
+ mapped_file: 0
+ total_inactive_file: 0
+ pgpgout: 414
+ rss: 6537216
+ total_mapped_file: 0
+ writeback: 0
+ unevictable: 0
+ pgpgin: 477
+ total_unevictable: 0
+ pgmajfault: 0
+ total_rss: 6537216
+ total_rss_huge: 6291456
+ total_writeback: 0
+ total_inactive_anon: 0
+ rss_huge: 6291456
+ hierarchical_memory_limit: 67108864
+ total_pgfault: 964
+ total_active_file: 0
+ active_anon: 6537216
+ total_active_anon: 6537216
+ total_pgpgout: 414
+ total_cache: 0
+ inactive_anon: 0
+ active_file: 0
+ pgfault: 964
+ inactive_file: 0
+ total_pgpgin: 477
+ max_usage: 6651904
+ usage: 6537216
+ failcnt: 0
+ limit: 67108864
+ blkio_stats: {}
+ cpu_stats:
+ cpu_usage:
+ percpu_usage:
+ - 8646879
+ - 24472255
+ - 36438778
+ - 30657443
+ usage_in_usermode: 50000000
+ total_usage: 100215355
+ usage_in_kernelmode: 30000000
+ system_cpu_usage: 739306590000000
+ online_cpus: 4
+ throttling_data:
+ periods: 0
+ throttled_periods: 0
+ throttled_time: 0
+ precpu_stats:
+ cpu_usage:
+ percpu_usage:
+ - 8646879
+ - 24350896
+ - 36438778
+ - 30657443
+ usage_in_usermode: 50000000
+ total_usage: 100093996
+ usage_in_kernelmode: 30000000
+ system_cpu_usage: 9492140000000
+ online_cpus: 4
+ throttling_data:
+ periods: 0
+ throttled_periods: 0
+ throttled_time: 0
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "stream"
+ in: "query"
+ description: "Stream the output. If false, the stats will be output once and then it will disconnect."
+ type: "boolean"
+ default: true
+ tags: ["Container"]
+ /containers/{id}/resize:
+ post:
+ summary: "Resize a container TTY"
+ description: "Resize the TTY for a container. You must restart the container for the resize to take effect."
+ operationId: "ContainerResize"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "cannot resize container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "h"
+ in: "query"
+ description: "Height of the tty session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ description: "Width of the tty session in characters"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/start:
+ post:
+ summary: "Start a container"
+ operationId: "ContainerStart"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already started"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stop:
+ post:
+ summary: "Stop a container"
+ operationId: "ContainerStop"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already stopped"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/restart:
+ post:
+ summary: "Restart a container"
+ operationId: "ContainerRestart"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/kill:
+ post:
+ summary: "Kill a container"
+ description: "Send a POSIX signal to a container, defaulting to killing to the container."
+ operationId: "ContainerKill"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is not running"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "signal"
+ in: "query"
+ description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)"
+ type: "string"
+ default: "SIGKILL"
+ tags: ["Container"]
+ /containers/{id}/update:
+ post:
+ summary: "Update a container"
+ description: "Change various configuration options of a container without having to recreate it."
+ operationId: "ContainerUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has been updated."
+ schema:
+ type: "object"
+ title: "ContainerUpdateResponse"
+ description: "OK response to ContainerUpdate operation"
+ properties:
+ Warnings:
+ type: "array"
+ items:
+ type: "string"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "update"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ example:
+ BlkioWeight: 300
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuQuota: 50000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpusetCpus: "0,1"
+ CpusetMems: "0"
+ Memory: 314572800
+ MemorySwap: 514288000
+ MemoryReservation: 209715200
+ KernelMemory: 52428800
+ RestartPolicy:
+ MaximumRetryCount: 4
+ Name: "on-failure"
+ tags: ["Container"]
+ /containers/{id}/rename:
+ post:
+ summary: "Rename a container"
+ operationId: "ContainerRename"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "name already in use"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "name"
+ in: "query"
+ required: true
+ description: "New name for the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/pause:
+ post:
+ summary: "Pause a container"
+ description: |
+ Use the cgroups freezer to suspend all processes in a container.
+
+ Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer, the process is unaware of, and unable to trap, the fact that it is being suspended and subsequently resumed.
+ operationId: "ContainerPause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/unpause:
+ post:
+ summary: "Unpause a container"
+ description: "Resume a container which has been paused."
+ operationId: "ContainerUnpause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/attach:
+ post:
+ summary: "Attach to a container"
+ description: |
+ Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.
+
+ Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything.
+
+ See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.
+
+ This is the response from the daemon for an attach request:
+
+ ```
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ [STREAM]
+ ```
+
+ After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.
+
+ To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
+ Upgrade: tcp
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Content-Type: application/vnd.docker.raw-stream
+ Connection: Upgrade
+ Upgrade: tcp
+
+ [STREAM]
+ ```
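+
+ A minimal client-side sketch of this handshake in Go (the Unix socket path
+ and container ID below are illustrative assumptions; error handling is
+ abbreviated):
+
+ ```go
+ package main
+
+ import (
+     "bufio"
+     "fmt"
+     "io"
+     "log"
+     "net"
+     "os"
+ )
+
+ func main() {
+     // Dial the daemon (assumed here to listen on the default Unix socket).
+     conn, err := net.Dial("unix", "/var/run/docker.sock")
+     if err != nil {
+         log.Fatal(err)
+     }
+     defer conn.Close()
+
+     // Send the upgrade request shown above.
+     fmt.Fprint(conn, "POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\r\n"+
+         "Host: localhost\r\nUpgrade: tcp\r\nConnection: Upgrade\r\n\r\n")
+
+     // Skip the 101 response headers; the raw stream follows the blank line.
+     br := bufio.NewReader(conn)
+     for {
+         line, err := br.ReadString('\n')
+         if err != nil {
+             log.Fatal(err)
+         }
+         if line == "\r\n" {
+             break
+         }
+     }
+     io.Copy(os.Stdout, br)
+ }
+ ```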
+
+ ### Stream format
+
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connection is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.
+
+ The header indicates which stream the frame was written to (`stdout` or `stderr`). It also contains the size of the associated frame, encoded as a `uint32` in the last four bytes.
+
+ It is encoded on the first eight bytes like this:
+
+ ```go
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+ ```
+
+ `STREAM_TYPE` can be:
+
+ - 0: `stdin` (is written on `stdout`)
+ - 1: `stdout`
+ - 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.
+
+ Following the header is the payload: the specified number of bytes from the stream indicated by `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
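+
+ A minimal sketch of these steps in Go (reading a saved multiplexed stream
+ from standard input; this is an illustration, not part of the API):
+
+ ```go
+ package main
+
+ import (
+     "encoding/binary"
+     "io"
+     "log"
+     "os"
+ )
+
+ func main() {
+     var header [8]byte
+     for {
+         // 1. Read 8 bytes.
+         if _, err := io.ReadFull(os.Stdin, header[:]); err != nil {
+             if err == io.EOF {
+                 return
+             }
+             log.Fatal(err)
+         }
+         // 2. Choose stdout or stderr depending on the first byte.
+         dst := os.Stdout
+         if header[0] == 2 {
+             dst = os.Stderr
+         }
+         // 3. Extract the frame size from the last four bytes (big endian).
+         size := binary.BigEndian.Uint32(header[4:])
+         // 4. Read the extracted size and output it on the correct output.
+         if _, err := io.CopyN(dst, os.Stdin, int64(size)); err != nil {
+             log.Fatal(err)
+         }
+         // 5. Goto 1.
+     }
+ }
+ ```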
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+
+ operationId: "ContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful when you want to attach to a container that has already started and see everything it has output since it started.
+
+ If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Stream attached streams from the time the request was made onwards"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "ContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: "Return logs"
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Return stream"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/wait:
+ post:
+ summary: "Wait for a container"
+ description: "Block until a container stops, then returns the exit code."
+ operationId: "ContainerWait"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has exit."
+ schema:
+ type: "object"
+ title: "ContainerWaitResponse"
+ description: "OK response to ContainerWait operation"
+ required: [StatusCode]
+ properties:
+ StatusCode:
+ description: "Exit code of the container"
+ type: "integer"
+ x-nullable: false
+ Error:
+ description: "container waiting error, if any"
+ type: "object"
+ properties:
+ Message:
+ description: "Details of an error"
+ type: "string"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "condition"
+ in: "query"
+ description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'."
+ type: "string"
+ default: "not-running"
+ tags: ["Container"]
+ /containers/{id}:
+ delete:
+ summary: "Remove a container"
+ operationId: "ContainerDelete"
+ responses:
+ 204:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "v"
+ in: "query"
+ description: "Remove the volumes associated with the container."
+ type: "boolean"
+ default: false
+ - name: "force"
+ in: "query"
+ description: "If the container is running, kill it before removing it."
+ type: "boolean"
+ default: false
+ - name: "link"
+ in: "query"
+ description: "Remove the specified link associated with the container."
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/archive:
+ head:
+ summary: "Get information about files in a container"
+ description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path."
+ operationId: "ContainerArchiveInfo"
+ responses:
+ 200:
+ description: "no error"
+ headers:
+ X-Docker-Container-Path-Stat:
+ type: "string"
+ description: "A base64 - encoded JSON object with some filesystem header information about the path"
+ 400:
+ description: "Bad parameter"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ErrorResponse"
+ - type: "object"
+ properties:
+ message:
+ description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ type: "string"
+ x-nullable: false
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ get:
+ summary: "Get an archive of a filesystem resource in a container"
+ description: "Get a tar archive of a resource in the filesystem of container id."
+ operationId: "ContainerArchive"
+ produces: ["application/x-tar"]
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ErrorResponse"
+ - type: "object"
+ properties:
+ message:
+ description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ type: "string"
+ x-nullable: false
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ put:
+ summary: "Extract an archive of files or folders to a directory in a container"
+ description: "Upload a tar archive to be extracted to a path in the filesystem of container id."
+ operationId: "PutContainerArchive"
+ consumes: ["application/x-tar", "application/octet-stream"]
+ responses:
+ 200:
+ description: "The content was extracted successfully"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "Permission denied, the volume or container rootfs is marked as read-only."
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such container or path does not exist inside the container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Path to a directory in the container to extract the archive’s contents into. "
+ type: "string"
+ - name: "noOverwriteDirNonDir"
+ in: "query"
+ description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa."
+ type: "string"
+ - name: "inputStream"
+ in: "body"
+ required: true
+ description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ tags: ["Container"]
+ /containers/prune:
+ post:
+ summary: "Delete stopped containers"
+ produces:
+ - "application/json"
+ operationId: "ContainerPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
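+
+ For example, a sketch of how a client might build this parameter (the
+ filter values and request path shown are illustrative, not defaults):
+
+ ```go
+ package main
+
+ import (
+     "encoding/json"
+     "fmt"
+     "net/url"
+ )
+
+ func main() {
+     // Encode the filters map as JSON, then URL-encode it for the query string.
+     filters := map[string][]string{
+         "until": {"10m"},
+         "label": {"env=test"},
+     }
+     b, _ := json.Marshal(filters)
+     fmt.Println("/containers/prune?filters=" + url.QueryEscape(string(b)))
+ }
+ ```
+
+ The same encoding applies to the other `filters` parameters in this API.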
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ContainerPruneResponse"
+ properties:
+ ContainersDeleted:
+ description: "Container IDs that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /images/json:
+ get:
+ summary: "List Images"
+ description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
+ operationId: "ImageList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "Summary image data for the images matching the query"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ examples:
+ application/json:
+ - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.04"
+ - "ubuntu:precise"
+ RepoDigests:
+ - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
+ Created: 1474925151
+ Size: 103579269
+ VirtualSize: 103579269
+ SharedSize: 0
+ Labels: {}
+ Containers: 2
+ - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.10"
+ - "ubuntu:quantal"
+ RepoDigests:
+ - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
+ - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
+ Created: 1403128455
+ Size: 172064416
+ VirtualSize: 172064416
+ SharedSize: 0
+ Labels: {}
+ Containers: 5
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: "Show all images. Only images from a final layer (no children) are shown by default."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `dangling=true`
+ - `label=key` or `label="key=value"` of an image label
+ - `reference`=(`<image-name>[:<tag>]`)
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ type: "string"
+ - name: "digests"
+ in: "query"
+ description: "Show digest information as a `RepoDigests` field on each image."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /build:
+ post:
+ summary: "Build an image"
+ description: |
+ Build an image from a tar archive with a `Dockerfile` in it.
+
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
+
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
+
+ The build is canceled if the client drops the connection by quitting or being killed.
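+
+ As an illustration only (not part of the API definition), a client might
+ start a build by streaming a tar archive of the context. The TCP address and
+ file name below are assumptions; the daemon more commonly listens on a Unix
+ socket:
+
+ ```go
+ package main
+
+ import (
+     "log"
+     "net/http"
+     "os"
+ )
+
+ func main() {
+     // Assumes the daemon is reachable over TCP at localhost:2375 and that
+     // context.tar contains the build context, including a Dockerfile.
+     f, err := os.Open("context.tar")
+     if err != nil {
+         log.Fatal(err)
+     }
+     defer f.Close()
+
+     resp, err := http.Post("http://localhost:2375/build?t=myimage:latest", "application/x-tar", f)
+     if err != nil {
+         log.Fatal(err)
+     }
+     defer resp.Body.Close()
+     // The response body streams JSON progress messages until the build ends.
+ }
+ ```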
+ operationId: "ImageBuild"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "inputStream"
+ in: "body"
+ description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "dockerfile"
+ in: "query"
+ description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
+ type: "string"
+ default: "Dockerfile"
+ - name: "t"
+ in: "query"
+ description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
+ type: "string"
+ - name: "extrahosts"
+ in: "query"
+ description: "Extra hosts to add to /etc/hosts"
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
+ type: "string"
+ - name: "q"
+ in: "query"
+ description: "Suppress verbose build output."
+ type: "boolean"
+ default: false
+ - name: "nocache"
+ in: "query"
+ description: "Do not use the cache when building the image."
+ type: "boolean"
+ default: false
+ - name: "cachefrom"
+ in: "query"
+ description: "JSON array of images used for build cache resolution."
+ type: "string"
+ - name: "pull"
+ in: "query"
+ description: "Attempt to pull the image even if an older image exists locally."
+ type: "string"
+ - name: "rm"
+ in: "query"
+ description: "Remove intermediate containers after a successful build."
+ type: "boolean"
+ default: true
+ - name: "forcerm"
+ in: "query"
+ description: "Always remove intermediate containers, even upon failure."
+ type: "boolean"
+ default: false
+ - name: "memory"
+ in: "query"
+ description: "Set memory limit for build."
+ type: "integer"
+ - name: "memswap"
+ in: "query"
+ description: "Total memory (memory + swap). Set as `-1` to disable swap."
+ type: "integer"
+ - name: "cpushares"
+ in: "query"
+ description: "CPU shares (relative weight)."
+ type: "integer"
+ - name: "cpusetcpus"
+ in: "query"
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
+ type: "string"
+ - name: "cpuperiod"
+ in: "query"
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ - name: "cpuquota"
+ in: "query"
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ - name: "buildargs"
+ in: "query"
+ description: >
+ JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
+ uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
+ instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
+ passing secret values.
+
+
+ For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
+ query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+
+
+ [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
+ type: "string"
+ - name: "shmsize"
+ in: "query"
+ description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
+ type: "integer"
+ - name: "squash"
+ in: "query"
+ description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
+ type: "boolean"
+ - name: "labels"
+ in: "query"
+ description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
+ type: "string"
+ - name: "networkmode"
+ in: "query"
+ description: "Sets the networking mode for the run commands during
+ build. Supported standard values are: `bridge`, `host`, `none`, and
+ `container:<name|id>`. Any other value is taken as a custom network's
+ name to which this container should connect."
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
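+
+ A sketch of producing this header value (the credentials are the
+ placeholders from the example above; URL-safe base64 is assumed here,
+ matching the reference client):
+
+ ```go
+ package main
+
+ import (
+     "encoding/base64"
+     "encoding/json"
+     "fmt"
+ )
+
+ func main() {
+     // Placeholder credentials; never hard-code real secrets.
+     cfg := map[string]map[string]string{
+         "docker.example.com":          {"username": "janedoe", "password": "hunter2"},
+         "https://index.docker.io/v1/": {"username": "mobydock", "password": "conta1n3rize14"},
+     }
+     b, _ := json.Marshal(cfg)
+     // Send this string as the X-Registry-Config header value.
+     fmt.Println(base64.URLEncoding.EncodeToString(b))
+ }
+ ```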
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: "Platform in the format os[/arch[/variant]]"
+ type: "string"
+ default: ""
+ - name: "target"
+ in: "query"
+ description: "Target build stage"
+ type: "string"
+ default: ""
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /build/prune:
+ post:
+ summary: "Delete builder cache"
+ produces:
+ - "application/json"
+ operationId: "BuildPrune"
+ parameters:
+ - name: "keep-storage"
+ in: "query"
+ description: "Amount of disk space in bytes to keep for cache"
+ type: "integer"
+ format: "int64"
+ - name: "all"
+ in: "query"
+ type: "boolean"
+ description: "Remove all types of build cache"
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters:
+ - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
+ - `id=<id>`
+ - `parent=<id>`
+ - `type=<string>`
+ - `description=<string>`
+ - `inuse`
+ - `shared`
+ - `private`
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "BuildPruneResponse"
+ properties:
+ CachesDeleted:
+ type: "array"
+ items:
+ description: "ID of build cache object"
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /images/create:
+ post:
+ summary: "Create an image"
+ description: "Create an image by either pulling it from a registry or importing it."
+ operationId: "ImageCreate"
+ consumes:
+ - "text/plain"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "repository does not exist or no read access"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "fromImage"
+ in: "query"
+ description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed."
+ type: "string"
+ - name: "fromSrc"
+ in: "query"
+ description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
+ type: "string"
+ - name: "inputImage"
+ in: "body"
+ description: "Image content if the value `-` has been specified in fromSrc query parameter"
+ schema:
+ type: "string"
+ required: false
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: "Platform in the format os[/arch[/variant]]"
+ type: "string"
+ default: ""
+ tags: ["Image"]
+ /images/{name}/json:
+ get:
+ summary: "Inspect an image"
+ description: "Return low-level information about an image."
+ operationId: "ImageInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Image"
+ examples:
+ application/json:
+ Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
+ Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a"
+ Comment: ""
+ Os: "linux"
+ Architecture: "amd64"
+ Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ ContainerConfig:
+ Tty: false
+ Hostname: "e611e15f9c9d"
+ Domainname: ""
+ AttachStdout: false
+ PublishService: ""
+ AttachStdin: false
+ OpenStdin: false
+ StdinOnce: false
+ NetworkDisabled: false
+ OnBuild: []
+ Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ User: ""
+ WorkingDir: ""
+ MacAddress: ""
+ AttachStderr: false
+ Labels:
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ com.example.vendor: "Acme"
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Cmd:
+ - "/bin/sh"
+ - "-c"
+ - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0"
+ DockerVersion: "1.9.0-dev"
+ VirtualSize: 188359297
+ Size: 0
+ Author: ""
+ Created: "2015-09-10T08:30:53.26995814Z"
+ GraphDriver:
+ Name: "aufs"
+ Data: {}
+ RepoDigests:
+ - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
+ RepoTags:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ Config:
+ Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ NetworkDisabled: false
+ OnBuild: []
+ StdinOnce: false
+ PublishService: ""
+ AttachStdin: false
+ OpenStdin: false
+ Domainname: ""
+ AttachStdout: false
+ Tty: false
+ Hostname: "e611e15f9c9d"
+ Cmd:
+ - "/bin/bash"
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.version: "1.0"
+ com.example.license: "GPL"
+ MacAddress: ""
+ AttachStderr: false
+ WorkingDir: ""
+ User: ""
+ RootFS:
+ Type: "layers"
+ Layers:
+ - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
+ - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/history:
+ get:
+ summary: "Get the history of an image"
+ description: "Return parent layers of an image."
+ operationId: "ImageHistory"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "List of image layers"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: HistoryResponseItem
+ title: "HistoryResponseItem"
+ description: "individual image layer information in response to ImageHistory operation"
+ required: [Id, Created, CreatedBy, Tags, Size, Comment]
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ Created:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ CreatedBy:
+ type: "string"
+ x-nullable: false
+ Tags:
+ type: "array"
+ items:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ Comment:
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
+ Created: 1398108230
+ CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
+ Tags:
+ - "ubuntu:lucid"
+ - "ubuntu:10.04"
+ Size: 182964289
+ Comment: ""
+ - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
+ Created: 1398108222
+ CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
+ Tags: []
+ Size: 0
+ Comment: ""
+ - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
+ Created: 1371157430
+ CreatedBy: ""
+ Tags:
+ - "scratch12:latest"
+ - "scratch:latest"
+ Size: 0
+ Comment: "Imported from -"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/push:
+ post:
+ summary: "Push an image"
+ description: |
+ Push an image to a registry.
+
+ If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`.
+
+ The push is cancelled if the HTTP connection is closed.
+ operationId: "ImagePush"
+ consumes:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID."
+ type: "string"
+ required: true
+ - name: "tag"
+ in: "query"
+ description: "The tag to associate with the image on the registry."
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/tag:
+ post:
+ summary: "Tag an image"
+ description: "Tag an image so that it becomes part of a repository."
+ operationId: "ImageTag"
+ responses:
+ 201:
+ description: "No error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID to tag."
+ type: "string"
+ required: true
+ - name: "repo"
+ in: "query"
+ description: "The repository to tag in. For example, `someuser/someimage`."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "The name of the new tag."
+ type: "string"
+ tags: ["Image"]
+ /images/{name}:
+ delete:
+ summary: "Remove an image"
+ description: |
+ Remove an image, along with any untagged parent images that were
+ referenced by that image.
+
+ Images can't be removed if they have descendant images, are being
+ used by a running container or are being used by a build.
+ operationId: "ImageDelete"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The image was deleted successfully"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ examples:
+ application/json:
+ - Untagged: "3e2f21a89f"
+ - Deleted: "3e2f21a89f"
+ - Deleted: "53b4f83ac9"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Remove the image even if it is being used by stopped containers or has other tags"
+ type: "boolean"
+ default: false
+ - name: "noprune"
+ in: "query"
+ description: "Do not delete untagged parent images"
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /images/search:
+ get:
+ summary: "Search images"
+ description: "Search for an image on Docker Hub."
+ operationId: "ImageSearch"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ title: "ImageSearchResponseItem"
+ properties:
+ description:
+ type: "string"
+ is_official:
+ type: "boolean"
+ is_automated:
+ type: "boolean"
+ name:
+ type: "string"
+ star_count:
+ type: "integer"
+ examples:
+ application/json:
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "wma55/u1210sshd"
+ star_count: 0
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "jdswinbank/sshd"
+ star_count: 0
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "vgauthier/sshd"
+ star_count: 0
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "term"
+ in: "query"
+ description: "Term to search"
+ type: "string"
+ required: true
+ - name: "limit"
+ in: "query"
+ description: "Maximum number of results to return"
+ type: "integer"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+ - `is-automated=(true|false)`
+ - `is-official=(true|false)`
+ - `stars=<number>` Matches images that have at least 'number' stars.
+ type: "string"
+ tags: ["Image"]
+ /images/prune:
+ post:
+ summary: "Delete unused images"
+ produces:
+ - "application/json"
+ operationId: "ImagePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), prune only
+ unused *and* untagged images. When set to `false`
+ (or `0`), all unused images are pruned.
+ - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ImagePruneResponse"
+ properties:
+ ImagesDeleted:
+ description: "Images that were deleted"
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /auth:
+ post:
+ summary: "Check auth configuration"
+ description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password."
+ operationId: "SystemAuth"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "An identity token was generated successfully."
+ schema:
+ type: "object"
+ title: "SystemAuthResponse"
+ required: [Status]
+ properties:
+ Status:
+ description: "The status of the authentication"
+ type: "string"
+ x-nullable: false
+ IdentityToken:
+ description: "An opaque token used to authenticate a user after a successful login"
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ Status: "Login Succeeded"
+ IdentityToken: "9cbaf023786cd7..."
+ 204:
+ description: "No error"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "authConfig"
+ in: "body"
+ description: "Authentication to check"
+ schema:
+ $ref: "#/definitions/AuthConfig"
+ tags: ["System"]
+ /info:
+ get:
+ summary: "Get system information"
+ operationId: "SystemInfo"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/SystemInfo"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /version:
+ get:
+ summary: "Get version"
+ description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
+ operationId: "SystemVersion"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemVersionResponse"
+ properties:
+ Platform:
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ type: "string"
+ Components:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: ComponentVersion
+ required: [Name, Version]
+ properties:
+ Name:
+ type: "string"
+ Version:
+ type: "string"
+ x-nullable: false
+ Details:
+ type: "object"
+ x-nullable: true
+
+ Version:
+ type: "string"
+ ApiVersion:
+ type: "string"
+ MinAPIVersion:
+ type: "string"
+ GitCommit:
+ type: "string"
+ GoVersion:
+ type: "string"
+ Os:
+ type: "string"
+ Arch:
+ type: "string"
+ KernelVersion:
+ type: "string"
+ Experimental:
+ type: "boolean"
+ BuildTime:
+ type: "string"
+ examples:
+ application/json:
+ Version: "17.04.0"
+ Os: "linux"
+ KernelVersion: "3.19.0-23-generic"
+ GoVersion: "go1.7.5"
+ GitCommit: "deadbee"
+ Arch: "amd64"
+ ApiVersion: "1.27"
+ MinAPIVersion: "1.12"
+ BuildTime: "2016-06-14T07:09:13.444803460+00:00"
+ Experimental: true
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /_ping:
+ get:
+ summary: "Ping"
+ description: "This is a dummy endpoint you can use to test if the server is accessible."
+ operationId: "SystemPing"
+ produces: ["text/plain"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ example: "OK"
+ headers:
+ API-Version:
+ type: "string"
+ description: "Max API Version the server supports"
+ BuildKit-Version:
+ type: "string"
+ description: "Default version of docker image builder"
+ Docker-Experimental:
+ type: "boolean"
+ description: "If the server is running with experimental mode enabled"
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ headers:
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ tags: ["System"]
+ head:
+ summary: "Ping"
+ description: "This is a dummy endpoint you can use to test if the server is accessible."
+ operationId: "SystemPingHead"
+ produces: ["text/plain"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ example: "(empty)"
+ headers:
+ API-Version:
+ type: "string"
+ description: "Max API Version the server supports"
+ BuildKit-Version:
+ type: "string"
+ description: "Default version of docker image builder"
+ Docker-Experimental:
+ type: "boolean"
+ description: "If the server is running with experimental mode enabled"
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /commit:
+ post:
+ summary: "Create a new image from a container"
+ operationId: "ImageCommit"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "containerConfig"
+ in: "body"
+ description: "The container configuration"
+ schema:
+ $ref: "#/definitions/ContainerConfig"
+ - name: "container"
+ in: "query"
+ description: "The ID or name of the container to commit"
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name for the created image"
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag name for the create image"
+ type: "string"
+ - name: "comment"
+ in: "query"
+ description: "Commit message"
+ type: "string"
+ - name: "author"
+ in: "query"
+ description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)"
+ type: "string"
+ - name: "pause"
+ in: "query"
+ description: "Whether to pause the container before committing"
+ type: "boolean"
+ default: true
+ - name: "changes"
+ in: "query"
+ description: "`Dockerfile` instructions to apply while committing"
+ type: "string"
+ tags: ["Image"]
+ /events:
+ get:
+ summary: "Monitor events"
+ description: |
+ Stream real-time events from the server.
+
+ Various objects within Docker report events when something happens to them.
+
+ Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`
+
+ Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`
+
+ Volumes report these events: `create`, `mount`, `unmount`, and `destroy`
+
+ Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`
+
+ The Docker daemon reports these events: `reload`
+
+ Services report these events: `create`, `update`, and `remove`
+
+ Nodes report these events: `create`, `update`, and `remove`
+
+ Secrets report these events: `create`, `update`, and `remove`
+
+ Configs report these events: `create`, `update`, and `remove`
+
+ operationId: "SystemEvents"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemEventsResponse"
+ properties:
+ Type:
+ description: "The type of object emitting the event"
+ type: "string"
+ Action:
+ description: "The type of event"
+ type: "string"
+ Actor:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the object emitting the event"
+ type: "string"
+ Attributes:
+ description: "Various key/value attributes of the object, depending on its type"
+ type: "object"
+ additionalProperties:
+ type: "string"
+ time:
+ description: "Timestamp of event"
+ type: "integer"
+ timeNano:
+ description: "Timestamp of event, with nanosecond accuracy"
+ type: "integer"
+ format: "int64"
+ examples:
+ application/json:
+ Type: "container"
+ Action: "create"
+ Actor:
+ ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ Attributes:
+ com.example.some-label: "some-label-value"
+ image: "alpine"
+ name: "my-container"
+ time: 1461943101
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "since"
+ in: "query"
+ description: "Show events created since this timestamp then stream new events."
+ type: "string"
+ - name: "until"
+ in: "query"
+ description: "Show events created until this timestamp then stop streaming."
+ type: "string"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
+
+ - `config=<string>` config name or ID
+ - `container=<string>` container name or ID
+ - `daemon=<string>` daemon name or ID
+ - `event=<string>` event type
+ - `image=<string>` image name or ID
+ - `label=<string>` image or container label
+ - `network=<string>` network name or ID
+ - `node=<string>` node ID
+ - `plugin=<string>` plugin name or ID
+ - `scope=<string>` local or swarm
+ - `secret=<string>` secret name or ID
+ - `service=<string>` service name or ID
+ - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret`, or `config`
+ - `volume=<string>` volume name
+ type: "string"
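+ # Illustrative only: like the other filter parameters, this is a URL-encoded JSON
+ # `map[string][]string`. For example, to stream only container start and stop events:
+ #   {"type":["container"],"event":["start","stop"]}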
+ tags: ["System"]
+ /system/df:
+ get:
+ summary: "Get data usage information"
+ operationId: "SystemDataUsage"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemDataUsageResponse"
+ properties:
+ LayersSize:
+ type: "integer"
+ format: "int64"
+ Images:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ Containers:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ Volumes:
+ type: "array"
+ items:
+ $ref: "#/definitions/Volume"
+ BuildCache:
+ type: "array"
+ items:
+ $ref: "#/definitions/BuildCache"
+ example:
+ LayersSize: 1092588
+ Images:
+ -
+ Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ ParentId: ""
+ RepoTags:
+ - "busybox:latest"
+ RepoDigests:
+ - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+ Created: 1466724217
+ Size: 1092588
+ SharedSize: 0
+ VirtualSize: 1092588
+ Labels: {}
+ Containers: 1
+ Containers:
+ -
+ Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+ Names:
+ - "/top"
+ Image: "busybox"
+ ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ Command: "top"
+ Created: 1472592424
+ Ports: []
+ SizeRootFs: 1092588
+ Labels: {}
+ State: "exited"
+ Status: "Exited (0) 56 minutes ago"
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ IPAMConfig: null
+ Links: null
+ Aliases: null
+ NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
+ EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
+ Gateway: "172.18.0.1"
+ IPAddress: "172.18.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Mounts: []
+ Volumes:
+ -
+ Name: "my-volume"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
+ Labels: null
+ Scope: "local"
+ Options: null
+ UsageData:
+ Size: 10920104
+ RefCount: 2
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /images/{name}/get:
+ get:
+ summary: "Export an image"
+ description: |
+ Get a tarball containing all images and metadata for a repository.
+
+ If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but without the `repositories` file in the tarball, since no image names are referenced.
+
+ ### Image tarball format
+
+ An image tarball contains one directory per image layer (named using its long ID), each containing these files:
+
+ - `VERSION`: currently `1.0` - the file format version
+ - `json`: detailed layer information, similar to `docker inspect layer_id`
+ - `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+ The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+
+ If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
+
+ ```json
+ {
+ "hello-world": {
+ "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
+ }
+ }
+ ```
+ operationId: "ImageGet"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/get:
+ get:
+ summary: "Export several images"
+ description: |
+ Get a tarball containing all images and metadata for several image repositories.
+
+ For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the `repositories` file for this image ID.
+
+ For details on the format, see [the export image endpoint](#operation/ImageGet).
+ operationId: "ImageGetAll"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "names"
+ in: "query"
+ description: "Image names to filter by"
+ type: "array"
+ items:
+ type: "string"
+ tags: ["Image"]
+ /images/load:
+ post:
+ summary: "Import images"
+ description: |
+ Load a set of images and tags into a repository.
+
+ For details on the format, see [the export image endpoint](#operation/ImageGet).
+ operationId: "ImageLoad"
+ consumes:
+ - "application/x-tar"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "imagesTarball"
+ in: "body"
+ description: "Tar archive containing images"
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "quiet"
+ in: "query"
+ description: "Suppress progress details during load."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /containers/{id}/exec:
+ post:
+ summary: "Create an exec instance"
+ description: "Run a command inside a running container."
+ operationId: "ContainerExec"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execConfig"
+ in: "body"
+ description: "Exec configuration"
+ schema:
+ type: "object"
+ properties:
+ AttachStdin:
+ type: "boolean"
+ description: "Attach to `stdin` of the exec command."
+ AttachStdout:
+ type: "boolean"
+ description: "Attach to `stdout` of the exec command."
+ AttachStderr:
+ type: "boolean"
+ description: "Attach to `stderr` of the exec command."
+ DetachKeys:
+ type: "string"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ Env:
+ description: "A list of environment variables in the form `[\"VAR=value\", ...]`."
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ type: "array"
+ description: "Command to run, as a string or array of strings."
+ items:
+ type: "string"
+ Privileged:
+ type: "boolean"
+ description: "Runs the exec process with extended privileges."
+ default: false
+ User:
+ type: "string"
+ description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`."
+ WorkingDir:
+ type: "string"
+ description: "The working directory for the exec process inside the container."
+ example:
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ DetachKeys: "ctrl-p,ctrl-q"
+ Tty: false
+ Cmd:
+ - "date"
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ required: true
+ - name: "id"
+ in: "path"
+ description: "ID or name of container"
+ type: "string"
+ required: true
+ tags: ["Exec"]
+ /exec/{id}/start:
+ post:
+ summary: "Start an exec instance"
+ description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command."
+ operationId: "ExecStart"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Container is stopped or paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execStartConfig"
+ in: "body"
+ schema:
+ type: "object"
+ properties:
+ Detach:
+ type: "boolean"
+ description: "Detach from the command."
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ example:
+ Detach: false
+ Tty: false
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+ /exec/{id}/resize:
+ post:
+ summary: "Resize an exec instance"
+ description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance."
+ operationId: "ExecResize"
+ responses:
+ 201:
+ description: "No error"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ - name: "h"
+ in: "query"
+ description: "Height of the TTY session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ description: "Width of the TTY session in characters"
+ type: "integer"
+ tags: ["Exec"]
+ /exec/{id}/json:
+ get:
+ summary: "Inspect an exec instance"
+ description: "Return low-level information about an exec instance."
+ operationId: "ExecInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ExecInspectResponse"
+ properties:
+ CanRemove:
+ type: "boolean"
+ DetachKeys:
+ type: "string"
+ ID:
+ type: "string"
+ Running:
+ type: "boolean"
+ ExitCode:
+ type: "integer"
+ ProcessConfig:
+ $ref: "#/definitions/ProcessConfig"
+ OpenStdin:
+ type: "boolean"
+ OpenStderr:
+ type: "boolean"
+ OpenStdout:
+ type: "boolean"
+ ContainerID:
+ type: "string"
+ Pid:
+ type: "integer"
+ description: "The system process ID for the exec process."
+ examples:
+ application/json:
+ CanRemove: false
+ ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
+ DetachKeys: ""
+ ExitCode: 2
+ ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
+ OpenStderr: true
+ OpenStdin: true
+ OpenStdout: true
+ ProcessConfig:
+ arguments:
+ - "-c"
+ - "exit 2"
+ entrypoint: "sh"
+ privileged: false
+ tty: true
+ user: "1000"
+ Running: false
+ Pid: 42000
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+
+ /volumes:
+ get:
+ summary: "List volumes"
+ operationId: "VolumeList"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "Summary volume data that matches the query"
+ schema:
+ type: "object"
+ title: "VolumeListResponse"
+ description: "Volume list response"
+ required: [Volumes, Warnings]
+ properties:
+ Volumes:
+ type: "array"
+ x-nullable: false
+ description: "List of volumes"
+ items:
+ $ref: "#/definitions/Volume"
+ Warnings:
+ type: "array"
+ x-nullable: false
+ description: "Warnings that occurred when fetching the list of volumes"
+ items:
+ type: "string"
+
+ examples:
+ application/json:
+ Volumes:
+ - CreatedAt: "2017-07-19T12:00:26Z"
+ Name: "tardis"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/tardis"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope: "local"
+ Options:
+ device: "tmpfs"
+ o: "size=100m,uid=1000"
+ type: "tmpfs"
+ Warnings: []
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to
+ process on the volumes list. Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), returns all
+ volumes that are not in use by a container. When set to `false`
+ (or `0`), only volumes that are in use by one or more
+ containers are returned.
+ - `driver=<volume-driver-name>` Matches volumes based on their driver.
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+ the presence of a `label` alone or a `label` and a value.
+ - `name=<volume-name>` Matches all or part of a volume name.
+ type: "string"
+ format: "json"
+ tags: ["Volume"]
+
+ /volumes/create:
+ post:
+ summary: "Create a volume"
+ operationId: "VolumeCreate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 201:
+ description: "The volume was created successfully"
+ schema:
+ $ref: "#/definitions/Volume"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "volumeConfig"
+ in: "body"
+ required: true
+ description: "Volume configuration"
+ schema:
+ type: "object"
+ description: "Volume configuration"
+ title: "VolumeConfig"
+ properties:
+ Name:
+ description: "The new volume's name. If not specified, Docker generates a name."
+ type: "string"
+ x-nullable: false
+ Driver:
+ description: "Name of the volume driver to use."
+ type: "string"
+ default: "local"
+ x-nullable: false
+ DriverOpts:
+ description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "tardis"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Driver: "custom"
+ tags: ["Volume"]
+
+ /volumes/{name}:
+ get:
+ summary: "Inspect a volume"
+ operationId: "VolumeInspect"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Volume"
+ 404:
+ description: "No such volume"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ tags: ["Volume"]
+
+ delete:
+ summary: "Remove a volume"
+ description: "Instruct the driver to remove the volume."
+ operationId: "VolumeDelete"
+ responses:
+ 204:
+ description: "The volume was removed"
+ 404:
+ description: "No such volume or volume driver"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Volume is in use and cannot be removed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Force the removal of the volume"
+ type: "boolean"
+ default: false
+ tags: ["Volume"]
+ /volumes/prune:
+ post:
+ summary: "Delete unused volumes"
+ produces:
+ - "application/json"
+ operationId: "VolumePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "VolumePruneResponse"
+ properties:
+ VolumesDeleted:
+ description: "Volumes that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Volume"]
+ /networks:
+ get:
+ summary: "List networks"
+ description: |
+ Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).
+
+ Note that it uses a different, smaller representation of a network than inspecting a single network. For example,
+ the list of containers attached to the network is not propagated in API versions 1.28 and up.
+ operationId: "NetworkList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Network"
+ examples:
+ application/json:
+ - Name: "bridge"
+ Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
+ Created: "2016-10-19T06:21:00.416543526Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config:
+ -
+ Subnet: "172.17.0.0/16"
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ - Name: "none"
+ Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "null"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ - Name: "host"
+ Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "host"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), returns all
+ networks that are not in use by a container. When set to `false`
+ (or `0`), only networks that are in use by one or more
+ containers are returned.
+ - `driver=<driver-name>` Matches a network's driver.
+ - `id=<network-id>` Matches all or part of a network ID.
+ - `label=<key>` or `label=<key>=<value>` Matches networks based on the presence of a `label` alone or a `label` and a value.
+ - `name=<network-name>` Matches all or part of a network name.
+ - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
+ - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+ type: "string"
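+ # Illustrative only: e.g. to list user-defined networks in the local scope:
+ #   {"type":["custom"],"scope":["local"]}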
+ tags: ["Network"]
+
+ /networks/{id}:
+ get:
+ summary: "Inspect a network"
+ operationId: "NetworkInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Network"
+ 404:
+ description: "Network not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "verbose"
+ in: "query"
+ description: "Detailed inspect output for troubleshooting"
+ type: "boolean"
+ default: false
+ - name: "scope"
+ in: "query"
+ description: "Filter the network by scope (swarm, global, or local)"
+ type: "string"
+ tags: ["Network"]
+
+ delete:
+ summary: "Remove a network"
+ operationId: "NetworkDelete"
+ responses:
+ 204:
+ description: "No error"
+ 403:
+ description: "operation not supported for pre-defined networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such network"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ tags: ["Network"]
+
+ /networks/create:
+ post:
+ summary: "Create a network"
+ operationId: "NetworkCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "NetworkCreateResponse"
+ properties:
+ Id:
+ description: "The ID of the created network."
+ type: "string"
+ Warning:
+ type: "string"
+ example:
+ Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
+ Warning: ""
+ 403:
+ description: "operation not supported for pre-defined networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "plugin not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "networkConfig"
+ in: "body"
+ description: "Network configuration"
+ required: true
+ schema:
+ type: "object"
+ required: ["Name"]
+ properties:
+ Name:
+ description: "The network's name."
+ type: "string"
+ CheckDuplicate:
+ description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions."
+ type: "boolean"
+ Driver:
+ description: "Name of the network driver plugin to use."
+ type: "string"
+ default: "bridge"
+ Internal:
+ description: "Restrict external access to the network."
+ type: "boolean"
+ Attachable:
+ description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode."
+ type: "boolean"
+ Ingress:
+ description: "Ingress network is the network which provides the routing-mesh in swarm mode."
+ type: "boolean"
+ IPAM:
+ description: "Optional custom IP scheme for the network."
+ $ref: "#/definitions/IPAM"
+ EnableIPv6:
+ description: "Enable IPv6 on the network."
+ type: "boolean"
+ Options:
+ description: "Network specific options to be used by the drivers."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "isolated_nw"
+ CheckDuplicate: false
+ Driver: "bridge"
+ EnableIPv6: true
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.20.0.0/16"
+ IPRange: "172.20.10.0/24"
+ Gateway: "172.20.10.11"
+ - Subnet: "2001:db8:abcd::/64"
+ Gateway: "2001:db8:abcd::1011"
+ Options:
+ foo: "bar"
+ Internal: true
+ Attachable: false
+ Ingress: false
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ tags: ["Network"]
+
+ /networks/{id}/connect:
+ post:
+ summary: "Connect a container to a network"
+ operationId: "NetworkConnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 403:
+ description: "Operation not supported for swarm scoped networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ Container:
+ type: "string"
+ description: "The ID or name of the container to connect to the network."
+ EndpointConfig:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ Container: "3613f73ba0e4"
+ EndpointConfig:
+ IPAMConfig:
+ IPv4Address: "172.24.56.89"
+ IPv6Address: "2001:db8::5689"
+ tags: ["Network"]
+
+ /networks/{id}/disconnect:
+ post:
+ summary: "Disconnect a container from a network"
+ operationId: "NetworkDisconnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 403:
+ description: "Operation not supported for swarm scoped networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ Container:
+ type: "string"
+ description: "The ID or name of the container to disconnect from the network."
+ Force:
+ type: "boolean"
+ description: "Force the container to disconnect from the network."
+ tags: ["Network"]
+ /networks/prune:
+ post:
+ summary: "Delete unused networks"
+ produces:
+ - "application/json"
+ operationId: "NetworkPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "NetworkPruneResponse"
+ properties:
+ NetworksDeleted:
+ description: "Networks that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Network"]
+ /plugins:
+ get:
+ summary: "List plugins"
+ operationId: "PluginList"
+ description: "Returns information about installed plugins."
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Plugin"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters:
+
+ - `capability=<capability name>`
+ - `enable=<true>|<false>`
+ tags: ["Plugin"]
+
+ /plugins/privileges:
+ get:
+ summary: "Get plugin privileges"
+ operationId: "GetPluginPrivileges"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission the user has to accept upon installing the plugin."
+ type: "object"
+ title: "PluginPrivilegeItem"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags:
+ - "Plugin"
+
+ /plugins/pull:
+ post:
+ summary: "Install a plugin"
+ operationId: "PluginPull"
+ description: |
+ Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference for plugin to install.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "name"
+ in: "query"
+ description: |
+ Local name for the pulled plugin.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: false
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/{name}/json:
+ get:
+ summary: "Inspect a plugin"
+ operationId: "PluginInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags: ["Plugin"]
+ /plugins/{name}:
+ delete:
+ summary: "Remove a plugin"
+ operationId: "PluginDelete"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container."
+ type: "boolean"
+ default: false
+ tags: ["Plugin"]
+ /plugins/{name}/enable:
+ post:
+ summary: "Enable a plugin"
+ operationId: "PluginEnable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "timeout"
+ in: "query"
+ description: "Set the HTTP client timeout (in seconds)"
+ type: "integer"
+ default: 0
+ tags: ["Plugin"]
+ /plugins/{name}/disable:
+ post:
+ summary: "Disable a plugin"
+ operationId: "PluginDisable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags: ["Plugin"]
+ /plugins/{name}/upgrade:
+ post:
+ summary: "Upgrade a plugin"
+ operationId: "PluginUpgrade"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference to upgrade to.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/create:
+ post:
+ summary: "Create a plugin"
+ operationId: "PluginCreate"
+ consumes:
+ - "application/x-tar"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "tarContext"
+ in: "body"
+ description: "Path to tar containing plugin rootfs and manifest"
+ schema:
+ type: "string"
+ format: "binary"
+ tags: ["Plugin"]
+ /plugins/{name}/push:
+ post:
+ summary: "Push a plugin"
+ operationId: "PluginPush"
+ description: |
+ Push a plugin to the registry.
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /plugins/{name}/set:
+ post:
+ summary: "Configure a plugin"
+ operationId: "PluginSet"
+ consumes:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ type: "string"
+ example: ["DEBUG=1"]
+ responses:
+ 204:
+ description: "No error"
+ 404:
+ description: "Plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /nodes:
+ get:
+ summary: "List nodes"
+ operationId: "NodeList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Node"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `id=<node id>`
+ - `label=<engine label>`
+ - `membership=`(`accepted`|`pending`)
+ - `name=<node name>`
+ - `node.label=<node label>`
+ - `role=`(`manager`|`worker`)
+ type: "string"
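+ # Illustrative only: e.g. to list manager nodes whose membership has been accepted:
+ #   {"role":["manager"],"membership":["accepted"]}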
+ tags: ["Node"]
+ /nodes/{id}:
+ get:
+ summary: "Inspect a node"
+ operationId: "NodeInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Node"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ tags: ["Node"]
+ delete:
+ summary: "Delete a node"
+ operationId: "NodeDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Force remove a node from the swarm"
+ default: false
+ type: "boolean"
+ tags: ["Node"]
+ /nodes/{id}/update:
+ post:
+ summary: "Update a node"
+ operationId: "NodeUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID of the node"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/NodeSpec"
+ - name: "version"
+ in: "query"
+ description: "The version number of the node object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Node"]
+ /swarm:
+ get:
+ summary: "Inspect swarm"
+ operationId: "SwarmInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Swarm"
+ 404:
+ description: "no such swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/init:
+ post:
+ summary: "Initialize a new swarm"
+ operationId: "SwarmInit"
+ produces:
+ - "application/json"
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ description: "The node ID"
+ type: "string"
+ example: "7v2t30z9blmxuhnyo6s4cpenp"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ ListenAddr:
+ description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used."
+ type: "string"
+ AdvertiseAddr:
+ description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
+ or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
+ is used.
+
+ The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
+ nodes in order to reach the containers running on this node. Using this parameter it is possible to
+ separate the container data traffic from the management traffic of the cluster.
+ type: "string"
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+ If no port is set or is set to 0, the default port 4789 will be used.
+ type: "integer"
+ format: "uint32"
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global scope networks.
+ type: "array"
+ items:
+ type: "string"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ ForceNewCluster:
+ description: "Force creation of a new swarm."
+ type: "boolean"
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created from the default subnet pool
+ type: "integer"
+ format: "uint32"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ DataPathPort: 4789
+ DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"]
+ SubnetSize: 24
+ ForceNewCluster: false
+ Spec:
+ Orchestration: {}
+ Raft: {}
+ Dispatcher: {}
+ CAConfig: {}
+ EncryptionConfig:
+ AutoLockManagers: false
+ tags: ["Swarm"]
+ /swarm/join:
+ post:
+ summary: "Join an existing swarm"
+ operationId: "SwarmJoin"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ ListenAddr:
+ description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)."
+ type: "string"
+ AdvertiseAddr:
+ description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
+ or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
+ is used.
+
+ The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
+ nodes in order to reach the containers running on this node. Using this parameter it is possible to
+ separate the container data traffic from the management traffic of the cluster.
+
+ type: "string"
+ RemoteAddrs:
+ description: "Addresses of manager nodes already participating in the swarm."
+ type: "string"
+ JoinToken:
+ description: "Secret token for joining this swarm."
+ type: "string"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ RemoteAddrs:
+ - "node1:2377"
+ JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+ tags: ["Swarm"]
+ /swarm/leave:
+ post:
+ summary: "Leave a swarm"
+ operationId: "SwarmLeave"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "force"
+ description: "Force leave swarm, even if this is the last manager or that it will break the cluster."
+ in: "query"
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/update:
+ post:
+ summary: "Update a swarm"
+ operationId: "SwarmUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ $ref: "#/definitions/SwarmSpec"
+ - name: "version"
+ in: "query"
+ description: "The version number of the swarm object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ - name: "rotateWorkerToken"
+ in: "query"
+ description: "Rotate the worker join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerToken"
+ in: "query"
+ description: "Rotate the manager join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerUnlockKey"
+ in: "query"
+ description: "Rotate the manager unlock key."
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/unlockkey:
+ get:
+ summary: "Get the unlock key"
+ operationId: "SwarmUnlockkey"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "UnlockKeyResponse"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/unlock:
+ post:
+ summary: "Unlock a locked manager"
+ operationId: "SwarmUnlock"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /services:
+ get:
+ summary: "List services"
+ operationId: "ServiceList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Service"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:
+
+ - `id=<service id>`
+ - `label=<service label>`
+ - `mode=["replicated"|"global"]`
+ - `name=<service name>`
+ tags: ["Service"]
+ /services/create:
+ post:
+ summary: "Create a service"
+ operationId: "ServiceCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "ServiceCreateResponse"
+ properties:
+ ID:
+ description: "The ID of the created service."
+ type: "string"
+ Warning:
+ description: "Optional warning message"
+ type: "string"
+ example:
+ ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
+ Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "network is not eligible for services"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "name conflicts with an existing service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "web"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "nginx:alpine"
+ Mounts:
+ -
+ ReadOnly: true
+ Source: "web-data"
+ Target: "/usr/share/nginx/html"
+ Type: "volume"
+ VolumeOptions:
+ DriverConfig: {}
+ Labels:
+ com.example.something: "something-value"
+ Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
+ User: "33"
+ DNSConfig:
+ Nameservers: ["8.8.8.8"]
+ Search: ["example.org"]
+ Options: ["timeout:3"]
+ Secrets:
+ -
+ File:
+ Name: "www.example.org.key"
+ UID: "33"
+ GID: "33"
+ Mode: 384
+ SecretID: "fpjqlhnwb19zds35k8wn80lq9"
+ SecretName: "example_org_domain_key"
+ LogDriver:
+ Name: "json-file"
+ Options:
+ max-file: "3"
+ max-size: "10M"
+ Placement: {}
+ Resources:
+ Limits:
+ MemoryBytes: 104857600
+ Reservations: {}
+ RestartPolicy:
+ Condition: "on-failure"
+ Delay: 10000000000
+ MaxAttempts: 10
+ Mode:
+ Replicated:
+ Replicas: 4
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Ports:
+ -
+ Protocol: "tcp"
+ PublishedPort: 8080
+ TargetPort: 80
+ Labels:
+ foo: "bar"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ tags: ["Service"]
+ /services/{id}:
+ get:
+ summary: "Inspect a service"
+ operationId: "ServiceInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Service"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "insertDefaults"
+ in: "query"
+ description: "Fill empty fields with default values."
+ type: "boolean"
+ default: false
+ tags: ["Service"]
+ delete:
+ summary: "Delete a service"
+ operationId: "ServiceDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ tags: ["Service"]
+ /services/{id}/update:
+ post:
+ summary: "Update a service"
+ operationId: "ServiceUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ServiceUpdateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "top"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "busybox"
+ Args:
+ - "top"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+
+ - name: "version"
+ in: "query"
+ description: "The version number of the service object being updated.
+ This is required to avoid conflicting writes.
+ This version number should be the value as currently set on the service *before* the update.
+ You can find the current version by calling `GET /services/{id}`"
+ required: true
+ type: "integer"
+ - name: "registryAuthFrom"
+ in: "query"
+ type: "string"
+ description: "If the X-Registry-Auth header is not specified, this
+ parameter indicates where to find registry authorization credentials. The
+ valid values are `spec` and `previous-spec`."
+ default: "spec"
+ - name: "rollback"
+ in: "query"
+ type: "string"
+ description: "Set to this parameter to `previous` to cause a
+ server-side rollback to the previous service spec. The supplied spec will be
+ ignored in this case."
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+
+ tags: ["Service"]
+ /services/{id}/logs:
+ get:
+ summary: "Get service logs"
+ description: |
+ Get `stdout` and `stderr` logs from a service.
+
+ **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "ServiceLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/json"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such service: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the service"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show service context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Service"]
+ /tasks:
+ get:
+ summary: "List tasks"
+ operationId: "TaskList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Task"
+ example:
+ - ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ - ID: "1yljwbmlr8er2waf8orvqpwms"
+ Version:
+ Index: 30
+ CreatedAt: "2016-06-07T21:07:30.019104782Z"
+ UpdatedAt: "2016-06-07T21:07:30.231958098Z"
+ Name: "hopeful_cori"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:30.202183143Z"
+ State: "shutdown"
+ Message: "shutdown"
+ ContainerStatus:
+ ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+ DesiredState: "shutdown"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.5/16"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters:
+
+ - `desired-state=(running | shutdown | accepted)`
+ - `id=<task id>`
+ - `label=key` or `label="key=value"`
+ - `name=<task name>`
+ - `node=<node id or name>`
+ - `service=<service name>`
+ tags: ["Task"]
+ /tasks/{id}:
+ get:
+ summary: "Inspect a task"
+ operationId: "TaskInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Task"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID of the task"
+ required: true
+ type: "string"
+ tags: ["Task"]
+ /tasks/{id}/logs:
+ get:
+ summary: "Get task logs"
+ description: |
+ Get `stdout` and `stderr` logs from a task.
+
+ **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "TaskLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/json"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such task: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID of the task"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show task context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Task"]
+ /secrets:
+ get:
+ summary: "List secrets"
+ operationId: "SecretList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Secret"
+ example:
+ - ID: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ Index: 85
+ CreatedAt: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ Name: "mysql-passwd"
+ Labels:
+ some.label: "some.value"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:
+
+ - `id=<secret id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<secret name>`
+ - `names=<secret name>`
+ tags: ["Secret"]
+ /secrets/create:
+ post:
+ summary: "Create a secret"
+ operationId: "SecretCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/SecretSpec"
+ - type: "object"
+ example:
+ Name: "app-key.crt"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ tags: ["Secret"]
+ /secrets/{id}:
+ get:
+ summary: "Inspect a secret"
+ operationId: "SecretInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Secret"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ delete:
+ summary: "Delete a secret"
+ operationId: "SecretDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ /secrets/{id}/update:
+ post:
+ summary: "Update a Secret"
+ operationId: "SecretUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such secret"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the secret"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/SecretSpec"
+ description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values."
+ - name: "version"
+ in: "query"
+ description: "The version number of the secret object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Secret"]
+ /configs:
+ get:
+ summary: "List configs"
+ operationId: "ConfigList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Config"
+ example:
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "server.conf"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters:
+
+ - `id=<config id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<config name>`
+ - `names=<config name>`
+ tags: ["Config"]
+ /configs/create:
+ post:
+ summary: "Create a config"
+ operationId: "ConfigCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ConfigSpec"
+ - type: "object"
+ example:
+ Name: "server.conf"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ tags: ["Config"]
+ /configs/{id}:
+ get:
+ summary: "Inspect a config"
+ operationId: "ConfigInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Config"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ delete:
+ summary: "Delete a config"
+ operationId: "ConfigDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ /configs/{id}/update:
+ post:
+ summary: "Update a Config"
+ operationId: "ConfigUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values."
+ - name: "version"
+ in: "query"
+ description: "The version number of the config object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: "Return image digest and platform information by contacting the registry."
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ type: "object"
+ x-go-name: DistributionInspect
+ title: "DistributionInspectResponse"
+ required: [Descriptor, Platforms]
+ properties:
+ Descriptor:
+ type: "object"
+ description: "A descriptor struct containing digest, media type, and size"
+ properties:
+ MediaType:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ Digest:
+ type: "string"
+ URLs:
+ type: "array"
+ items:
+ type: "string"
+ Platforms:
+ type: "array"
+ description: "An array containing all platforms supported by the image"
+ items:
+ type: "object"
+ properties:
+ Architecture:
+ type: "string"
+ OS:
+ type: "string"
+ OSVersion:
+ type: "string"
+ OSFeatures:
+ type: "array"
+ items:
+ type: "string"
+ Variant:
+ type: "string"
+ Features:
+ type: "array"
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Descriptor:
+ MediaType: "application/vnd.docker.distribution.manifest.v2+json"
+ Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
+ Size: 3987495
+ URLs:
+ - ""
+ Platforms:
+ - Architecture: "amd64"
+ OS: "linux"
+ OSVersion: ""
+ OSFeatures:
+ - ""
+ Variant: ""
+ Features:
+ - ""
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. A session allows the server to call back to the client for advanced capabilities.
+
+ > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental
+ > features enabled. The specifications for this endpoint may still change in a future version of the API.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to an HTTP/2 transport that allows the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, followed by the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session (experimental)"]
diff --git a/vendor/github.com/docker/docker/contrib/README.md b/vendor/github.com/docker/docker/contrib/README.md
deleted file mode 100644
index 92b1d9443..000000000
--- a/vendor/github.com/docker/docker/contrib/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-The `contrib` directory contains scripts, images, and other helpful things
-which are not part of the core docker distribution. Please note that they
-could be out of date, since they do not receive the same attention as the
-rest of the repository.
diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
deleted file mode 100644
index b767da7e1..000000000
--- a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/types.h>
-
-int main(int argc, char *argv[])
-{
- printf("EUID=%d\n", geteuid());
- return 0;
-}
-
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
deleted file mode 100644
index 88ac28796..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#define _GNU_SOURCE
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-
-int main(int argc, char **argv)
-{
- int err = acct("/tmp/t");
- if (err == -1) {
- fprintf(stderr, "acct failed: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- exit(EXIT_SUCCESS);
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
deleted file mode 100644
index 8bbb5c58b..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.globl _start
-.text
-_start:
- xorl %eax, %eax
- incl %eax
- movb $0, %bl
- int $0x80
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
deleted file mode 100644
index 624388630..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
- char **argv;
-};
-
-// child_exec is the func that will be executed as the result of clone
-static int child_exec(void *stuff)
-{
- struct clone_args *args = (struct clone_args *)stuff;
- if (execvp(args->argv[0], args->argv) != 0) {
- fprintf(stderr, "failed to execvp arguments %s\n",
- strerror(errno));
- exit(-1);
- }
- // we should never reach here!
- exit(EXIT_FAILURE);
-}
-
-int main(int argc, char **argv)
-{
- struct clone_args args;
- args.argv = &argv[1];
-
- int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
-
- // allocate stack for child
- char *stack; /* Start of stack buffer */
- char *child_stack; /* End of stack buffer */
- stack =
- mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
- if (stack == MAP_FAILED) {
- fprintf(stderr, "mmap failed: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
-
- // the result of this call is that our child_exec will be run in another
- // process returning its pid
- pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
- if (pid < 0) {
- fprintf(stderr, "clone failed: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- // lets wait on our child process here before we, the parent, exits
- if (waitpid(pid, NULL, 0) == -1) {
- fprintf(stderr, "failed to wait pid %d\n", pid);
- exit(EXIT_FAILURE);
- }
- exit(EXIT_SUCCESS);
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
deleted file mode 100644
index 7995a0d3a..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <netinet/ip.h>
-#include <netinet/udp.h>
-
-int main() {
- if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
- perror("socket");
- return 1;
- }
-
- return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
deleted file mode 100644
index df9680c86..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-
-int main() {
- if (setgid(1) == -1) {
- perror("setgid");
- return 1;
- }
- return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
deleted file mode 100644
index 5b939677e..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-
-int main() {
- if (setuid(1) == -1) {
- perror("setuid");
- return 1;
- }
- return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
deleted file mode 100644
index d26c82f00..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
+++ /dev/null
@@ -1,30 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-
-int main() {
- int s;
- struct sockaddr_in sin;
-
- s = socket(AF_INET, SOCK_STREAM, 0);
- if (s == -1) {
- perror("socket");
- return 1;
- }
-
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = INADDR_ANY;
- sin.sin_port = htons(80);
-
- if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
- perror("bind");
- return 1;
- }
-
- close(s);
-
- return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
deleted file mode 100644
index 4c5c8d304..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
- char **argv;
-};
-
-// child_exec is the func that will be executed as the result of clone
-static int child_exec(void *stuff)
-{
- struct clone_args *args = (struct clone_args *)stuff;
- if (execvp(args->argv[0], args->argv) != 0) {
- fprintf(stderr, "failed to execvp arguments %s\n",
- strerror(errno));
- exit(-1);
- }
- // we should never reach here!
- exit(EXIT_FAILURE);
-}
-
-int main(int argc, char **argv)
-{
- struct clone_args args;
- args.argv = &argv[1];
-
- int clone_flags = CLONE_NEWUSER | SIGCHLD;
-
- // allocate stack for child
- char *stack; /* Start of stack buffer */
- char *child_stack; /* End of stack buffer */
- stack =
- mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
- if (stack == MAP_FAILED) {
- fprintf(stderr, "mmap failed: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
-
- // the result of this call is that our child_exec will be run in another
- // process returning its pid
- pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
- if (pid < 0) {
- fprintf(stderr, "clone failed: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- // lets wait on our child process here before we, the parent, exits
- if (waitpid(pid, NULL, 0) == -1) {
- fprintf(stderr, "failed to wait pid %d\n", pid);
- exit(EXIT_FAILURE);
- }
- exit(EXIT_SUCCESS);
-}
diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go
deleted file mode 100644
index 9b27a6285..000000000
--- a/vendor/github.com/docker/docker/opts/address_pools.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package opts
-
-import (
- "encoding/csv"
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
-
- types "github.com/docker/libnetwork/ipamutils"
-)
-
-// PoolsOpt is a Value type for parsing the default address pools definitions
-type PoolsOpt struct {
- values []*types.NetworkToSplit
-}
-
-// UnmarshalJSON fills values structure info from JSON input
-func (p *PoolsOpt) UnmarshalJSON(raw []byte) error {
- return json.Unmarshal(raw, &(p.values))
-}
-
-// Set predefined pools
-func (p *PoolsOpt) Set(value string) error {
- csvReader := csv.NewReader(strings.NewReader(value))
- fields, err := csvReader.Read()
- if err != nil {
- return err
- }
-
- poolsDef := types.NetworkToSplit{}
-
- for _, field := range fields {
- parts := strings.SplitN(field, "=", 2)
- if len(parts) != 2 {
- return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
- }
-
- key := strings.ToLower(parts[0])
- value := strings.ToLower(parts[1])
-
- switch key {
- case "base":
- poolsDef.Base = value
- case "size":
- size, err := strconv.Atoi(value)
- if err != nil {
- return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err)
- }
- poolsDef.Size = size
- default:
- return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
- }
- }
-
- p.values = append(p.values, &poolsDef)
-
- return nil
-}
-
-// Type returns the type of this option
-func (p *PoolsOpt) Type() string {
- return "pool-options"
-}
-
-// String returns a string repr of this option
-func (p *PoolsOpt) String() string {
- var pools []string
- for _, pool := range p.values {
- repr := fmt.Sprintf("%s %d", pool.Base, pool.Size)
- pools = append(pools, repr)
- }
- return strings.Join(pools, ", ")
-}
-
-// Value returns the mounts
-func (p *PoolsOpt) Value() []*types.NetworkToSplit {
- return p.values
-}
-
-// Name returns the flag name of this option
-func (p *PoolsOpt) Name() string {
- return "default-address-pools"
-}
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
deleted file mode 100644
index f6e5e9074..000000000
--- a/vendor/github.com/docker/docker/opts/env.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
- "os"
- "runtime"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// ValidateEnv validates an environment variable and returns it.
-// If no value is specified, it returns the current value using os.Getenv.
-//
-// As on ParseEnvFile and related to #16585, environment variable names
-// are not validate what so ever, it's up to application inside docker
-// to validate them or not.
-//
-// The only validation here is to check if name is empty, per #25099
-func ValidateEnv(val string) (string, error) {
- arr := strings.Split(val, "=")
- if arr[0] == "" {
- return "", errors.Errorf("invalid environment variable: %s", val)
- }
- if len(arr) > 1 {
- return val, nil
- }
- if !doesEnvExist(val) {
- return val, nil
- }
- return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
-}
-
-func doesEnvExist(name string) bool {
- for _, entry := range os.Environ() {
- parts := strings.SplitN(entry, "=", 2)
- if runtime.GOOS == "windows" {
- // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent.
- if strings.EqualFold(parts[0], name) {
- return true
- }
- }
- if parts[0] == name {
- return true
- }
- }
- return false
-}
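A short usage sketch of `ValidateEnv` from the file above, assuming the upstream `github.com/docker/docker/opts` package is importable in your module. As in the code shown, explicit `key=value` pairs pass through unchanged and bare names are expanded from the current environment when set.

```go
// Usage sketch for ValidateEnv; assumes github.com/docker/docker/opts is
// available as a module dependency.
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Explicit key=value pairs pass through unchanged.
	v, _ := opts.ValidateEnv("FOO=bar")
	fmt.Println(v) // FOO=bar

	// A bare name that exists in the current environment is expanded to
	// NAME=value using os.Getenv; unknown names pass through as-is.
	v, _ = opts.ValidateEnv("HOME")
	fmt.Println(v) // e.g. HOME=/home/user
}
```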
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
deleted file mode 100644
index 3d8785f11..000000000
--- a/vendor/github.com/docker/docker/opts/hosts.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
- "net"
- "net/url"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/docker/docker/pkg/homedir"
-)
-
-var (
- // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
- // These are the IANA registered port numbers for use with Docker
- // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
- DefaultHTTPPort = 2375 // Default HTTP Port
- // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
- DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
- // DefaultUnixSocket Path for the unix socket.
- // Docker daemon by default always listens on the default unix socket
- DefaultUnixSocket = "/var/run/docker.sock"
- // DefaultTCPHost constant defines the default host string used by docker on Windows
- DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
- // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
- DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
- // DefaultNamedPipe defines the default named pipe used by docker on Windows
- DefaultNamedPipe = `//./pipe/docker_engine`
-)
-
-// ValidateHost validates that the specified string is a valid host and returns it.
-func ValidateHost(val string) (string, error) {
- host := strings.TrimSpace(val)
- // The empty string means default and is not handled by parseDaemonHost
- if host != "" {
- _, err := parseDaemonHost(host)
- if err != nil {
- return val, err
- }
- }
- // Note: unlike most flag validators, we don't return the mutated value here
- // we need to know what the user entered later (using ParseHost) to adjust for TLS
- return val, nil
-}
-
-// ParseHost and set defaults for a Daemon host string.
-// defaultToTLS is preferred over defaultToUnixRootless.
-func ParseHost(defaultToTLS, defaultToUnixRootless bool, val string) (string, error) {
- host := strings.TrimSpace(val)
- if host == "" {
- if defaultToTLS {
- host = DefaultTLSHost
- } else if defaultToUnixRootless {
- runtimeDir, err := homedir.GetRuntimeDir()
- if err != nil {
- return "", err
- }
- socket := filepath.Join(runtimeDir, "docker.sock")
- host = "unix://" + socket
- } else {
- host = DefaultHost
- }
- } else {
- var err error
- host, err = parseDaemonHost(host)
- if err != nil {
- return val, err
- }
- }
- return host, nil
-}
-
-// parseDaemonHost parses the specified address and returns an address that will be used as the host.
-// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
-func parseDaemonHost(addr string) (string, error) {
- addrParts := strings.SplitN(addr, "://", 2)
- if len(addrParts) == 1 && addrParts[0] != "" {
- addrParts = []string{"tcp", addrParts[0]}
- }
-
- switch addrParts[0] {
- case "tcp":
- return ParseTCPAddr(addrParts[1], DefaultTCPHost)
- case "unix":
- return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
- case "npipe":
- return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
- case "fd":
- return addr, nil
- default:
- return "", fmt.Errorf("Invalid bind address format: %s", addr)
- }
-}
-
-// parseSimpleProtoAddr parses and validates that the specified address is a valid
-// socket address for simple protocols like unix and npipe. It returns a formatted
-// socket address, either using the address parsed from addr, or the contents of
-// defaultAddr if addr is a blank string.
-func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
- addr = strings.TrimPrefix(addr, proto+"://")
- if strings.Contains(addr, "://") {
- return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
- }
- if addr == "" {
- addr = defaultAddr
- }
- return fmt.Sprintf("%s://%s", proto, addr), nil
-}
-
-// ParseTCPAddr parses and validates that the specified address is a valid TCP
-// address. It returns a formatted TCP address, either using the address parsed
-// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
-// tryAddr is expected to have already been Trim()'d
-// defaultAddr must be in the full `tcp://host:port` form
-func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
- if tryAddr == "" || tryAddr == "tcp://" {
- return defaultAddr, nil
- }
- addr := strings.TrimPrefix(tryAddr, "tcp://")
- if strings.Contains(addr, "://") || addr == "" {
- return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
- }
-
- defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
- defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
- if err != nil {
- return "", err
- }
- // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
- // not 1.4. See https://github.com/golang/go/issues/12200 and
- // https://github.com/golang/go/issues/6530.
- if strings.HasSuffix(addr, "]:") {
- addr += defaultPort
- }
-
- u, err := url.Parse("tcp://" + addr)
- if err != nil {
- return "", err
- }
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- // try port addition once
- host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
- }
- if err != nil {
- return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
- }
-
- if host == "" {
- host = defaultHost
- }
- if port == "" {
- port = defaultPort
- }
- p, err := strconv.Atoi(port)
- if err != nil && p == 0 {
- return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
- }
-
- return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
-}
-
-// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
-// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
-func ValidateExtraHost(val string) (string, error) {
- // allow for IPv6 addresses in extra hosts by only splitting on first ":"
- arr := strings.SplitN(val, ":", 2)
- if len(arr) != 2 || len(arr[0]) == 0 {
- return "", fmt.Errorf("bad format for add-host: %q", val)
- }
- if _, err := ValidateIPAddress(arr[1]); err != nil {
- return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
- }
- return val, nil
-}
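A usage sketch of `ParseTCPAddr` from the deleted hosts.go above, again assuming the upstream `github.com/docker/docker/opts` package is importable. Missing host or port components are filled in from the supplied default address, which must be in the full `tcp://host:port` form.

```go
// Usage sketch for ParseTCPAddr; assumes github.com/docker/docker/opts is
// available as a module dependency.
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// A bare port takes its host from the default address.
	addr, err := opts.ParseTCPAddr("tcp://:2375", "tcp://127.0.0.1:2375")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // tcp://127.0.0.1:2375

	// A bare host keeps the default port.
	addr, _ = opts.ParseTCPAddr("tcp://0.0.0.0", "tcp://127.0.0.1:2375")
	fmt.Println(addr) // tcp://0.0.0.0:2375
}
```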
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
deleted file mode 100644
index 9d5bb6456..000000000
--- a/vendor/github.com/docker/docker/opts/hosts_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows
-
-package opts // import "github.com/docker/docker/opts"
-
-import "fmt"
-
-// DefaultHost constant defines the default host string used by docker on other hosts than Windows
-var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
deleted file mode 100644
index 906eba53e..000000000
--- a/vendor/github.com/docker/docker/opts/hosts_windows.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-// DefaultHost constant defines the default host string used by docker on Windows
-var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
deleted file mode 100644
index cfbff3a9f..000000000
--- a/vendor/github.com/docker/docker/opts/ip.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
- "net"
-)
-
-// IPOpt holds an IP. It is used to store values from CLI flags.
-type IPOpt struct {
- *net.IP
-}
-
-// NewIPOpt creates a new IPOpt from a reference net.IP and a
-// string representation of an IP. If the string is not a valid
-// IP it will fallback to the specified reference.
-func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
- o := &IPOpt{
- IP: ref,
- }
- o.Set(defaultVal)
- return o
-}
-
-// Set sets an IPv4 or IPv6 address from a given string. If the given
-// string is not parsable as an IP address it returns an error.
-func (o *IPOpt) Set(val string) error {
- ip := net.ParseIP(val)
- if ip == nil {
- return fmt.Errorf("%s is not an ip address", val)
- }
- *o.IP = ip
- return nil
-}
-
-// String returns the IP address stored in the IPOpt. If stored IP is a
-// nil pointer, it returns an empty string.
-func (o *IPOpt) String() string {
- if *o.IP == nil {
- return ""
- }
- return o.IP.String()
-}
-
-// Type returns the type of the option
-func (o *IPOpt) Type() string {
- return "ip"
-}
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
deleted file mode 100644
index de8aacb80..000000000
--- a/vendor/github.com/docker/docker/opts/opts.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
- "net"
- "path"
- "regexp"
- "strings"
-
- "github.com/docker/go-units"
-)
-
-var (
- alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
- domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
-)
-
-// ListOpts holds a list of values and a validation function.
-type ListOpts struct {
- values *[]string
- validator ValidatorFctType
-}
-
-// NewListOpts creates a new ListOpts with the specified validator.
-func NewListOpts(validator ValidatorFctType) ListOpts {
- var values []string
- return *NewListOptsRef(&values, validator)
-}
-
-// NewListOptsRef creates a new ListOpts with the specified values and validator.
-func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
- return &ListOpts{
- values: values,
- validator: validator,
- }
-}
-
-func (opts *ListOpts) String() string {
- if len(*opts.values) == 0 {
- return ""
- }
- return fmt.Sprintf("%v", *opts.values)
-}
-
-// Set validates if needed the input value and adds it to the
-// internal slice.
-func (opts *ListOpts) Set(value string) error {
- if opts.validator != nil {
- v, err := opts.validator(value)
- if err != nil {
- return err
- }
- value = v
- }
- *opts.values = append(*opts.values, value)
- return nil
-}
-
-// Delete removes the specified element from the slice.
-func (opts *ListOpts) Delete(key string) {
- for i, k := range *opts.values {
- if k == key {
- *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...)
- return
- }
- }
-}
-
-// GetMap returns the content of values in a map in order to avoid
-// duplicates.
-func (opts *ListOpts) GetMap() map[string]struct{} {
- ret := make(map[string]struct{})
- for _, k := range *opts.values {
- ret[k] = struct{}{}
- }
- return ret
-}
-
-// GetAll returns the values of slice.
-func (opts *ListOpts) GetAll() []string {
- return *opts.values
-}
-
-// GetAllOrEmpty returns the values of the slice
-// or an empty slice when there are no values.
-func (opts *ListOpts) GetAllOrEmpty() []string {
- v := *opts.values
- if v == nil {
- return make([]string, 0)
- }
- return v
-}
-
-// Get checks the existence of the specified key.
-func (opts *ListOpts) Get(key string) bool {
- for _, k := range *opts.values {
- if k == key {
- return true
- }
- }
- return false
-}
-
-// Len returns the amount of element in the slice.
-func (opts *ListOpts) Len() int {
- return len(*opts.values)
-}
-
-// Type returns a string name for this Option type
-func (opts *ListOpts) Type() string {
- return "list"
-}
-
-// WithValidator returns the ListOpts with validator set.
-func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
- opts.validator = validator
- return opts
-}
-
-// NamedOption is an interface that list and map options
-// with names implement.
-type NamedOption interface {
- Name() string
-}
-
-// NamedListOpts is a ListOpts with a configuration name.
-// This struct is useful to keep reference to the assigned
-// field name in the internal configuration struct.
-type NamedListOpts struct {
- name string
- ListOpts
-}
-
-var _ NamedOption = &NamedListOpts{}
-
-// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
-func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
- return &NamedListOpts{
- name: name,
- ListOpts: *NewListOptsRef(values, validator),
- }
-}
-
-// Name returns the name of the NamedListOpts in the configuration.
-func (o *NamedListOpts) Name() string {
- return o.name
-}
-
-// MapOpts holds a map of values and a validation function.
-type MapOpts struct {
- values map[string]string
- validator ValidatorFctType
-}
-
-// Set validates if needed the input value and add it to the
-// internal map, by splitting on '='.
-func (opts *MapOpts) Set(value string) error {
- if opts.validator != nil {
- v, err := opts.validator(value)
- if err != nil {
- return err
- }
- value = v
- }
- vals := strings.SplitN(value, "=", 2)
- if len(vals) == 1 {
- (opts.values)[vals[0]] = ""
- } else {
- (opts.values)[vals[0]] = vals[1]
- }
- return nil
-}
-
-// GetAll returns the values of MapOpts as a map.
-func (opts *MapOpts) GetAll() map[string]string {
- return opts.values
-}
-
-func (opts *MapOpts) String() string {
- return fmt.Sprintf("%v", opts.values)
-}
-
-// Type returns a string name for this Option type
-func (opts *MapOpts) Type() string {
- return "map"
-}
-
-// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
-func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
- if values == nil {
- values = make(map[string]string)
- }
- return &MapOpts{
- values: values,
- validator: validator,
- }
-}
-
-// NamedMapOpts is a MapOpts struct with a configuration name.
-// This struct is useful to keep reference to the assigned
-// field name in the internal configuration struct.
-type NamedMapOpts struct {
- name string
- MapOpts
-}
-
-var _ NamedOption = &NamedMapOpts{}
-
-// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
-func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
- return &NamedMapOpts{
- name: name,
- MapOpts: *NewMapOpts(values, validator),
- }
-}
-
-// Name returns the name of the NamedMapOpts in the configuration.
-func (o *NamedMapOpts) Name() string {
- return o.name
-}
-
-// ValidatorFctType defines a validator function that returns a validated string and/or an error.
-type ValidatorFctType func(val string) (string, error)
-
-// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
-type ValidatorFctListType func(val string) ([]string, error)
-
-// ValidateIPAddress validates an Ip address.
-func ValidateIPAddress(val string) (string, error) {
- var ip = net.ParseIP(strings.TrimSpace(val))
- if ip != nil {
- return ip.String(), nil
- }
- return "", fmt.Errorf("%s is not an ip address", val)
-}
-
-// ValidateDNSSearch validates domain for resolvconf search configuration.
-// A zero length domain is represented by a dot (.).
-func ValidateDNSSearch(val string) (string, error) {
- if val = strings.Trim(val, " "); val == "." {
- return val, nil
- }
- return validateDomain(val)
-}
-
-func validateDomain(val string) (string, error) {
- if alphaRegexp.FindString(val) == "" {
- return "", fmt.Errorf("%s is not a valid domain", val)
- }
- ns := domainRegexp.FindSubmatch([]byte(val))
- if len(ns) > 0 && len(ns[1]) < 255 {
- return string(ns[1]), nil
- }
- return "", fmt.Errorf("%s is not a valid domain", val)
-}
-
-// ValidateLabel validates that the specified string is a valid label, and returns it.
-// Labels are in the form on key=value.
-func ValidateLabel(val string) (string, error) {
- if strings.Count(val, "=") < 1 {
- return "", fmt.Errorf("bad attribute format: %s", val)
- }
- return val, nil
-}
-
-// ValidateSingleGenericResource validates that a single entry in the
-// generic resource list is valid.
-// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't
-func ValidateSingleGenericResource(val string) (string, error) {
- if strings.Count(val, "=") < 1 {
- return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val)
- }
- return val, nil
-}
-
-// ParseLink parses and validates the specified string as a link format (name:alias)
-func ParseLink(val string) (string, string, error) {
- if val == "" {
- return "", "", fmt.Errorf("empty string specified for links")
- }
- arr := strings.Split(val, ":")
- if len(arr) > 2 {
- return "", "", fmt.Errorf("bad format for links: %s", val)
- }
- if len(arr) == 1 {
- return val, val, nil
- }
- // This is kept because we can actually get a HostConfig with links
- // from an already created container and the format is not `foo:bar`
- // but `/foo:/c1/bar`
- if strings.HasPrefix(arr[0], "/") {
- _, alias := path.Split(arr[1])
- return arr[0][1:], alias, nil
- }
- return arr[0], arr[1], nil
-}
-
-// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
-type MemBytes int64
-
-// String returns the string format of the human readable memory bytes
-func (m *MemBytes) String() string {
- // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
- // We return "0" in case value is 0 here so that the default value is hidden.
- // (Sometimes "default 0 B" is actually misleading)
- if m.Value() != 0 {
- return units.BytesSize(float64(m.Value()))
- }
- return "0"
-}
-
-// Set sets the value of the MemBytes by passing a string
-func (m *MemBytes) Set(value string) error {
- val, err := units.RAMInBytes(value)
- *m = MemBytes(val)
- return err
-}
-
-// Type returns the type
-func (m *MemBytes) Type() string {
- return "bytes"
-}
-
-// Value returns the value in int64
-func (m *MemBytes) Value() int64 {
- return int64(*m)
-}
-
-// UnmarshalJSON is the customized unmarshaler for MemBytes
-func (m *MemBytes) UnmarshalJSON(s []byte) error {
- if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
- return fmt.Errorf("invalid size: %q", s)
- }
- val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
- *m = MemBytes(val)
- return err
-}
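A usage sketch of the `MemBytes` flag type from the deleted opts.go above, once more assuming the upstream `github.com/docker/docker/opts` package is importable. Values are parsed with go-units' `RAMInBytes`, so suffixes such as `k`, `m`, and `g` are accepted.

```go
// Usage sketch for the MemBytes flag type; assumes github.com/docker/docker/opts
// is available as a module dependency.
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var m opts.MemBytes
	if err := m.Set("128m"); err != nil { // human-readable size via go-units
		panic(err)
	}
	fmt.Println(m.Value())  // 134217728
	fmt.Println(m.String()) // 128MiB
}
```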
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
deleted file mode 100644
index 0c32367cb..000000000
--- a/vendor/github.com/docker/docker/opts/opts_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !windows
-
-package opts // import "github.com/docker/docker/opts"
-
-// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
-const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
deleted file mode 100644
index 0e1b6c6d1..000000000
--- a/vendor/github.com/docker/docker/opts/opts_windows.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
-// @jhowardmsft, @swernli.
-//
-// On Windows, this mitigates a problem with the default options of running
-// a docker client against a local docker daemon on TP5.
-//
-// What was found that if the default host is "localhost", even if the client
-// (and daemon as this is local) is not physically on a network, and the DNS
-// cache is flushed (ipconfig /flushdns), then the client will pause for
-// exactly one second when connecting to the daemon for calls. For example
-// using docker run windowsservercore cmd, the CLI will send a create followed
-// by an attach. You see the delay between the attach finishing and the attach
-// being seen by the daemon.
-//
-// Here's some daemon debug logs with additional debug spew put in. The
-// AfterWriteJSON log is the very last thing the daemon does as part of the
-// create call. The POST /attach is the second CLI call. Notice the second
-// time gap.
-//
-// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
-// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
-// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
-// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
-// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
-// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
-// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
-// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
-// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
-// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
-// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
-// ... 1 second gap here....
-// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
-// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
-//
-// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
-// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
-// the Windows networking stack is supposed to resolve "localhost" internally,
-// without hitting DNS, or even reading the hosts file (which is why localhost
-// is commented out in the hosts file on Windows).
-//
-// We have validated that working around this using the actual IPv4 localhost
-// address does not cause the delay.
-//
-// This does not occur with the docker client built with 1.4.3 on the same
-// Windows build, regardless of whether the daemon is built using 1.5.1
-// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
-// on a cross-compiled Windows binary (from Linux).
-//
-// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
-// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
-// explicitly.
-
-// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
-const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go
deleted file mode 100644
index 6c889070e..000000000
--- a/vendor/github.com/docker/docker/opts/quotedstring.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-// QuotedString is a string that may have extra quotes around the value. The
-// quotes are stripped from the value.
-type QuotedString struct {
- value *string
-}
-
-// Set sets a new value
-func (s *QuotedString) Set(val string) error {
- *s.value = trimQuotes(val)
- return nil
-}
-
-// Type returns the type of the value
-func (s *QuotedString) Type() string {
- return "string"
-}
-
-func (s *QuotedString) String() string {
- return *s.value
-}
-
-func trimQuotes(value string) string {
- lastIndex := len(value) - 1
- for _, char := range []byte{'\'', '"'} {
- if value[0] == char && value[lastIndex] == char {
- return value[1:lastIndex]
- }
- }
- return value
-}
-
-// NewQuotedString returns a new quoted string option
-func NewQuotedString(value *string) *QuotedString {
- return &QuotedString{value: value}
-}
diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go
deleted file mode 100644
index 4b9babf0a..000000000
--- a/vendor/github.com/docker/docker/opts/runtime.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
- "strings"
-
- "github.com/docker/docker/api/types"
-)
-
-// RuntimeOpt defines a map of Runtimes
-type RuntimeOpt struct {
- name string
- stockRuntimeName string
- values *map[string]types.Runtime
-}
-
-// NewNamedRuntimeOpt creates a new RuntimeOpt
-func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
- if ref == nil {
- ref = &map[string]types.Runtime{}
- }
- return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
-}
-
-// Name returns the name of the RuntimeOpt in the configuration.
-func (o *RuntimeOpt) Name() string {
- return o.name
-}
-
-// Set validates and updates the list of Runtimes
-func (o *RuntimeOpt) Set(val string) error {
- parts := strings.SplitN(val, "=", 2)
- if len(parts) != 2 {
- return fmt.Errorf("invalid runtime argument: %s", val)
- }
-
- parts[0] = strings.TrimSpace(parts[0])
- parts[1] = strings.TrimSpace(parts[1])
- if parts[0] == "" || parts[1] == "" {
- return fmt.Errorf("invalid runtime argument: %s", val)
- }
-
- parts[0] = strings.ToLower(parts[0])
- if parts[0] == o.stockRuntimeName {
- return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
- }
-
- if _, ok := (*o.values)[parts[0]]; ok {
- return fmt.Errorf("runtime '%s' was already defined", parts[0])
- }
-
- (*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
-
- return nil
-}
-
-// String returns Runtime values as a string.
-func (o *RuntimeOpt) String() string {
- var out []string
- for k := range *o.values {
- out = append(out, k)
- }
-
- return fmt.Sprintf("%v", out)
-}
-
-// GetMap returns a map of Runtimes (name: path)
-func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
- if o.values != nil {
- return *o.values
- }
-
- return map[string]types.Runtime{}
-}
-
-// Type returns the type of the option
-func (o *RuntimeOpt) Type() string {
- return "runtime"
-}
diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go
deleted file mode 100644
index 0e2a36236..000000000
--- a/vendor/github.com/docker/docker/opts/ulimit.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
- "fmt"
-
- "github.com/docker/go-units"
-)
-
-// UlimitOpt defines a map of Ulimits
-type UlimitOpt struct {
- values *map[string]*units.Ulimit
-}
-
-// NewUlimitOpt creates a new UlimitOpt
-func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
- if ref == nil {
- ref = &map[string]*units.Ulimit{}
- }
- return &UlimitOpt{ref}
-}
-
-// Set validates a Ulimit and sets its name as a key in UlimitOpt
-func (o *UlimitOpt) Set(val string) error {
- l, err := units.ParseUlimit(val)
- if err != nil {
- return err
- }
-
- (*o.values)[l.Name] = l
-
- return nil
-}
-
-// String returns Ulimit values as a string.
-func (o *UlimitOpt) String() string {
- var out []string
- for _, v := range *o.values {
- out = append(out, v.String())
- }
-
- return fmt.Sprintf("%v", out)
-}
-
-// GetList returns a slice of pointers to Ulimits.
-func (o *UlimitOpt) GetList() []*units.Ulimit {
- var ulimits []*units.Ulimit
- for _, v := range *o.values {
- ulimits = append(ulimits, v)
- }
-
- return ulimits
-}
-
-// Type returns the option type
-func (o *UlimitOpt) Type() string {
- return "ulimit"
-}
-
-// NamedUlimitOpt defines a named map of Ulimits
-type NamedUlimitOpt struct {
- name string
- UlimitOpt
-}
-
-var _ NamedOption = &NamedUlimitOpt{}
-
-// NewNamedUlimitOpt creates a new NamedUlimitOpt
-func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt {
- if ref == nil {
- ref = &map[string]*units.Ulimit{}
- }
- return &NamedUlimitOpt{
- name: name,
- UlimitOpt: *NewUlimitOpt(ref),
- }
-}
-
-// Name returns the option name
-func (o *NamedUlimitOpt) Name() string {
- return o.name
-}
diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md
deleted file mode 100644
index 755cd9683..000000000
--- a/vendor/github.com/docker/docker/pkg/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-pkg/ is a collection of utility packages used by the Moby project without being specific to its internals.
-
-Utility packages are kept separate from the moby core codebase to keep it as small and concise as possible.
-If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
-Moby organization, to facilitate re-use by other projects. However, that is not the priority.
-
-The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
-Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
-
-Because utility packages are small and neatly separated from the rest of the codebase, they are a good
-place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json
new file mode 100644
index 000000000..7a3a99ae1
--- /dev/null
+++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json
@@ -0,0 +1,793 @@
+{
+ "defaultAction": "SCMP_ACT_ERRNO",
+ "archMap": [
+ {
+ "architecture": "SCMP_ARCH_X86_64",
+ "subArchitectures": [
+ "SCMP_ARCH_X86",
+ "SCMP_ARCH_X32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_AARCH64",
+ "subArchitectures": [
+ "SCMP_ARCH_ARM"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_S390X",
+ "subArchitectures": [
+ "SCMP_ARCH_S390"
+ ]
+ }
+ ],
+ "syscalls": [
+ {
+ "names": [
+ "accept",
+ "accept4",
+ "access",
+ "adjtimex",
+ "alarm",
+ "bind",
+ "brk",
+ "capget",
+ "capset",
+ "chdir",
+ "chmod",
+ "chown",
+ "chown32",
+ "clock_getres",
+ "clock_gettime",
+ "clock_nanosleep",
+ "close",
+ "connect",
+ "copy_file_range",
+ "creat",
+ "dup",
+ "dup2",
+ "dup3",
+ "epoll_create",
+ "epoll_create1",
+ "epoll_ctl",
+ "epoll_ctl_old",
+ "epoll_pwait",
+ "epoll_wait",
+ "epoll_wait_old",
+ "eventfd",
+ "eventfd2",
+ "execve",
+ "execveat",
+ "exit",
+ "exit_group",
+ "faccessat",
+ "fadvise64",
+ "fadvise64_64",
+ "fallocate",
+ "fanotify_mark",
+ "fchdir",
+ "fchmod",
+ "fchmodat",
+ "fchown",
+ "fchown32",
+ "fchownat",
+ "fcntl",
+ "fcntl64",
+ "fdatasync",
+ "fgetxattr",
+ "flistxattr",
+ "flock",
+ "fork",
+ "fremovexattr",
+ "fsetxattr",
+ "fstat",
+ "fstat64",
+ "fstatat64",
+ "fstatfs",
+ "fstatfs64",
+ "fsync",
+ "ftruncate",
+ "ftruncate64",
+ "futex",
+ "futimesat",
+ "getcpu",
+ "getcwd",
+ "getdents",
+ "getdents64",
+ "getegid",
+ "getegid32",
+ "geteuid",
+ "geteuid32",
+ "getgid",
+ "getgid32",
+ "getgroups",
+ "getgroups32",
+ "getitimer",
+ "getpeername",
+ "getpgid",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getrandom",
+ "getresgid",
+ "getresgid32",
+ "getresuid",
+ "getresuid32",
+ "getrlimit",
+ "get_robust_list",
+ "getrusage",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "get_thread_area",
+ "gettid",
+ "gettimeofday",
+ "getuid",
+ "getuid32",
+ "getxattr",
+ "inotify_add_watch",
+ "inotify_init",
+ "inotify_init1",
+ "inotify_rm_watch",
+ "io_cancel",
+ "ioctl",
+ "io_destroy",
+ "io_getevents",
+ "ioprio_get",
+ "ioprio_set",
+ "io_setup",
+ "io_submit",
+ "ipc",
+ "kill",
+ "lchown",
+ "lchown32",
+ "lgetxattr",
+ "link",
+ "linkat",
+ "listen",
+ "listxattr",
+ "llistxattr",
+ "_llseek",
+ "lremovexattr",
+ "lseek",
+ "lsetxattr",
+ "lstat",
+ "lstat64",
+ "madvise",
+ "memfd_create",
+ "mincore",
+ "mkdir",
+ "mkdirat",
+ "mknod",
+ "mknodat",
+ "mlock",
+ "mlock2",
+ "mlockall",
+ "mmap",
+ "mmap2",
+ "mprotect",
+ "mq_getsetattr",
+ "mq_notify",
+ "mq_open",
+ "mq_timedreceive",
+ "mq_timedsend",
+ "mq_unlink",
+ "mremap",
+ "msgctl",
+ "msgget",
+ "msgrcv",
+ "msgsnd",
+ "msync",
+ "munlock",
+ "munlockall",
+ "munmap",
+ "nanosleep",
+ "newfstatat",
+ "_newselect",
+ "open",
+ "openat",
+ "pause",
+ "pipe",
+ "pipe2",
+ "poll",
+ "ppoll",
+ "prctl",
+ "pread64",
+ "preadv",
+ "preadv2",
+ "prlimit64",
+ "pselect6",
+ "pwrite64",
+ "pwritev",
+ "pwritev2",
+ "read",
+ "readahead",
+ "readlink",
+ "readlinkat",
+ "readv",
+ "recv",
+ "recvfrom",
+ "recvmmsg",
+ "recvmsg",
+ "remap_file_pages",
+ "removexattr",
+ "rename",
+ "renameat",
+ "renameat2",
+ "restart_syscall",
+ "rmdir",
+ "rt_sigaction",
+ "rt_sigpending",
+ "rt_sigprocmask",
+ "rt_sigqueueinfo",
+ "rt_sigreturn",
+ "rt_sigsuspend",
+ "rt_sigtimedwait",
+ "rt_tgsigqueueinfo",
+ "sched_getaffinity",
+ "sched_getattr",
+ "sched_getparam",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_getscheduler",
+ "sched_rr_get_interval",
+ "sched_setaffinity",
+ "sched_setattr",
+ "sched_setparam",
+ "sched_setscheduler",
+ "sched_yield",
+ "seccomp",
+ "select",
+ "semctl",
+ "semget",
+ "semop",
+ "semtimedop",
+ "send",
+ "sendfile",
+ "sendfile64",
+ "sendmmsg",
+ "sendmsg",
+ "sendto",
+ "setfsgid",
+ "setfsgid32",
+ "setfsuid",
+ "setfsuid32",
+ "setgid",
+ "setgid32",
+ "setgroups",
+ "setgroups32",
+ "setitimer",
+ "setpgid",
+ "setpriority",
+ "setregid",
+ "setregid32",
+ "setresgid",
+ "setresgid32",
+ "setresuid",
+ "setresuid32",
+ "setreuid",
+ "setreuid32",
+ "setrlimit",
+ "set_robust_list",
+ "setsid",
+ "setsockopt",
+ "set_thread_area",
+ "set_tid_address",
+ "setuid",
+ "setuid32",
+ "setxattr",
+ "shmat",
+ "shmctl",
+ "shmdt",
+ "shmget",
+ "shutdown",
+ "sigaltstack",
+ "signalfd",
+ "signalfd4",
+ "sigreturn",
+ "socket",
+ "socketcall",
+ "socketpair",
+ "splice",
+ "stat",
+ "stat64",
+ "statfs",
+ "statfs64",
+ "statx",
+ "symlink",
+ "symlinkat",
+ "sync",
+ "sync_file_range",
+ "syncfs",
+ "sysinfo",
+ "tee",
+ "tgkill",
+ "time",
+ "timer_create",
+ "timer_delete",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
+ "timer_getoverrun",
+ "timer_gettime",
+ "timer_settime",
+ "times",
+ "tkill",
+ "truncate",
+ "truncate64",
+ "ugetrlimit",
+ "umask",
+ "uname",
+ "unlink",
+ "unlinkat",
+ "utime",
+ "utimensat",
+ "utimes",
+ "vfork",
+ "vmsplice",
+ "wait4",
+ "waitid",
+ "waitpid",
+ "write",
+ "writev"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "ptrace"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": null,
+ "comment": "",
+ "includes": {
+ "minKernel": "4.8"
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 0,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 8,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131072,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131080,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 4294967295,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "sync_file_range2"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "ppc64le"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arm_fadvise64_64",
+ "arm_sync_file_range",
+ "sync_file_range2",
+ "breakpoint",
+ "cacheflush",
+ "set_tls"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "arm",
+ "arm64"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arch_prctl"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "modify_ldt"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32",
+ "x86"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "s390_pci_mmio_read",
+ "s390_pci_mmio_write",
+ "s390_runtime_instr"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "open_by_handle_at"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_DAC_READ_SEARCH"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "bpf",
+ "clone",
+ "fanotify_init",
+ "lookup_dcookie",
+ "mount",
+ "name_to_handle_at",
+ "perf_event_open",
+ "quotactl",
+ "setdomainname",
+ "sethostname",
+ "setns",
+ "syslog",
+ "umount",
+ "umount2",
+ "unshare"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ],
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ }
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 1,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "s390 parameter ordering for clone is different",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ }
+ },
+ {
+ "names": [
+ "reboot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_BOOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "chroot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_CHROOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "delete_module",
+ "init_module",
+ "finit_module",
+ "query_module"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_MODULE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "acct"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PACCT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "kcmp",
+ "process_vm_readv",
+ "process_vm_writev",
+ "ptrace"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PTRACE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "iopl",
+ "ioperm"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_RAWIO"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "settimeofday",
+ "stime",
+ "clock_settime"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TIME"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "vhangup"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TTY_CONFIG"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "get_mempolicy",
+ "mbind",
+ "set_mempolicy"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_NICE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "syslog"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYSLOG"
+ ]
+ },
+ "excludes": {}
+ }
+ ]
+} \ No newline at end of file
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go
new file mode 100644
index 000000000..32f22bb37
--- /dev/null
+++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go
@@ -0,0 +1,32 @@
+// +build ignore
+
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/profiles/seccomp"
+)
+
+// saves the default seccomp profile as a json file so people can use it as a
+// base for their own custom profiles
+func main() {
+ wd, err := os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ f := filepath.Join(wd, "default.json")
+
+ // write the default profile to the file
+ b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t")
+ if err != nil {
+ panic(err)
+ }
+
+ if err := ioutil.WriteFile(f, b, 0644); err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
deleted file mode 100644
index 805f89626..000000000
--- a/vendor/github.com/docker/docker/vendor.conf
+++ /dev/null
@@ -1,161 +0,0 @@
-# the following lines are in sorted order, FYI
-github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/Microsoft/hcsshim v0.8.6
-github.com/Microsoft/go-winio v0.4.11
-github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
-github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
-github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a
-github.com/gorilla/mux v1.7.0
-github.com/Microsoft/opengcs v0.3.9
-github.com/kr/pty 5cf931ef8f
-github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.6
-github.com/tchap/go-patricia v2.2.6
-github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
-golang.org/x/net a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1
-golang.org/x/sys 41f3e6584952bb034a481797859f6ab34b6803bd
-github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
-github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
-golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
-gotest.tools v2.1.0
-github.com/google/go-cmp v0.2.0
-
-github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
-github.com/imdario/mergo v0.3.6
-golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
-
-# buildkit
-github.com/moby/buildkit 34ff9c2366a878ada7938d2f9ede71741b0a220c
-github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca
-github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
-github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
-github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
-github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
-github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
-
-#get libnetwork packages
-
-# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy.installer accordingly
-github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83
-github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
-github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
-github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
-github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c
-github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
-github.com/hashicorp/go-sockaddr 6d291a969b86c4b633730bfc6b8b9d64c3aafed9
-github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
-github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b
-github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
-
-# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv.installer accordingly
-github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
-github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
-github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd v3.3.9
-github.com/coreos/go-semver v0.2.0
-github.com/ugorji/go v1.1.1
-github.com/hashicorp/consul v0.5.2
-github.com/miekg/dns v1.0.7
-github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
-go.etcd.io/bbolt v1.3.1-etcd.8
-
-# get graph and distribution packages
-github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
-github.com/vbatts/tar-split v0.11.0
-github.com/opencontainers/go-digest v1.0.0-rc1
-
-# get go-zfs packages
-github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
-github.com/pborman/uuid v1.0
-
-google.golang.org/grpc v1.12.0
-
-# The version of runc should match the version that is used by the containerd
-# version that is used. If you need to update runc, open a pull request in
-# the containerd project first, and update both after that is merged.
-# This commit does not need to match RUNC_COMMIT as it is used for helper
-# packages but should be newer or equal.
-github.com/opencontainers/runc 12f6a991201fdb8f82579582d5e00e28fba06d0a
-github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-github.com/opencontainers/image-spec v1.0.1
-github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
-
-# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
-github.com/coreos/go-systemd v17
-github.com/godbus/dbus v4.0.0
-github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/golang/protobuf v1.1.0
-
-# gelf logging driver deps
-github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
-
-github.com/fluent/fluent-logger-golang v1.3.0
-# fluent-logger-golang deps
-github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad
-
-# fsnotify
-github.com/fsnotify/fsnotify v1.4.7
-
-# awslogs deps
-github.com/aws/aws-sdk-go v1.12.66
-github.com/go-ini/ini v1.25.4
-github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
-
-# logentries
-github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
-
-# gcplogs deps
-golang.org/x/oauth2 ec22f46f877b4505e0117eeaab541714644fdd28
-google.golang.org/api de943baf05a022a8f921b544b7827bacaba1aed5
-go.opencensus.io v0.11.0
-cloud.google.com/go v0.23.0
-github.com/googleapis/gax-go v2.0.0
-google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
-
-# containerd
-github.com/containerd/containerd e6b3f5632f50dbc4e9cb6288d911bf4f5e95b18e # v1.2.4
-github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
-github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
-github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
-github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
-github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
-github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
-
-# cluster
-github.com/docker/swarmkit ebfb0aa1118ebfd35a224d72a5d337ce0addd907
-github.com/gogo/protobuf v1.0.0
-github.com/cloudflare/cfssl 1.3.2
-github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
-github.com/google/certificate-transparency-go v1.0.20
-golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4
-golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650
-github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
-github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
-github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3
-github.com/coreos/pkg v3
-github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
-github.com/prometheus/client_golang v0.8.0
-github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceaf
-github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2
-github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801
-github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7
-github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d # v0.8.0
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-
-# cli
-github.com/spf13/cobra v0.0.3
-github.com/spf13/pflag v1.0.1
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0
-github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
-
-# metrics
-github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
-
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md
deleted file mode 100644
index d257e44fd..000000000
--- a/vendor/github.com/docker/go-connections/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections)
-
-# Introduction
-
-go-connections provides a common package to work with network connections.
-
-## Usage
-
-See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation.
-
-## License
-
-go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
new file mode 100644
index 000000000..b8a512c36
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
@@ -0,0 +1,55 @@
+# Contributing
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE
new file mode 100644
index 000000000..8f3fee627
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs
new file mode 100644
index 000000000..e26cd4fc8
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+
+ including for purposes of Section 3(b); and
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE
new file mode 100644
index 000000000..8915f0277
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 000000000..a9e947cb5
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,91 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the Prometheus Go client to help enforce conventions and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not a replacement for the Prometheus client; rather, it helps enforce consistent naming across the metrics you collect.
+If you have not already read the Prometheus best practices around naming and labels, you can find them [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker-specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+ "version": dockerversion.Version,
+ "commit": dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` is the namespace in this example and `daemon` is the subsystem, or package, in which we are collecting the metrics.
+
+A namespace also allows you to attach constant labels, such as the git commit and version, to all of the metrics it creates; a sketch follows.
+
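+For example, a counter created on this namespace picks up the namespace, the
+subsystem, and the standard counter unit in its exported name. A minimal sketch
+(the `image_pulls` metric is hypothetical and only illustrates the naming):
+
+```go
+// Exported to Prometheus as engine_daemon_image_pulls_total, carrying the
+// constant version and commit labels defined on the namespace above.
+imagePulls := ns.NewCounter("image_pulls", "Number of images pulled by the daemon")
+imagePulls.Inc()
+```
+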
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
+
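+Collecting the declarations from the examples above into a single file might
+look like the following sketch (the package name and the `init` registration
+are illustrative assumptions, not requirements of this package):
+
+```go
+package daemon
+
+import metrics "github.com/docker/go-metrics"
+
+var (
+	// All metrics in this file share the engine_daemon prefix; constant
+	// labels such as version and commit would be passed here as well.
+	ns = metrics.NewNamespace("engine", "daemon", metrics.Labels{})
+
+	imagePulls = ns.NewCounter("image_pulls", "Number of images pulled by the daemon")
+)
+
+func init() {
+	// Register exposes every metric added to the namespace on the global registry.
+	metrics.Register(ns)
+}
+```
+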
+3. Use labels instead of multiple metrics
+
+Labels allow you to define one metric for a whole class of measurements, such as the time it takes to perform a certain action on an object.
+If we want to collect timings on various container actions such as create, start, and delete, we can define one metric called `container_actions` and use labels to specify the type of action.
+
+
+```go
+containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+```
+
+The last parameter is the label name or key.
+When adding a data point to the metric, you use the `WithValues` function to specify the `action` that you are collecting for.
+
+```go
+containerActions.WithValues("create").UpdateSince(start)
+```
+
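+If you would rather not track the start time yourself, the `StartTimer` helper
+in this package returns a `done` function that records the elapsed time when it
+is called. A short sketch (the surrounding function is hypothetical):
+
+```go
+func createContainer() error {
+	// done observes the elapsed time on the "create" series when invoked.
+	done := metrics.StartTimer(containerActions.WithValues("create"))
+	defer done()
+
+	// ... perform the create action ...
+	return nil
+}
+```
+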
+4. Always use a unit
+
+The metric name should describe what you are measuring, but you also need to provide the unit it is measured in.
+For a timer the standard unit is seconds, and for a counter the standard unit is a total.
+For gauges, you must provide the unit yourself.
+This package provides a standard set of units for use within Docker projects.
+
+```go
+Nanoseconds Unit = "nanoseconds"
+Seconds Unit = "seconds"
+Bytes Unit = "bytes"
+Total Unit = "total"
+```
+
+If you need a unit that is not defined in the package, please open a PR to add it, but first check whether one of the existing units will work for your metric, i.e. seconds or nanoseconds rather than adding milliseconds.
+
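+For example, a gauge takes its unit explicitly, and the unit is appended to the
+exported metric name. A short sketch (the `image_cache` metric is hypothetical):
+
+```go
+// Exported as engine_daemon_image_cache_bytes on the namespace from section 1.
+imageCacheSize := ns.NewGauge("image_cache", "Disk space used by the image cache", metrics.Bytes)
+imageCacheSize.Set(1024 * 1024)
+```
+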
+## Docs
+
+Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+
+## HTTP Metrics
+
+To instrument an HTTP handler, you can wrap the code like this:
+
+```go
+namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{})
+httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
+metrics.Register(namespace)
+instrumentedHandler := metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
+```
+Note: the `handler` label is taken from the handler name passed to `NewDefaultHttpMetrics` and is attached to every HTTP metric it creates.
+
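+To expose what has been collected, serve the instrumented handler alongside the
+Prometheus scrape endpoint returned by `metrics.Handler()`. A minimal sketch
+(the mux paths and listen address are examples only):
+
+```go
+mux := http.NewServeMux()
+mux.Handle("/v2/", instrumentedHandler)
+mux.Handle("/metrics", metrics.Handler())
+log.Fatal(http.ListenAndServe(":8080", mux))
+```
+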
+## Additional Metrics
+
+Additional metrics that are not available in the Prometheus client are also defined here.
+If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
+
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
new file mode 100644
index 000000000..fe36316a4
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/counter.go
@@ -0,0 +1,52 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Counter is a metric that can only increment its current count.
+type Counter interface {
+ // Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
+ //
+ // If len(vs) == 0, increments the counter by 1.
+ Inc(vs ...float64)
+}
+
+// LabeledCounter is a counter that must have labels populated before use.
+type LabeledCounter interface {
+ WithValues(vs ...string) Counter
+}
+
+type labeledCounter struct {
+ pc *prometheus.CounterVec
+}
+
+func (lc *labeledCounter) WithValues(vs ...string) Counter {
+ return &counter{pc: lc.pc.WithLabelValues(vs...)}
+}
+
+func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
+ lc.pc.Describe(ch)
+}
+
+func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
+ lc.pc.Collect(ch)
+}
+
+type counter struct {
+ pc prometheus.Counter
+}
+
+func (c *counter) Inc(vs ...float64) {
+ if len(vs) == 0 {
+ c.pc.Inc()
+ }
+
+ c.pc.Add(sumFloat64(vs...))
+}
+
+func (c *counter) Describe(ch chan<- *prometheus.Desc) {
+ c.pc.Describe(ch)
+}
+
+func (c *counter) Collect(ch chan<- prometheus.Metric) {
+ c.pc.Collect(ch)
+}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
new file mode 100644
index 000000000..8fbdfc697
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/docs.go
@@ -0,0 +1,3 @@
+// Package metrics is a small wrapper around the Prometheus Go client to help
+// enforce conventions and best practices for metrics collection in Docker
+// projects.
+package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
new file mode 100644
index 000000000..74296e877
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/gauge.go
@@ -0,0 +1,72 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Gauge is a metric that allows incrementing and decrementing a value
+type Gauge interface {
+ Inc(...float64)
+ Dec(...float64)
+
+ // Add adds the provided value to the gauge's current value
+ Add(float64)
+
+ // Set replaces the gauge's current value with the provided value
+ Set(float64)
+}
+
+// LabeledGauge describes a gauge that must have label values populated before use.
+type LabeledGauge interface {
+ WithValues(labels ...string) Gauge
+}
+
+type labeledGauge struct {
+ pg *prometheus.GaugeVec
+}
+
+func (lg *labeledGauge) WithValues(labels ...string) Gauge {
+ return &gauge{pg: lg.pg.WithLabelValues(labels...)}
+}
+
+func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
+ lg.pg.Describe(c)
+}
+
+func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
+ lg.pg.Collect(c)
+}
+
+type gauge struct {
+ pg prometheus.Gauge
+}
+
+func (g *gauge) Inc(vs ...float64) {
+ if len(vs) == 0 {
+ g.pg.Inc()
+ }
+
+ g.Add(sumFloat64(vs...))
+}
+
+func (g *gauge) Dec(vs ...float64) {
+ if len(vs) == 0 {
+ g.pg.Dec()
+ }
+
+ g.Add(-sumFloat64(vs...))
+}
+
+func (g *gauge) Add(v float64) {
+ g.pg.Add(v)
+}
+
+func (g *gauge) Set(v float64) {
+ g.pg.Set(v)
+}
+
+func (g *gauge) Describe(c chan<- *prometheus.Desc) {
+ g.pg.Describe(c)
+}
+
+func (g *gauge) Collect(c chan<- prometheus.Metric) {
+ g.pg.Collect(c)
+}
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
new file mode 100644
index 000000000..05601e9ec
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/handler.go
@@ -0,0 +1,74 @@
+package metrics
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// HTTPHandlerOpts describes a set of configurable options for HTTP metrics.
+type HTTPHandlerOpts struct {
+ DurationBuckets []float64
+ RequestSizeBuckets []float64
+ ResponseSizeBuckets []float64
+}
+
+const (
+ InstrumentHandlerResponseSize = iota
+ InstrumentHandlerRequestSize
+ InstrumentHandlerDuration
+ InstrumentHandlerCounter
+ InstrumentHandlerInFlight
+)
+
+type HTTPMetric struct {
+ prometheus.Collector
+ handlerType int
+}
+
+var (
+ defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
+	defaultRequestSizeBuckets  = prometheus.ExponentialBuckets(1024, 2, 22) // 1 KiB to 2 GiB
+ defaultResponseSizeBuckets = defaultRequestSizeBuckets
+)
+
+// Handler returns the global http.Handler that provides the prometheus
+// metrics format on GET requests. This handler is no longer instrumented.
+func Handler() http.Handler {
+ return promhttp.Handler()
+}
+
+func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
+}
+
+func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
+ var handler http.Handler
+ handler = http.HandlerFunc(handlerFunc)
+ for _, metric := range metrics {
+ switch metric.handlerType {
+ case InstrumentHandlerResponseSize:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
+ }
+ case InstrumentHandlerRequestSize:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
+ }
+ case InstrumentHandlerDuration:
+ if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+ handler = promhttp.InstrumentHandlerDuration(collector, handler)
+ }
+ case InstrumentHandlerCounter:
+ if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
+ handler = promhttp.InstrumentHandlerCounter(collector, handler)
+ }
+ case InstrumentHandlerInFlight:
+ if collector, ok := metric.Collector.(prometheus.Gauge); ok {
+ handler = promhttp.InstrumentHandlerInFlight(collector, handler)
+ }
+ }
+ }
+ return handler.ServeHTTP
+}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
new file mode 100644
index 000000000..68b7f51b3
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/helpers.go
@@ -0,0 +1,10 @@
+package metrics
+
+func sumFloat64(vs ...float64) float64 {
+ var sum float64
+ for _, v := range vs {
+ sum += v
+ }
+
+ return sum
+}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
new file mode 100644
index 000000000..798315451
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/namespace.go
@@ -0,0 +1,315 @@
+package metrics
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type Labels map[string]string
+
+// NewNamespace returns a namespace that is responsible for managing a collection of
+// metrics for a particular namespace and subsystem.
+//
+// labels allows constant labels to be added to all metrics created in this namespace;
+// they are commonly used for data like the application version and git commit.
+func NewNamespace(name, subsystem string, labels Labels) *Namespace {
+ if labels == nil {
+ labels = make(map[string]string)
+ }
+ return &Namespace{
+ name: name,
+ subsystem: subsystem,
+ labels: labels,
+ }
+}
+
+// Namespace describes a set of metrics that share a namespace and subsystem.
+type Namespace struct {
+ name string
+ subsystem string
+ labels Labels
+ mu sync.Mutex
+ metrics []prometheus.Collector
+}
+
+// WithConstLabels returns a namespace with the provided set of labels merged
+// with the existing constant labels on the namespace.
+//
+// Only metrics created with the returned namespace will get the new constant
+// labels. The returned namespace must be registered separately.
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
+ n.mu.Lock()
+ ns := &Namespace{
+ name: n.name,
+ subsystem: n.subsystem,
+ labels: mergeLabels(n.labels, labels),
+ }
+ n.mu.Unlock()
+ return ns
+}
+
+func (n *Namespace) NewCounter(name, help string) Counter {
+ c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
+ n.Add(c)
+ return c
+}
+
+func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
+ c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
+ n.Add(c)
+ return c
+}
+
+func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts {
+ return prometheus.CounterOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: makeName(name, Total),
+ Help: help,
+ ConstLabels: prometheus.Labels(n.labels),
+ }
+}
+
+func (n *Namespace) NewTimer(name, help string) Timer {
+ t := &timer{
+ m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
+ }
+ n.Add(t)
+ return t
+}
+
+func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer {
+ t := &labeledTimer{
+ m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
+ }
+ n.Add(t)
+ return t
+}
+
+func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts {
+ return prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: makeName(name, Seconds),
+ Help: help,
+ ConstLabels: prometheus.Labels(n.labels),
+ }
+}
+
+func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
+ g := &gauge{
+ pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
+ }
+ n.Add(g)
+ return g
+}
+
+func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge {
+ g := &labeledGauge{
+ pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
+ }
+ n.Add(g)
+ return g
+}
+
+func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts {
+ return prometheus.GaugeOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: makeName(name, unit),
+ Help: help,
+ ConstLabels: prometheus.Labels(n.labels),
+ }
+}
+
+func (n *Namespace) Describe(ch chan<- *prometheus.Desc) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ for _, metric := range n.metrics {
+ metric.Describe(ch)
+ }
+}
+
+func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ for _, metric := range n.metrics {
+ metric.Collect(ch)
+ }
+}
+
+func (n *Namespace) Add(collector prometheus.Collector) {
+ n.mu.Lock()
+ n.metrics = append(n.metrics, collector)
+ n.mu.Unlock()
+}
+
+func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
+ name = makeName(name, unit)
+ namespace := n.name
+ if n.subsystem != "" {
+ namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
+ }
+ name = fmt.Sprintf("%s_%s", namespace, name)
+ return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
+}
+
+// mergeLabels merges two or more labels objects into a single map, favoring
+// the later labels.
+func mergeLabels(lbs ...Labels) Labels {
+ merged := make(Labels)
+
+ for _, target := range lbs {
+ for k, v := range target {
+ merged[k] = v
+ }
+ }
+
+ return merged
+}
+
+func makeName(name string, unit Unit) string {
+ if unit == "" {
+ return name
+ }
+
+ return fmt.Sprintf("%s_%s", name, unit)
+}
+
+func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
+ return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+ DurationBuckets: defaultDurationBuckets,
+		RequestSizeBuckets:  defaultRequestSizeBuckets,
+ ResponseSizeBuckets: defaultResponseSizeBuckets,
+ })
+}
+
+func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
+ return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+ DurationBuckets: durationBuckets,
+ RequestSizeBuckets: requestSizeBuckets,
+ ResponseSizeBuckets: responseSizeBuckets,
+ })
+}
+
+func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
+ var httpMetrics []*HTTPMetric
+ inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
+ requestTotalMetric := n.NewRequestTotalMetric(handlerName)
+ requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
+ requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
+ responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
+ httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
+ return httpMetrics
+}
+
+func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ metric := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "in_flight_requests",
+ Help: "The in-flight HTTP requests",
+ ConstLabels: prometheus.Labels(labels),
+ })
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerInFlight,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ metric := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: prometheus.Labels(labels),
+ },
+ []string{"code", "method"},
+ )
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerCounter,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("DurationBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "request_duration_seconds",
+ Help: "The HTTP request latencies in seconds.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metric := prometheus.NewHistogramVec(opts, []string{"method"})
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerDuration,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("RequestSizeBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "request_size_bytes",
+ Help: "The HTTP request sizes in bytes.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metric := prometheus.NewHistogramVec(opts, []string{})
+ httpMetric := &HTTPMetric{
+ Collector: metric,
+ handlerType: InstrumentHandlerRequestSize,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
+
+func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+ if len(buckets) == 0 {
+ panic("ResponseSizeBuckets must be provided")
+ }
+ labels := prometheus.Labels(n.labels)
+ labels["handler"] = handlerName
+ opts := prometheus.HistogramOpts{
+ Namespace: n.name,
+ Subsystem: n.subsystem,
+ Name: "response_size_bytes",
+ Help: "The HTTP response sizes in bytes.",
+ Buckets: buckets,
+ ConstLabels: prometheus.Labels(labels),
+ }
+ metrics := prometheus.NewHistogramVec(opts, []string{})
+ httpMetric := &HTTPMetric{
+ Collector: metrics,
+ handlerType: InstrumentHandlerResponseSize,
+ }
+ n.Add(httpMetric)
+ return httpMetric
+}
diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go
new file mode 100644
index 000000000..708358df0
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/register.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Register adds all the metrics in the provided namespace to the global
+// metrics registry
+func Register(n *Namespace) {
+ prometheus.MustRegister(n)
+}
+
+// Deregister removes all the metrics in the provided namespace from the
+// global metrics registry
+func Deregister(n *Namespace) {
+ prometheus.Unregister(n)
+}
diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go
new file mode 100644
index 000000000..824c98739
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/timer.go
@@ -0,0 +1,85 @@
+package metrics
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// StartTimer begins a timer observation at the callsite. When the target
+// operation is completed, the caller should call the returned done func().
+func StartTimer(timer Timer) (done func()) {
+ start := time.Now()
+ return func() {
+ timer.Update(time.Since(start))
+ }
+}
+
+// Timer is a metric that allows collecting the duration of an action in seconds
+type Timer interface {
+	// Update records an observation of the given duration, converting it to
+	// the timer's target units.
+ Update(duration time.Duration)
+
+	// UpdateSince adds the duration elapsed since the provided start time to the
+	// timer's summary, using the precision chosen when the timer was created.
+ UpdateSince(time.Time)
+}
+
+// LabeledTimer is a timer that must have label values populated before use.
+type LabeledTimer interface {
+ WithValues(labels ...string) *labeledTimerObserver
+}
+
+type labeledTimer struct {
+ m *prometheus.HistogramVec
+}
+
+type labeledTimerObserver struct {
+ m prometheus.Observer
+}
+
+func (lbo *labeledTimerObserver) Update(duration time.Duration) {
+ lbo.m.Observe(duration.Seconds())
+}
+
+func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
+ lbo.m.Observe(time.Since(since).Seconds())
+}
+
+func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
+ return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
+}
+
+func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
+ lt.m.Describe(c)
+}
+
+func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
+ lt.m.Collect(c)
+}
+
+type timer struct {
+ m prometheus.Observer
+}
+
+func (t *timer) Update(duration time.Duration) {
+ t.m.Observe(duration.Seconds())
+}
+
+func (t *timer) UpdateSince(since time.Time) {
+ t.m.Observe(time.Since(since).Seconds())
+}
+
+func (t *timer) Describe(c chan<- *prometheus.Desc) {
+ c <- t.m.(prometheus.Metric).Desc()
+}
+
+func (t *timer) Collect(c chan<- prometheus.Metric) {
+ // Are there any observers that don't implement Collector? It is really
+ // unclear what the point of the upstream change was, but we'll let this
+ // panic if we get an observer that doesn't implement collector. In this
+ // case, we should almost always see metricVec objects, so this should
+ // never panic.
+ t.m.(prometheus.Collector).Collect(c)
+}
diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go
new file mode 100644
index 000000000..c96622f90
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/unit.go
@@ -0,0 +1,12 @@
+package metrics
+
+// Unit represents the type or precision of a metric; it is appended to
+// the metric's fully qualified name.
+type Unit string
+
+const (
+ Nanoseconds Unit = "nanoseconds"
+ Seconds Unit = "seconds"
+ Bytes Unit = "bytes"
+ Total Unit = "total"
+)
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 000000000..9ea86d784
--- /dev/null
+++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 000000000..4aac7c741
--- /dev/null
+++ b/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,46 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "akihirosuda",
+ "dnephin",
+ "thajeztah",
+ "vdemeester",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.akihirosuda]
+ Name = "Akihiro Suda"
+ Email = "akihiro.suda.cz@hco.ntt.co.jp"
+ GitHub = "AkihiroSuda"
+
+ [people.dnephin]
+ Name = "Daniel Nephin"
+ Email = "dnephin@gmail.com"
+ GitHub = "dnephin"
+
+ [people.thajeztah]
+ Name = "Sebastiaan van Stijn"
+ Email = "github@gone.nl"
+ GitHub = "thaJeztah"
+
+ [people.vdemeester]
+ Name = "Vincent Demeester"
+ Email = "vincent@sbr.pm"
+ GitHub = "vdemeester" \ No newline at end of file
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 000000000..af9d60552
--- /dev/null
+++ b/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get golang.org/x/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
index ba02af26d..48dd8744d 100644
--- a/vendor/github.com/docker/go-units/duration.go
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -18,7 +18,7 @@ func HumanDuration(d time.Duration) string {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
- } else if minutes < 46 {
+ } else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours() + 0.5); hours == 1 {
return "About an hour"
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
index 44616c271..85f6ab071 100644
--- a/vendor/github.com/docker/go-units/size.go
+++ b/vendor/github.com/docker/go-units/size.go
@@ -31,7 +31,7 @@ type unitMap map[string]int64
var (
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
- sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
+ sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
)
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
index 5ac7fd825..fca0400cc 100644
--- a/vendor/github.com/docker/go-units/ulimit.go
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -96,8 +96,13 @@ func ParseUlimit(val string) (*Ulimit, error) {
return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
}
- if soft > *hard {
- return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ if *hard != -1 {
+ if soft == -1 {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard)
+ }
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
}
return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
diff --git a/vendor/github.com/docker/libnetwork/README.md b/vendor/github.com/docker/libnetwork/README.md
deleted file mode 100644
index a9020381a..000000000
--- a/vendor/github.com/docker/libnetwork/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# libnetwork - networking for containers
-
-[![Circle CI](https://circleci.com/gh/docker/libnetwork/tree/master.svg?style=svg)](https://circleci.com/gh/docker/libnetwork/tree/master) [![Coverage Status](https://coveralls.io/repos/docker/libnetwork/badge.svg)](https://coveralls.io/r/docker/libnetwork) [![GoDoc](https://godoc.org/github.com/docker/libnetwork?status.svg)](https://godoc.org/github.com/docker/libnetwork) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/libnetwork)](https://goreportcard.com/report/github.com/docker/libnetwork)
-
-Libnetwork provides a native Go implementation for connecting containers
-
-The goal of libnetwork is to deliver a robust Container Network Model that provides a consistent programming interface and the required network abstractions for applications.
-
-#### Design
-Please refer to the [design](docs/design.md) for more information.
-
-#### Using libnetwork
-
-There are many networking solutions available to suit a broad range of use-cases. libnetwork uses a driver / plugin model to support all of these solutions while abstracting the complexity of the driver implementations by exposing a simple and consistent Network Model to users.
-
-
-```go
-import (
- "fmt"
- "log"
-
- "github.com/docker/docker/pkg/reexec"
- "github.com/docker/libnetwork"
- "github.com/docker/libnetwork/config"
- "github.com/docker/libnetwork/netlabel"
- "github.com/docker/libnetwork/options"
-)
-
-func main() {
- if reexec.Init() {
- return
- }
-
- // Select and configure the network driver
- networkType := "bridge"
-
- // Create a new controller instance
- driverOptions := options.Generic{}
- genericOption := make(map[string]interface{})
- genericOption[netlabel.GenericData] = driverOptions
- controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption))
- if err != nil {
- log.Fatalf("libnetwork.New: %s", err)
- }
-
- // Create a network for containers to join.
- // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can use.
- network, err := controller.NewNetwork(networkType, "network1", "")
- if err != nil {
- log.Fatalf("controller.NewNetwork: %s", err)
- }
-
- // For each new container: allocate IP and interfaces. The returned network
- // settings will be used for container infos (inspect and such), as well as
- // iptables rules for port publishing. This info is contained or accessible
- // from the returned endpoint.
- ep, err := network.CreateEndpoint("Endpoint1")
- if err != nil {
- log.Fatalf("network.CreateEndpoint: %s", err)
- }
-
- // Create the sandbox for the container.
- // NewSandbox accepts Variadic optional arguments which libnetwork can use.
- sbx, err := controller.NewSandbox("container1",
- libnetwork.OptionHostname("test"),
- libnetwork.OptionDomainname("docker.io"))
- if err != nil {
- log.Fatalf("controller.NewSandbox: %s", err)
- }
-
- // A sandbox can join the endpoint via the join api.
- err = ep.Join(sbx)
- if err != nil {
- log.Fatalf("ep.Join: %s", err)
- }
-
- // libnetwork client can check the endpoint's operational data via the Info() API
- epInfo, err := ep.DriverInfo()
- if err != nil {
- log.Fatalf("ep.DriverInfo: %s", err)
- }
-
- macAddress, ok := epInfo[netlabel.MacAddress]
- if !ok {
- log.Fatalf("failed to get mac address from endpoint info")
- }
-
- fmt.Printf("Joined endpoint %s (%s) to sandbox %s (%s)\n", ep.Name(), macAddress, sbx.ContainerID(), sbx.Key())
-}
-```
-
-## Future
-Please refer to [roadmap](ROADMAP.md) for more information.
-
-## Contributing
-
-Want to hack on libnetwork? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
-
-## Copyright and license
-Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons.
diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go
deleted file mode 100644
index 3fd37cd88..000000000
--- a/vendor/github.com/docker/libnetwork/ipamutils/utils.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Package ipamutils provides utility functions for ipam management
-package ipamutils
-
-import (
- "fmt"
- "net"
- "sync"
-)
-
-var (
- // PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12
- // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks`
- PredefinedLocalScopeDefaultNetworks []*net.IPNet
- // PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8
- // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks`
- PredefinedGlobalScopeDefaultNetworks []*net.IPNet
- mutex sync.Mutex
- localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16},
- {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16},
- {"192.168.0.0/16", 20}}
- globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}}
-)
-
-// NetworkToSplit represent a network that has to be split in chunks with mask length Size.
-// Each subnet in the set is derived from the Base pool. Base is to be passed
-// in CIDR format.
-// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256
-// 10.10.[0-255].0/24 address pools
-type NetworkToSplit struct {
- Base string `json:"base"`
- Size int `json:"size"`
-}
-
-func init() {
- var err error
- if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil {
- //we are going to panic in case of error as we should never get into this state
- panic("InitAddressPools failed to initialize the global scope default address pool")
- }
-
- if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil {
- //we are going to panic in case of error as we should never get into this state
- panic("InitAddressPools failed to initialize the local scope default address pool")
- }
-}
-
-// configDefaultNetworks configures local as well global default pool based on input
-func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error {
- mutex.Lock()
- defer mutex.Unlock()
- defaultNetworks, err := splitNetworks(defaultAddressPool)
- if err != nil {
- return err
- }
- *result = defaultNetworks
- return nil
-}
-
-// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks
-func GetGlobalScopeDefaultNetworks() []*net.IPNet {
- mutex.Lock()
- defer mutex.Unlock()
- return PredefinedGlobalScopeDefaultNetworks
-}
-
-// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks
-func GetLocalScopeDefaultNetworks() []*net.IPNet {
- mutex.Lock()
- defer mutex.Unlock()
- return PredefinedLocalScopeDefaultNetworks
-}
-
-// ConfigGlobalScopeDefaultNetworks configures global default pool.
-// Ideally this will be called from SwarmKit as part of swarm init
-func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
- if defaultAddressPool == nil {
- defaultAddressPool = globalScopeDefaultNetworks
- }
- return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks)
-}
-
-// ConfigLocalScopeDefaultNetworks configures local default pool.
-// Ideally this will be called during libnetwork init
-func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
- if defaultAddressPool == nil {
- return nil
- }
- return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks)
-}
-
-// splitNetworks takes a slice of networks, split them accordingly and returns them
-func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
- localPools := make([]*net.IPNet, 0, len(list))
-
- for _, p := range list {
- _, b, err := net.ParseCIDR(p.Base)
- if err != nil {
- return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
- }
- ones, _ := b.Mask.Size()
- if p.Size <= 0 || p.Size < ones {
- return nil, fmt.Errorf("invalid pools size: %d", p.Size)
- }
- localPools = append(localPools, splitNetwork(p.Size, b)...)
- }
- return localPools, nil
-}
-
-func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
- one, bits := base.Mask.Size()
- mask := net.CIDRMask(size, bits)
- n := 1 << uint(size-one)
- s := uint(bits - size)
- list := make([]*net.IPNet, 0, n)
-
- for i := 0; i < n; i++ {
- ip := copyIP(base.IP)
- addIntToIP(ip, uint(i<<s))
- list = append(list, &net.IPNet{IP: ip, Mask: mask})
- }
- return list
-}
-
-func copyIP(from net.IP) net.IP {
- ip := make([]byte, len(from))
- copy(ip, from)
- return ip
-}
-
-func addIntToIP(array net.IP, ordinal uint) {
- for i := len(array) - 1; i >= 0; i-- {
- array[i] |= (byte)(ordinal & 0xff)
- ordinal >>= 8
- }
-}
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
index 23caf7f12..5cb251b13 100644
--- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
+++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
@@ -14,11 +14,6 @@ import (
"github.com/sirupsen/logrus"
)
-const (
- // DefaultResolvConf points to the default file used for dns configuration on a linux machine
- DefaultResolvConf = "/etc/resolv.conf"
-)
-
var (
// Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
@@ -55,7 +50,15 @@ type File struct {
// Get returns the contents of /etc/resolv.conf and its hash
func Get() (*File, error) {
- return GetSpecific(DefaultResolvConf)
+ resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ return nil, err
+ }
+ hash, err := ioutils.HashData(bytes.NewReader(resolv))
+ if err != nil {
+ return nil, err
+ }
+ return &File{Content: resolv, Hash: hash}, nil
}
// GetSpecific returns the contents of the user specified resolv.conf file and its hash
diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go
index b102ba4c3..f851d6fbb 100644
--- a/vendor/github.com/docker/libnetwork/types/types.go
+++ b/vendor/github.com/docker/libnetwork/types/types.go
@@ -145,12 +145,7 @@ func (p *PortBinding) String() string {
return ret
}
-// FromString reads the PortBinding structure from string s.
-// String s is a triple of "protocol/containerIP:port/hostIP:port"
-// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form.
-// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported.
-// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString
-// returns an error.
+// FromString reads the PortBinding structure from string
func (p *PortBinding) FromString(s string) error {
ps := strings.Split(s, "/")
if len(ps) != 3 {
@@ -172,19 +167,21 @@ func (p *PortBinding) FromString(s string) error {
}
func parseIPPort(s string) (net.IP, uint16, error) {
- hoststr, portstr, err := net.SplitHostPort(s)
- if err != nil {
- return nil, 0, err
+ pp := strings.Split(s, ":")
+ if len(pp) != 2 {
+ return nil, 0, BadRequestErrorf("invalid format: %s", s)
}
- ip := net.ParseIP(hoststr)
- if ip == nil {
- return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr)
+ var ip net.IP
+ if pp[0] != "" {
+ if ip = net.ParseIP(pp[0]); ip == nil {
+ return nil, 0, BadRequestErrorf("invalid ip: %s", pp[0])
+ }
}
- port, err := strconv.ParseUint(portstr, 10, 16)
+ port, err := strconv.ParseUint(pp[1], 10, 16)
if err != nil {
- return nil, 0, BadRequestErrorf("invalid port: %s", portstr)
+ return nil, 0, BadRequestErrorf("invalid port: %s", pp[1])
}
return ip, uint16(port), nil
@@ -332,8 +329,6 @@ func CompareIPNet(a, b *net.IPNet) bool {
}
// GetMinimalIP returns the address in its shortest form
-// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned.
-// Otherwise ip is returned unchanged.
func GetMinimalIP(ip net.IP) net.IP {
if ip != nil && ip.To4() != nil {
return ip.To4()
diff --git a/vendor/github.com/docker/libnetwork/vendor.conf b/vendor/github.com/docker/libnetwork/vendor.conf
deleted file mode 100644
index 865704ab4..000000000
--- a/vendor/github.com/docker/libnetwork/vendor.conf
+++ /dev/null
@@ -1,50 +0,0 @@
-github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
-github.com/Microsoft/go-winio v0.4.11
-github.com/Microsoft/hcsshim v0.7.3
-github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
-github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
-github.com/codegangsta/cli a65b733b303f0055f8d324d805f393cd3e7a7904
-github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
-github.com/coreos/etcd v3.2.1
-github.com/coreos/go-semver v0.2.0
-github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-go.etcd.io/bbolt v1.3.1-etcd.8
-
-github.com/docker/docker 162ba6016def672690ee4a1f3978368853a1e149
-github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
-github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
-github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b
-
-github.com/godbus/dbus v4.0.0
-github.com/gogo/protobuf v1.0.0
-github.com/gorilla/context v1.1
-github.com/gorilla/mux v1.1
-github.com/hashicorp/consul v0.5.2
-github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
-github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
-github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c
-github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
-github.com/hashicorp/go-sockaddr 6d291a969b86c4b633730bfc6b8b9d64c3aafed9
-github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/mattn/go-shellwords v1.0.3
-github.com/miekg/dns v1.0.7
-github.com/opencontainers/go-digest v1.0.0-rc1
-github.com/opencontainers/image-spec v1.0.1
-github.com/opencontainers/runc 96ec2177ae841256168fcf76954f7177af9446eb
-github.com/opencontainers/runtime-spec v1.0.1
-github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
-github.com/sirupsen/logrus v1.0.3
-github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
-github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
-github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
-golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
-golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
-golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
-
-gotest.tools v2.1.0
-github.com/google/go-cmp v0.2.0
diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md
new file mode 100644
index 000000000..05be0f8ab
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to libtrust
+
+Want to hack on libtrust? Awesome! Here are instructions to get you
+started.
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 000000000..9768175fe
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes <solomon@docker.com>
+Josh Hawn <josh@docker.com> (github: jlhawn)
+Derek McGowan <derek@docker.com> (github: dmcgowan)
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
new file mode 100644
index 000000000..d4eddcc53
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to SpdyStream
+
+Want to hack on spdystream? Awesome! Here are instructions to get you
+started.
+
+SpdyStream is a part of the [Docker](https://docker.io) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
new file mode 100644
index 000000000..14e263325
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/MAINTAINERS
@@ -0,0 +1,28 @@
+# Spdystream maintainers file
+#
+# This file describes who runs the docker/spdystream project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "dmcgowan",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.dmcgowan]
+ Name = "Derek McGowan"
+ Email = "derek@docker.com"
+ GitHub = "dmcgowan"
diff --git a/vendor/github.com/etcd-io/bbolt/.gitignore b/vendor/github.com/etcd-io/bbolt/.gitignore
new file mode 100644
index 000000000..3bcd8cbaf
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/.gitignore
@@ -0,0 +1,5 @@
+*.prof
+*.test
+*.swp
+/bin/
+cover.out
diff --git a/vendor/github.com/etcd-io/bbolt/.travis.yml b/vendor/github.com/etcd-io/bbolt/.travis.yml
new file mode 100644
index 000000000..a60300c55
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: go.etcd.io/bbolt
+
+sudo: false
+
+go:
+- 1.11
+
+before_install:
+- go get -v honnef.co/go/tools/...
+- go get -v github.com/kisielk/errcheck
+
+script:
+- make fmt
+- make test
+- make race
+# - make errcheck
diff --git a/vendor/github.com/etcd-io/bbolt/Makefile b/vendor/github.com/etcd-io/bbolt/Makefile
new file mode 100644
index 000000000..2968aaa61
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/Makefile
@@ -0,0 +1,38 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+ @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
+ @echo "array freelist test"
+ @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+fmt:
+ !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
+
+# go get honnef.co/go/tools/simple
+gosimple:
+ gosimple ./...
+
+# go get honnef.co/go/tools/unused
+unused:
+ unused ./...
+
+# go get github.com/kisielk/errcheck
+errcheck:
+ @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
+
+test:
+ TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
+
+ @echo "array freelist test"
+
+ @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
+
+.PHONY: race fmt errcheck test gosimple unused
diff --git a/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go b/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go
new file mode 100644
index 000000000..07b4b47cd
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go
@@ -0,0 +1,12 @@
+// +build riscv64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = true
diff --git a/vendor/github.com/etcd-io/bbolt/db.go b/vendor/github.com/etcd-io/bbolt/db.go
index 962248c99..870c8b1cc 100644
--- a/vendor/github.com/etcd-io/bbolt/db.go
+++ b/vendor/github.com/etcd-io/bbolt/db.go
@@ -121,6 +121,7 @@ type DB struct {
AllocSize int
path string
+ openFile func(string, int, os.FileMode) (*os.File, error)
file *os.File
dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
@@ -199,10 +200,15 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
db.readOnly = true
}
+ db.openFile = options.OpenFile
+ if db.openFile == nil {
+ db.openFile = os.OpenFile
+ }
+
// Open data file and separate sync handler for metadata writes.
db.path = path
var err error
- if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil {
_ = db.close()
return nil, err
}
@@ -1054,6 +1060,10 @@ type Options struct {
// set directly on the DB itself when returned from Open(), but this option
// is useful in APIs which expose Options but not the underlying DB.
NoSync bool
+
+ // OpenFile is used to open files. It defaults to os.OpenFile. This option
+ // is useful for writing hermetic tests.
+ OpenFile func(string, int, os.FileMode) (*os.File, error)
}
// DefaultOptions represent the options used if nil options are passed into Open().
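For context, a minimal sketch of how the new `Options.OpenFile` hook might be exercised from application code (assuming the vendored `github.com/etcd-io/bbolt` import path; the recording wrapper is purely illustrative):

```go
package main

import (
	"log"
	"os"

	bolt "github.com/etcd-io/bbolt"
)

func main() {
	// Record every file the database opens; handy for hermetic tests.
	var opened []string
	opts := &bolt.Options{
		OpenFile: func(name string, flag int, mode os.FileMode) (*os.File, error) {
			opened = append(opened, name)
			return os.OpenFile(name, flag, mode)
		},
	}
	db, err := bolt.Open("test.db", 0600, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	log.Println("files opened:", opened)
}
```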
diff --git a/vendor/github.com/etcd-io/bbolt/freelist.go b/vendor/github.com/etcd-io/bbolt/freelist.go
index 93fd85d50..587b8cc02 100644
--- a/vendor/github.com/etcd-io/bbolt/freelist.go
+++ b/vendor/github.com/etcd-io/bbolt/freelist.go
@@ -349,6 +349,28 @@ func (f *freelist) reload(p *page) {
f.readIDs(a)
}
+// noSyncReload reads the freelist from pgids and filters out pending items.
+func (f *freelist) noSyncReload(pgids []pgid) {
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range pgids {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
ids := f.getFreePageIDs()
diff --git a/vendor/github.com/etcd-io/bbolt/tx.go b/vendor/github.com/etcd-io/bbolt/tx.go
index f50864142..2df7688c2 100644
--- a/vendor/github.com/etcd-io/bbolt/tx.go
+++ b/vendor/github.com/etcd-io/bbolt/tx.go
@@ -254,17 +254,36 @@ func (tx *Tx) Rollback() error {
if tx.db == nil {
return ErrTxClosed
}
- tx.rollback()
+ tx.nonPhysicalRollback()
return nil
}
+// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk.
+func (tx *Tx) nonPhysicalRollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ }
+ tx.close()
+}
+
+// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
func (tx *Tx) rollback() {
if tx.db == nil {
return
}
if tx.writable {
tx.db.freelist.rollback(tx.meta.txid)
- tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ if !tx.db.hasSyncedFreelist() {
+ // Reconstruct free page list by scanning the DB to get the whole free page list.
+ // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
+ tx.db.freelist.noSyncReload(tx.db.freepages())
+ } else {
+ // Read free page list from freelist page.
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ }
}
tx.close()
}
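To illustrate the split above, a small sketch (again assuming the `github.com/etcd-io/bbolt` import path): a user-initiated `Rollback` takes the in-memory `nonPhysicalRollback` path, while the disk-reloading `rollback` is reserved for internal error handling such as a failed fsync.

```go
package main

import (
	"log"

	bolt "github.com/etcd-io/bbolt"
)

func main() {
	db, err := bolt.Open("test.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		log.Fatal(err)
	}
	// Calling Rollback directly only rolls the freelist back in memory;
	// no free pages are re-read from disk.
	if err := tx.Rollback(); err != nil {
		log.Fatal(err)
	}
}
```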
@@ -315,7 +334,7 @@ func (tx *Tx) Copy(w io.Writer) error {
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// Attempt to open reader with WriteFlag
- f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
if err != nil {
return 0, err
}
@@ -369,7 +388,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
- f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+ f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml
new file mode 100644
index 000000000..3489e3871
--- /dev/null
+++ b/vendor/github.com/fatih/camelcase/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go: 1.x
+
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 000000000..ba49e3c23
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
new file mode 100644
index 000000000..981d1bb81
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_script:
+ - go get -u github.com/golang/lint/golint
+
+script:
+ - go test -v --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 000000000..5ab5d41c5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L <aaron@bettercoder.net>
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 000000000..be4d7ea2c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 000000000..828a60b24
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once set up, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore
new file mode 100644
index 000000000..ef22245ea
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore
@@ -0,0 +1,4 @@
+# temporary symlink for testing
+testing/data/symlink
+Gopkg.lock
+vendor/
diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml
new file mode 100644
index 000000000..74e54aa77
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml
@@ -0,0 +1,33 @@
+dist: xenial
+language: go
+go:
+ - 1.11.x
+ - 1.12.x
+os:
+ - linux
+ - osx
+ - windows
+env:
+ matrix:
+ - GOARCH=amd64 DEP_TOOL=mod GO111MODULE=on
+ - GOARCH=386 DEP_TOOL=mod GO111MODULE=on
+ - GOARCH=amd64 DEP_TOOL=dep GO111MODULE=off
+ - GOARCH=386 DEP_TOOL=dep GO111MODULE=off
+ global:
+ - GOPROXY=https://proxy.golang.org
+install:
+ - travis-scripts/win-setup.bash
+ - make testdeps DEP_TOOL=${DEP_TOOL}
+script:
+ - travis_wait 25 travis-scripts/run-tests.bash
+services:
+ - docker
+matrix:
+ fast_finish: true
+ exclude:
+ - os: osx
+ env: GOARCH=386 DEP_TOOL=dep GO111MODULE=off
+ - os: osx
+ env: GOARCH=386 DEP_TOOL=mod GO111MODULE=on
+ allow_failures:
+ - os: windows
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
new file mode 100644
index 000000000..4e9f7b8e8
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -0,0 +1,200 @@
+# This is the official list of go-dockerclient authors for copyright purposes.
+
+Abhishek Chanda
+Adam Bell-Hanssen
+Adnan Khan
+Adrien Kohlbecker
+Aithal
+Aldrin Leal
+Alex Dadgar
+Alfonso Acosta
+André Carvalho
+Andreas Jaekle
+Andrew Snodgrass
+Andrews Medina
+Andrey Sibiryov
+Andy Goldstein
+Anirudh Aithal
+Antonio Murdaca
+Artem Sidorenko
+Arthur Rodrigues
+Ben Marini
+Ben McCann
+Ben Parees
+Benno van den Berg
+Bradley Cicenas
+Brendan Fosberry
+Brett Buddin
+Brian Lalor
+Brian P. Hamachek
+Brian Palmer
+Bryan Boreham
+Burke Libbey
+Carlos Diaz-Padron
+Carson A
+Cássio Botaro
+Cesar Wong
+Cezar Sa Espinola
+Changping Chen
+Cheah Chu Yeow
+cheneydeng
+Chris Bednarski
+Chris Stavropoulos
+Christian Stewart
+Christophe Mourette
+Clayton Coleman
+Clint Armstrong
+CMGS
+Colin Hebert
+Craig Jellick
+Damien Lespiau
+Damon Wang
+Dan Williams
+Daniel, Dao Quang Minh
+Daniel Garcia
+Daniel Hess
+Daniel Hiltgen
+Daniel Nephin
+Daniel Tsui
+Darren Shepherd
+Dave Choi
+David Huie
+Dawn Chen
+Denis Makogon
+Derek Petersen
+Dinesh Subhraveti
+Drew Wells
+Ed
+Elias G. Schneevoigt
+Erez Horev
+Eric Anderson
+Eric Fode
+Eric J. Holmes
+Eric Mountain
+Erwin van Eyk
+Ethan Mosbaugh
+Ewout Prangsma
+Fabio Rehm
+Fatih Arslan
+Faye Salwin
+Felipe Oliveira
+Flavia Missi
+Florent Aide
+Francisco Souza
+Frank Groeneveld
+George MacRorie
+George Moura
+Grégoire Delattre
+Guilherme Rezende
+Guillermo Álvarez Fernández
+Harry Zhang
+He Simei
+Isaac Schnitzer
+Ivan Mikushin
+James Bardin
+James Nugent
+Jamie Snell
+Januar Wayong
+Jari Kolehmainen
+Jason Wilder
+Jawher Moussa
+Jean-Baptiste Dalido
+Jeff Mitchell
+Jeffrey Hulten
+Jen Andre
+Jérôme Laurens
+Jim Minter
+Johan Euphrosine
+Johannes Scheuermann
+John Hughes
+Jorge Marey
+Julian Einwag
+Kamil Domanski
+Karan Misra
+Ken Herner
+Kevin Lin
+Kevin Xu
+Kim, Hirokuni
+Kostas Lekkas
+Kyle Allan
+Yunhee Lee
+Liron Levin
+Lior Yankovich
+Liu Peng
+Lorenz Leutgeb
+Lucas Clemente
+Lucas Weiblen
+Lyon Hill
+Mantas Matelis
+Manuel Vogel
+Marguerite des Trois Maisons
+Mariusz Borsa
+Martin Sweeney
+Máximo Cuadros Ortiz
+Michael Schmatz
+Michal Fojtik
+Mike Dillon
+Mrunal Patel
+Nate Jones
+Nguyen Sy Thanh Son
+Nicholas Van Wiggeren
+Nick Ethier
+niko83
+Omeid Matten
+Orivej Desh
+Paul Bellamy
+Paul Morie
+Paul Weil
+Peng Yin
+Peter Edge
+Peter Jihoon Kim
+Peter Teich
+Phil Lu
+Philippe Lafoucrière
+Radek Simko
+Rafe Colton
+Raphaël Pinson
+Reed Allman
+RJ Catalano
+Rob Miller
+Robbert Klarenbeek
+Robert Williamson
+Roman Khlystik
+Russell Haering
+Salvador Gironès
+Sam Rijs
+Sami Wagiaalla
+Samuel Archambault
+Samuel Karp
+Sebastian Borza
+Seth Jennings
+Shane Xie
+Silas Sewell
+Simon Eskildsen
+Simon Menke
+Skolos
+Soulou
+Sridhar Ratnakumar
+Steven Jack
+Summer Mousa
+Sunjin Lee
+Sunny
+Swaroop Ramachandra
+Tarsis Azevedo
+Tim Schindler
+Timothy St. Clair
+Tobi Knaup
+Tom Wilkie
+Tonic
+ttyh061
+upccup
+Victor Marmol
+Vijay Krishnan
+Vincenzo Prignano
+Vlad Alexandru Ionescu
+Weitao Zhou
+Wiliam Souza
+Ye Yin
+Yosuke Otosu
+Yu, Zou
+Yuriy Bogdanov
diff --git a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml
new file mode 100644
index 000000000..4be9ee73a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml
@@ -0,0 +1,23 @@
+[[constraint]]
+ name = "github.com/Microsoft/go-winio"
+ version = "v0.4.11"
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/docker/docker"
+
+[[constraint]]
+ name = "github.com/docker/go-units"
+ version = "v0.3.3"
+
+[[constraint]]
+ name = "github.com/google/go-cmp"
+ version = "v0.2.0"
+
+[[constraint]]
+ name = "github.com/gorilla/mux"
+ version = "v1.6.2 - v1.7.0"
+
+[[override]]
+ name = "github.com/docker/libnetwork"
+ revision = "19279f0492417475b6bfbd0aa529f73e8f178fb5"
diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE
index fc7e73f8f..80d56fc05 100644
--- a/vendor/github.com/fsouza/go-dockerclient/LICENSE
+++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013-2018, go-dockerclient authors
+Copyright (c) 2013-2019, go-dockerclient authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile
new file mode 100644
index 000000000..be71a3c26
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/Makefile
@@ -0,0 +1,41 @@
+.PHONY: \
+ all \
+ staticcheck \
+ fmt \
+ fmtcheck \
+ pretest \
+ test \
+ integration
+
+DEP_TOOL ?= mod
+
+all: test
+
+staticcheck:
+ GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck
+ staticcheck ./...
+
+fmtcheck:
+ if [ -z "$${SKIP_FMT_CHECK}" ]; then [ -z "$$(gofumpt -s -d . | tee /dev/stderr)" ]; fi
+
+fmt:
+ GO111MODULE=off go get mvdan.cc/gofumpt
+ gofumpt -s -w .
+
+testdeps:
+ifeq ($(DEP_TOOL), dep)
+ GO111MODULE=off go get -u github.com/golang/dep/cmd/dep
+ dep ensure -v
+else
+ go mod download
+endif
+
+pretest: staticcheck fmtcheck
+
+gotest:
+ go test -race -vet all ./...
+
+test: testdeps pretest gotest
+
+integration:
+ go test -tags docker_integration -run TestIntegration -v
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.md
index 86824d6c5..501b967f7 100644
--- a/vendor/github.com/fsouza/go-dockerclient/README.markdown
+++ b/vendor/github.com/fsouza/go-dockerclient/README.md
@@ -1,19 +1,30 @@
# go-dockerclient
-[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient)
-[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient)
+[![Travis Build Status](https://travis-ci.com/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.com/fsouza/go-dockerclient)
+[![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/4yusq1f9dqbicobt?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
This package also provides support for docker's network API, which is a simple
-passthrough to the libnetwork remote API. Note that docker's network API is
-only available in docker 1.8 and above, and only enabled in docker if
-DOCKER_EXPERIMENTAL is defined during the docker build process.
+passthrough to the libnetwork remote API.
For more details, check the [remote API
-documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
+documentation](https://docs.docker.com/engine/api/latest/).
+
+## Difference between go-dockerclient and the official SDK
+
+Link for the official SDK: https://docs.docker.com/develop/sdk/
+
+go-dockerclient was created before Docker had an official Go SDK and is
+still maintained and relatively active because it's still used out there. New
+features in the Docker API do not get automatically implemented here: they are
+added on demand. If someone wants a feature, they can file an issue or a PR and
+it may get implemented/merged.
+
+For new projects, using the official SDK is probably more appropriate as
+go-dockerclient lags behind the official SDK.
## Example
@@ -23,12 +34,11 @@ package main
import (
"fmt"
- "github.com/fsouza/go-dockerclient"
+ docker "github.com/fsouza/go-dockerclient"
)
func main() {
- endpoint := "unix:///var/run/docker.sock"
- client, err := docker.NewClient(endpoint)
+ client, err := docker.NewClientFromEnv()
if err != nil {
panic(err)
}
@@ -59,11 +69,11 @@ package main
import (
"fmt"
- "github.com/fsouza/go-dockerclient"
+ docker "github.com/fsouza/go-dockerclient"
)
func main() {
- endpoint := "tcp://[ip]:[port]"
+ const endpoint = "tcp://[ip]:[port]"
path := os.Getenv("DOCKER_CERT_PATH")
ca := fmt.Sprintf("%s/ca.pem", path)
cert := fmt.Sprintf("%s/cert.pem", path)
@@ -84,7 +94,7 @@ package main
import (
"fmt"
- "github.com/fsouza/go-dockerclient"
+ docker "github.com/fsouza/go-dockerclient"
)
func main() {
@@ -101,20 +111,19 @@ All development commands can be seen in the [Makefile](Makefile).
Committed code must pass:
-* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile).
-* [go vet](https://golang.org/cmd/vet/)
-* [gofmt](https://golang.org/cmd/gofmt)
+* [staticcheck](https://staticcheck.io/)
+* [gofumpt](https://github.com/mvdan/gofumpt)
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
-Running `make test` will check all of these. If your editor does not
-automatically call ``gofmt -s``, `make fmt` will format all go files in this
-repository.
+Running ``make test`` will check all of these. You can reformat the code with
+``make fmt``.
-## Vendoring
+## Vendoring / Modules
-go-dockerclient uses [dep](https://github.com/golang/dep/) for vendoring. If
-you're using dep, you should be able to pick go-dockerclient releases and get
-the proper dependencies.
+go-dockerclient supports [dep](https://github.com/golang/dep/) for vendoring
+and can also be installed as a module. If you're using dep or Go modules, you
+should be able to pick go-dockerclient releases and get the proper
+dependencies.
With other vendoring tools, users might need to specify go-dockerclient's
dependencies manually.
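Pieced together from the README fragments above, a complete version of the first example might look like this (a sketch: it assumes a reachable Docker daemon and the usual `DOCKER_HOST`/`DOCKER_CERT_PATH` environment variables for `NewClientFromEnv`):

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// NewClientFromEnv configures the client from DOCKER_HOST,
	// DOCKER_TLS_VERIFY and DOCKER_CERT_PATH.
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	imgs, err := client.ListImages(docker.ListImagesOptions{All: false})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		fmt.Println("ID:", img.ID, "RepoTags:", img.RepoTags)
	}
}
```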
diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
new file mode 100644
index 000000000..6f49a8b84
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
@@ -0,0 +1,33 @@
+version: "{build}"
+platform: x64
+clone_depth: 2
+clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient
+environment:
+ GOPATH: c:\gopath
+ GOPROXY: https://proxy.golang.org
+ SKIP_FMT_CHECK: 1
+ matrix:
+ - GOVERSION: &go111 "1.11.10"
+ DEP_TOOL: dep
+ GO111MODULE: off
+ - GOVERSION: &go112 "1.12.5"
+ DEP_TOOL: dep
+ GO111MODULE: off
+ - GOVERSION: *go111
+ DEP_TOOL: mod
+ GO111MODULE: on
+ - GOVERSION: *go112
+ DEP_TOOL: mod
+ GO111MODULE: on
+install:
+ - choco install make
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - rmdir c:\go /s /q
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip
+ - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL
+build_script:
+ - make testdeps DEP_TOOL=%DEP_TOOL%
+test_script:
+ - make pretest gotest DEP_TOOL=%DEP_TOOL%
+matrix:
+ fast_finish: true
diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go
index 4335d6e06..e7de97701 100644
--- a/vendor/github.com/fsouza/go-dockerclient/auth.go
+++ b/vendor/github.com/fsouza/go-dockerclient/auth.go
@@ -18,7 +18,7 @@ import (
)
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
-var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
+var ErrCannotParseDockercfg = errors.New("failed to read authentication from dockercfg")
// AuthConfiguration represents authentication options to use in the PushImage
// method. It represents the authentication in the Docker index server.
@@ -37,16 +37,40 @@ type AuthConfiguration struct {
RegistryToken string `json:"registrytoken,omitempty"`
}
+func (c AuthConfiguration) isEmpty() bool {
+ return c == AuthConfiguration{}
+}
+
+func (c AuthConfiguration) headerKey() string {
+ return "X-Registry-Auth"
+}
+
// AuthConfigurations represents authentication options to use for the
// PushImage method accommodating the new X-Registry-Config header
type AuthConfigurations struct {
Configs map[string]AuthConfiguration `json:"configs"`
}
+func (c AuthConfigurations) isEmpty() bool {
+ return len(c.Configs) == 0
+}
+
+func (c AuthConfigurations) headerKey() string {
+ return "X-Registry-Config"
+}
+
// AuthConfigurations119 is used to serialize a set of AuthConfigurations
// for Docker API >= 1.19.
type AuthConfigurations119 map[string]AuthConfiguration
+func (c AuthConfigurations119) isEmpty() bool {
+ return len(c) == 0
+}
+
+func (c AuthConfigurations119) headerKey() string {
+ return "X-Registry-Config"
+}
+
// dockerConfig represents a registry authentication configuration from the
// .dockercfg file.
type dockerConfig struct {
@@ -84,7 +108,7 @@ func cfgPaths(dockerConfigEnv string, homeEnv string) []string {
// - $HOME/.docker/config.json
// - $HOME/.dockercfg
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
- err := fmt.Errorf("No docker configuration found")
+ err := fmt.Errorf("no docker configuration found")
var auths *AuthConfigurations
pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME"))
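As a usage sketch of the lookup order documented above (the paths come from the doc comment; the loop body is illustrative only):

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Credentials are looked up in $DOCKER_CONFIG/config.json,
	// then $HOME/.docker/config.json, then $HOME/.dockercfg.
	auths, err := docker.NewAuthConfigurationsFromDockerCfg()
	if err != nil {
		log.Fatal(err)
	}
	for registry := range auths.Configs {
		fmt.Println("found credentials for", registry)
	}
}
```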
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
index 581e31417..9953e3253 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -31,7 +31,6 @@ import (
"sync/atomic"
"time"
- "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/stdcopy"
"github.com/fsouza/go-dockerclient/internal/jsonmessage"
@@ -70,7 +69,7 @@ type APIVersion []int
// <minor> and <patch> are integer numbers.
func NewAPIVersion(input string) (APIVersion, error) {
if !strings.Contains(input, ".") {
- return nil, fmt.Errorf("Unable to parse version %q", input)
+ return nil, fmt.Errorf("unable to parse version %q", input)
}
raw := strings.Split(input, "-")
arr := strings.Split(raw[0], ".")
@@ -79,7 +78,7 @@ func NewAPIVersion(input string) (APIVersion, error) {
for i, val := range arr {
ret[i], err = strconv.Atoi(val)
if err != nil {
- return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+ return nil, fmt.Errorf("unable to parse version %q: %q is not an integer", input, val)
}
}
return ret, nil
@@ -329,7 +328,7 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
} else {
caPool := x509.NewCertPool()
if !caPool.AppendCertsFromPEM(caPEMCert) {
- return nil, errors.New("Could not add RootCA pem")
+ return nil, errors.New("could not add RootCA pem")
}
tlsConfig.RootCAs = caPool
}
@@ -387,7 +386,7 @@ func (c *Client) Endpoint() string {
//
// See https://goo.gl/wYfgY1 for more details.
func (c *Client) Ping() error {
- return c.PingWithContext(nil)
+ return c.PingWithContext(context.TODO())
}
// PingWithContext pings the docker server
@@ -414,7 +413,7 @@ func (c *Client) getServerAPIVersionString() (version string, err error) {
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+ return "", fmt.Errorf("received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
}
var versionResponse map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
@@ -639,11 +638,7 @@ func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) err
_, err = io.Copy(streamOptions.stdout, resp.Body)
return err
}
- if st, ok := streamOptions.stdout.(interface {
- io.Writer
- FD() uintptr
- IsTerminal() bool
- }); ok {
+ if st, ok := streamOptions.stdout.(stream); ok {
err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil)
} else {
err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil)
@@ -651,6 +646,12 @@ func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) err
return err
}
+type stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
type proxyReader struct {
io.ReadCloser
calls uint64
@@ -760,6 +761,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
errs := make(chan error, 1)
quit := make(chan struct{})
go func() {
+ //lint:ignore SA1019 this is needed here
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
clientconn.Do(req)
@@ -872,13 +874,6 @@ func (c *Client) getFakeNativeURL(path string) string {
return fmt.Sprintf("%s%s", urlStr, path)
}
-type jsonMessage struct {
- Status string `json:"status,omitempty"`
- Progress string `json:"progress,omitempty"`
- Error string `json:"error,omitempty"`
- Stream string `json:"stream,omitempty"`
-}
-
func queryString(opts interface{}) string {
if opts == nil {
return ""
@@ -1029,7 +1024,7 @@ func getDockerEnv() (*dockerEnv, error) {
dockerHost := os.Getenv("DOCKER_HOST")
var err error
if dockerHost == "" {
- dockerHost = opts.DefaultHost
+ dockerHost = defaultHost
}
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
var dockerCertPath string
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
index 57d7904ea..cd2034304 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client_unix.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
@@ -12,6 +12,8 @@ import (
"net/http"
)
+const defaultHost = "unix:///var/run/docker.sock"
+
// initializeNativeClient initializes the native Unix domain socket client on
// Unix-style operating systems
func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
@@ -21,11 +23,8 @@ func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
sockPath := c.endpointURL.Path
tr := trFunc()
-
- tr.Dial = func(network, addr string) (net.Conn, error) {
- return c.Dialer.Dial(unixProtocol, sockPath)
- }
- tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ tr.Proxy = nil
+ tr.DialContext = func(_ context.Context, network, addr string) (net.Conn, error) {
return c.Dialer.Dial(unixProtocol, sockPath)
}
c.HTTPClient.Transport = tr
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
index 8e7b457d7..63d97ec65 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client_windows.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build windows
-
package docker
import (
@@ -12,10 +10,13 @@ import (
"net/http"
"time"
- "github.com/Microsoft/go-winio"
+ winio "github.com/Microsoft/go-winio"
)
-const namedPipeConnectTimeout = 2 * time.Second
+const (
+ defaultHost = "npipe:////./pipe/docker_engine"
+ namedPipeConnectTimeout = 2 * time.Second
+)
type pipeDialer struct {
dialFunc func(network, addr string) (net.Conn, error)
@@ -36,7 +37,7 @@ func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
return winio.DialPipe(namedPipePath, &timeout)
}
tr := trFunc()
- tr.Dial = dialFunc
+ tr.Proxy = nil
tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialFunc(network, addr)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
index b48bc2cf6..e40c9c2e4 100644
--- a/vendor/github.com/fsouza/go-dockerclient/container.go
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -16,7 +16,7 @@ import (
"strings"
"time"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
// ErrContainerAlreadyExists is the error returned by CreateContainer when the
@@ -52,7 +52,7 @@ type APIMount struct {
Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"`
Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"`
RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"`
- Propogation string `json:"Propogation,omitempty" yaml:"Propogation,omitempty" toml:"Propogation,omitempty"`
+ Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"`
}
// APIContainers represents each container in the list returned by
@@ -627,7 +627,7 @@ func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error
)
if e, ok := err.(*Error); ok {
- if e.Status == http.StatusNotFound {
+ if e.Status == http.StatusNotFound && strings.Contains(e.Message, "No such image") {
return nil, ErrNoSuchImage
}
if e.Status == http.StatusConflict {
@@ -754,7 +754,6 @@ type HostConfig struct {
MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"`
KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"`
- MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"`
CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty" toml:"CpusetCpus,omitempty"`
@@ -772,14 +771,11 @@ type HostConfig struct {
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" toml:"Ulimits,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"`
OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty" toml:"OomScoreAdj,omitempty"`
- PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"`
+ MemorySwappiness *int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"`
+ PidsLimit *int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"`
+ OOMKillDisable *bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"`
ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty" toml:"ShmSize,omitempty"`
Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty" toml:"Tmpfs,omitempty"`
- Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
- PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"`
- ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"`
- OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"`
- AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"`
StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"`
Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"`
CPUCount int64 `json:"CpuCount,omitempty" yaml:"CpuCount,omitempty"`
@@ -787,8 +783,12 @@ type HostConfig struct {
IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"`
IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"`
Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
- Init bool `json:",omitempty" yaml:",omitempty"`
Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"`
+ Init bool `json:",omitempty" yaml:",omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"`
+ AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"`
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
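A brief sketch of what the switch to pointer fields means for callers (illustrative values only): a nil pointer now means "not set", while a pointer to zero explicitly requests the zero value.

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	pids := int64(100)
	swappiness := int64(60)
	oomKill := false
	hc := docker.HostConfig{
		PidsLimit:        &pids,       // limit to 100 processes
		MemorySwappiness: &swappiness, // tune swappiness explicitly
		OOMKillDisable:   &oomKill,    // false is now distinguishable from "unset"
	}
	fmt.Println(*hc.PidsLimit, *hc.MemorySwappiness, *hc.OOMKillDisable)
}
```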
@@ -1103,13 +1103,8 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
defer func() {
close(opts.Stats)
- select {
- case err := <-errC:
- if err != nil && retErr == nil {
- retErr = err
- }
- default:
- // No errors
+ if err := <-errC; err != nil && retErr == nil {
+ retErr = err
}
if err := readCloser.Close(); err != nil && retErr == nil {
@@ -1119,6 +1114,7 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
reqSent := make(chan struct{})
go func() {
+ defer close(errC)
err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
rawJSONStream: true,
useJSONDecoder: true,
@@ -1140,7 +1136,6 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
err = closeErr
}
errC <- err
- close(errC)
}()
quit := make(chan struct{})
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
index 18ae5d5a6..3a3364d9d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/event.go
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -330,6 +330,7 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if err != nil {
return err
}
+ //lint:ignore SA1019 this is needed here
conn := httputil.NewClientConn(dial, nil)
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
@@ -339,6 +340,7 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if err != nil {
return err
}
+ //lint:ignore SA1019 ClientConn is needed here
go func(res *http.Response, conn *httputil.ClientConn) {
defer conn.Close()
defer res.Body.Close()
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
index 5e7ea87f6..d804b10b8 100644
--- a/vendor/github.com/fsouza/go-dockerclient/exec.go
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -25,16 +25,16 @@ type Exec struct {
//
// See https://goo.gl/60TeBP for more details
type CreateExecOptions struct {
- AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
- AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
- AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
- Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"`
Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"`
User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"`
Context context.Context `json:"-"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
}
@@ -177,13 +177,13 @@ type ExecProcessConfig struct {
type ExecInspect struct {
ID string `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"`
+ ContainerID string `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"`
+ DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty" toml:"OpenStderr,omitempty"`
OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty" toml:"OpenStdout,omitempty"`
- ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"`
- ContainerID string `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"`
- DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
CanRemove bool `json:"CanRemove,omitempty" yaml:"CanRemove,omitempty" toml:"CanRemove,omitempty"`
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod
index bcf549c21..b4e75afc8 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.mod
+++ b/vendor/github.com/fsouza/go-dockerclient/go.mod
@@ -1,42 +1,26 @@
module github.com/fsouza/go-dockerclient
+go 1.11
+
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
- github.com/Microsoft/go-winio v0.4.11
- github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5
- github.com/containerd/continuity v0.0.0-20180814194400-c7c5070e6f6e // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23
+ github.com/Microsoft/go-winio v0.4.12
+ github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
+ github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16
github.com/docker/go-connections v0.4.0 // indirect
- github.com/docker/go-units v0.3.3
- github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241 // indirect
- github.com/fsnotify/fsnotify v1.4.7 // indirect
- github.com/gogo/protobuf v1.1.1 // indirect
- github.com/golang/protobuf v1.2.0 // indirect
- github.com/google/go-cmp v0.2.0
- github.com/gorilla/context v1.1.1 // indirect
- github.com/gorilla/mux v1.6.2
- github.com/hpcloud/tail v1.0.0 // indirect
- github.com/onsi/ginkgo v1.6.0 // indirect
- github.com/onsi/gomega v1.4.1 // indirect
+ github.com/docker/go-units v0.4.0
+ github.com/gogo/protobuf v1.2.1 // indirect
+ github.com/golang/protobuf v1.3.0 // indirect
+ github.com/google/go-cmp v0.3.0
+ github.com/gorilla/mux v1.7.2
+ github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
- github.com/pkg/errors v0.8.0 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/sirupsen/logrus v1.0.6
- github.com/stretchr/testify v1.2.2 // indirect
- github.com/vishvananda/netlink v1.0.0 // indirect
- github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
- golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect
- golang.org/x/net v0.0.0-20180826012351-8a410e7b638d // indirect
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87
- golang.org/x/text v0.3.0 // indirect
- gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
- gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.1 // indirect
- gotest.tools v2.1.0+incompatible // indirect
+ github.com/pkg/errors v0.8.1 // indirect
+ github.com/sirupsen/logrus v1.3.0 // indirect
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+ golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa
+ gotest.tools v2.2.0+incompatible // indirect
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum
new file mode 100644
index 000000000..bc93f2dae
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/go.sum
@@ -0,0 +1,60 @@
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
+github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
+github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 h1:dmUn0SuGx7unKFwxyeQ/oLUHhEfZosEDrpmYM+6MTuc=
+github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU=
+github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM=
+github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa h1:lqti/xP+yD/6zH5TqEwx2MilNIJY5Vbc6Qr8J3qyPIQ=
+golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go
index 124e78da3..f9e1c6f04 100644
--- a/vendor/github.com/fsouza/go-dockerclient/image.go
+++ b/vendor/github.com/fsouza/go-dockerclient/image.go
@@ -5,7 +5,6 @@
package docker
import (
- "bytes"
"context"
"encoding/base64"
"encoding/json"
@@ -445,15 +444,10 @@ func (c *Client) ImportImage(opts ImportImageOptions) error {
// For more details about the Docker building process, see
// https://goo.gl/4nYHwV.
type BuildImageOptions struct {
+ Context context.Context
Name string `qs:"t"`
Dockerfile string `qs:"dockerfile"`
- NoCache bool `qs:"nocache"`
CacheFrom []string `qs:"-"`
- SuppressOutput bool `qs:"q"`
- Pull bool `qs:"pull"`
- RmTmpContainer bool `qs:"rm"`
- ForceRmTmpContainer bool `qs:"forcerm"`
- RawJSONStream bool `qs:"-"`
Memory int64 `qs:"memory"`
Memswap int64 `qs:"memswap"`
CPUShares int64 `qs:"cpushares"`
@@ -474,7 +468,12 @@ type BuildImageOptions struct {
CgroupParent string `qs:"cgroupparent"`
SecurityOpt []string `qs:"securityopt"`
Target string `gs:"target"`
- Context context.Context
+ NoCache bool `qs:"nocache"`
+ SuppressOutput bool `qs:"q"`
+ Pull bool `qs:"pull"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm"`
+ RawJSONStream bool `qs:"-"`
}
// BuildArg represents arguments that can be passed to the image when building
@@ -558,7 +557,7 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
})
}
-func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} {
+func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) registryAuth {
if c.serverAPIVersion == nil {
c.checkAPIVersion()
}
@@ -588,7 +587,6 @@ func (c *Client) TagImage(name string, opts TagImageOptions) error {
resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{
context: opts.Context,
})
-
if err != nil {
return err
}
@@ -610,24 +608,18 @@ func isURL(u string) bool {
return p.Scheme == "http" || p.Scheme == "https"
}
-func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+func headersWithAuth(auths ...registryAuth) (map[string]string, error) {
var headers = make(map[string]string)
for _, auth := range auths {
- switch auth.(type) {
- case AuthConfiguration:
- var buf bytes.Buffer
- if err := json.NewEncoder(&buf).Encode(auth); err != nil {
- return nil, err
- }
- headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
- case AuthConfigurations, AuthConfigurations119:
- var buf bytes.Buffer
- if err := json.NewEncoder(&buf).Encode(auth); err != nil {
- return nil, err
- }
- headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ if auth.isEmpty() {
+ continue
+ }
+ data, err := json.Marshal(auth)
+ if err != nil {
+ return nil, err
}
+ headers[auth.headerKey()] = base64.URLEncoding.EncodeToString(data)
}
return headers, nil
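The headersWithAuth rewrite above replaces a type switch over interface{} with the small registryAuth interface added later in this diff (registry_auth.go). A self-contained sketch of the same pattern, using a simplified stand-in type rather than the library's AuthConfiguration structs:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// registryAuth mirrors the interface introduced by this diff: each auth
// payload knows whether it is empty and which header it belongs in.
type registryAuth interface {
    isEmpty() bool
    headerKey() string
}

// singleAuth is a simplified stand-in for AuthConfiguration.
type singleAuth struct {
    Username string `json:"username,omitempty"`
    Password string `json:"password,omitempty"`
}

func (a singleAuth) isEmpty() bool     { return a == (singleAuth{}) }
func (a singleAuth) headerKey() string { return "X-Registry-Auth" }

// headersWithAuth builds the registry headers the same way the rewritten
// function does: skip empty payloads, marshal, then base64-encode.
func headersWithAuth(auths ...registryAuth) (map[string]string, error) {
    headers := make(map[string]string)
    for _, auth := range auths {
        if auth.isEmpty() {
            continue
        }
        data, err := json.Marshal(auth)
        if err != nil {
            return nil, err
        }
        headers[auth.headerKey()] = base64.URLEncoding.EncodeToString(data)
    }
    return headers, nil
}

func main() {
    h, err := headersWithAuth(singleAuth{Username: "user", Password: "secret"})
    if err != nil {
        panic(err)
    }
    fmt.Println(h["X-Registry-Auth"])
}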
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
index 7d7cf496a..f11ee0ee3 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go
@@ -10,6 +10,7 @@ import (
"compress/gzip"
"fmt"
"io"
+ "log"
"os"
"path/filepath"
"strings"
@@ -18,7 +19,6 @@ import (
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
- "github.com/sirupsen/logrus"
)
const (
@@ -65,31 +65,30 @@ type WhiteoutFormat int
// TarOptions wraps the tar options.
type TarOptions struct {
- IncludeFiles []string
- ExcludePatterns []string
- Compression Compression
- NoLchown bool
- UIDMaps []idtools.IDMap
- GIDMaps []idtools.IDMap
- ChownOpts *idtools.Identity
- IncludeSourceDir bool
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.Identity
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
- NoOverwriteDirNonDir bool
// For each include when creating an archive, the included name will be
// replaced with the matching name from this map.
- RebaseNames map[string]string
- InUserNS bool
+ RebaseNames map[string]string
+ NoLchown bool
+ InUserNS bool
+ IncludeSourceDir bool
+ NoOverwriteDirNonDir bool
}
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
-
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
@@ -117,13 +116,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
- logrus.Errorf("Can't close tar writer: %s", err)
+ log.Printf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
- logrus.Errorf("Can't close compress writer: %s", err)
+ log.Printf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
- logrus.Errorf("Can't close pipe writer: %s", err)
+ log.Printf("Can't close pipe writer: %s", err)
}
}()
@@ -146,7 +145,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
- logrus.Warn("Tar: Can't archive a file with includes")
+ log.Print("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
@@ -161,12 +160,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
seen := make(map[string]bool)
for _, include := range options.IncludeFiles {
+ include := include
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
- logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ log.Printf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
@@ -191,7 +191,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
skip, err = pm.Matches(relFilePath)
if err != nil {
- logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ log.Printf("Error matching %s: %v", relFilePath, err)
return err
}
}
@@ -247,7 +247,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
- logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ log.Printf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
return err
@@ -276,8 +276,10 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
+ //lint:ignore ST1005 this is vendored/copied code
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
+ //lint:ignore ST1005 this is vendored/copied code
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
@@ -355,13 +357,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
- //check whether the file is overlayfs whiteout
- //if yes, skip re-mapping container ID mappings.
+ // check whether the file is overlayfs whiteout
+ // if yes, skip re-mapping container ID mappings.
isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
- //handle re-mapping container ID mappings back to host ID mappings before
- //writing tar headers/files. We skip whiteout files because they were written
- //by the kernel and already have proper ownership relative to the host
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
if !isOverlayWhiteout &&
!strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
!ta.IdentityMapping.Empty() {
@@ -437,7 +439,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
+ //lint:ignore SA1019 this is vendored/copied code
hdr.Xattrs = make(map[string]string)
+ //lint:ignore SA1019 this is vendored/copied code
hdr.Xattrs["security.capability"] = string(capability)
}
return nil
@@ -490,7 +494,7 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
}
// canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
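TarWithOptions above streams a (possibly compressed) tar of a directory through an io.Pipe while filtering entries. As a rough, self-contained illustration of that shape — not the vendored implementation, and with a much cruder substring filter than its pattern matcher — a gzipped variant might look like:

package main

import (
    "archive/tar"
    "compress/gzip"
    "io"
    "log"
    "os"
    "path/filepath"
    "strings"
)

// tarDirectory streams a gzipped tar of srcPath, skipping any relative path
// that contains one of the exclude substrings.
func tarDirectory(srcPath string, excludes []string) io.ReadCloser {
    pr, pw := io.Pipe()
    go func() {
        gzw := gzip.NewWriter(pw)
        tw := tar.NewWriter(gzw)
        err := filepath.Walk(srcPath, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            rel, err := filepath.Rel(srcPath, path)
            if err != nil || rel == "." {
                return err
            }
            for _, ex := range excludes {
                if strings.Contains(rel, ex) {
                    if info.IsDir() {
                        return filepath.SkipDir
                    }
                    return nil
                }
            }
            if !info.IsDir() && !info.Mode().IsRegular() {
                return nil // skip symlinks, devices, etc. to keep the sketch short
            }
            hdr, err := tar.FileInfoHeader(info, "")
            if err != nil {
                return err
            }
            hdr.Name = filepath.ToSlash(rel)
            if err := tw.WriteHeader(hdr); err != nil {
                return err
            }
            if info.IsDir() {
                return nil
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(tw, f)
            return err
        })
        // Close the writers in order and hand the first error to the reader.
        if cerr := tw.Close(); err == nil {
            err = cerr
        }
        if cerr := gzw.Close(); err == nil {
            err = cerr
        }
        pw.CloseWithError(err)
    }()
    return pr
}

func main() {
    rc := tarDirectory(".", []string{".git"})
    defer rc.Close()
    n, err := io.Copy(io.Discard, rc)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("archived %d bytes", n)
}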
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
index 9e1f3f2f1..e2059e489 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go
@@ -49,7 +49,9 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
+ //lint:ignore SA1019 this is vendored/copied code
if hdr.Xattrs != nil {
+ //lint:ignore SA1019 this is vendored/copied code
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
index 80199d513..bb6bf7145 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go
@@ -42,7 +42,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
- inode = uint64(s.Ino)
+ inode = s.Ino
}
return
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
index c47768e68..33c1dff03 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go
@@ -24,10 +24,10 @@ func CanonicalTarNameForPath(p string) (string, error) {
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
+ //lint:ignore ST1005 Windows should be capitalized :)
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
}
// fixVolumePathPrefix does platform specific processing to ensure that if
@@ -55,7 +55,7 @@ func getFileIdentity(stat interface{}) (idtools.Identity, error) {
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
- //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
index 71b3395ce..99a32ae05 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go
@@ -12,9 +12,9 @@ import (
"strings"
"time"
- "github.com/Nvveen/Gotty"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
"github.com/fsouza/go-dockerclient/internal/term"
+ gotty "github.com/ijc/Gotty"
)
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
@@ -144,13 +144,13 @@ type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
- ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ProgressMessage string `json:"progress,omitempty"` // deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
- ErrorMessage string `json:"error,omitempty"` //deprecated
+ ErrorMessage string `json:"error,omitempty"` // deprecated
// Aux contains out-of-band data, such as digests for push signing and image id after building.
Aux *json.RawMessage `json:"aux,omitempty"`
}
@@ -166,43 +166,72 @@ func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error)
return "", fmt.Errorf("noTermInfo")
}
-func clearLine(out io.Writer, ti termInfo) {
+func clearLine(out io.Writer, ti termInfo) error {
// el2 (clear whole line) is not exposed by terminfo.
// First clear line from beginning to cursor
if attr, err := ti.Parse("el1"); err == nil {
- fmt.Fprintf(out, "%s", attr)
+ _, err = fmt.Fprintf(out, "%s", attr)
+ if err != nil {
+ return err
+ }
} else {
- fmt.Fprintf(out, "\x1b[1K")
+ _, err := fmt.Fprintf(out, "\x1b[1K")
+ if err != nil {
+ return err
+ }
}
// Then clear line from cursor to end
if attr, err := ti.Parse("el"); err == nil {
- fmt.Fprintf(out, "%s", attr)
+ _, err = fmt.Fprintf(out, "%s", attr)
+ if err != nil {
+ return err
+ }
} else {
- fmt.Fprintf(out, "\x1b[K")
+ _, err := fmt.Fprintf(out, "\x1b[K")
+ if err != nil {
+ return err
+ }
}
+
+ return nil
}
-func cursorUp(out io.Writer, ti termInfo, l int) {
+func cursorUp(out io.Writer, ti termInfo, l int) error {
if l == 0 { // Should never be the case, but be tolerant
- return
+ return nil
}
if attr, err := ti.Parse("cuu", l); err == nil {
- fmt.Fprintf(out, "%s", attr)
+ _, err = fmt.Fprintf(out, "%s", attr)
+ if err != nil {
+ return err
+ }
} else {
- fmt.Fprintf(out, "\x1b[%dA", l)
+ _, err := fmt.Fprintf(out, "\x1b[%dA", l)
+ if err != nil {
+ return err
+ }
}
+ return nil
}
-func cursorDown(out io.Writer, ti termInfo, l int) {
+func cursorDown(out io.Writer, ti termInfo, l int) error {
if l == 0 { // Should never be the case, but be tolerant
- return
+ return nil
}
if attr, err := ti.Parse("cud", l); err == nil {
- fmt.Fprintf(out, "%s", attr)
+ _, err = fmt.Fprintf(out, "%s", attr)
+ if err != nil {
+ return err
+ }
} else {
- fmt.Fprintf(out, "\x1b[%dB", l)
+ _, err := fmt.Fprintf(out, "\x1b[%dB", l)
+ if err != nil {
+ return err
+ }
}
+
+ return nil
}
// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
@@ -219,29 +248,56 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
clearLine(out, termInfo)
endl = "\r"
- fmt.Fprintf(out, endl)
- } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+ _, err := fmt.Fprint(out, endl)
+ if err != nil {
+ return err
+ }
+ } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
- fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ _, err := fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ if err != nil {
+ return err
+ }
} else if jm.Time != 0 {
- fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ _, err := fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ if err != nil {
+ return err
+ }
}
if jm.ID != "" {
- fmt.Fprintf(out, "%s: ", jm.ID)
+ _, err := fmt.Fprintf(out, "%s: ", jm.ID)
+ if err != nil {
+ return err
+ }
}
if jm.From != "" {
- fmt.Fprintf(out, "(from %s) ", jm.From)
+ _, err := fmt.Fprintf(out, "(from %s) ", jm.From)
+ if err != nil {
+ return err
+ }
}
if jm.Progress != nil && termInfo != nil {
- fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
- } else if jm.ProgressMessage != "" { //deprecated
- fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ if err != nil {
+ return err
+ }
+ } else if jm.ProgressMessage != "" { // deprecated
+ _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ if err != nil {
+ return err
+ }
} else if jm.Stream != "" {
- fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ _, err := fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ if err != nil {
+ return err
+ }
} else {
- fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ _, err := fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ if err != nil {
+ return err
+ }
}
return nil
}
@@ -301,12 +357,17 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
line = len(ids)
ids[jm.ID] = line
if termInfo != nil {
- fmt.Fprintf(out, "\n")
+ _, err := fmt.Fprintf(out, "\n")
+ if err != nil {
+ return err
+ }
}
}
diff = len(ids) - line
if termInfo != nil {
- cursorUp(out, termInfo, diff)
+ if err := cursorUp(out, termInfo, diff); err != nil {
+ return err
+ }
}
} else {
// When outputting something that isn't progress
@@ -318,7 +379,9 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
}
err := jm.Display(out, termInfo)
if jm.ID != "" && termInfo != nil {
- cursorDown(out, termInfo, diff)
+ if err := cursorDown(out, termInfo, diff); err != nil {
+ return err
+ }
}
if err != nil {
return err
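Most of the jsonmessage changes above just stop discarding the error returned by fmt.Fprintf. A small standalone illustration of why that matters: write failures, such as a closed pipe, are only visible through that second return value.

package main

import (
    "fmt"
    "io"
    "log"
)

// write follows the pattern adopted above: surface the writer's error
// instead of discarding it.
func write(out io.Writer, status string) error {
    _, err := fmt.Fprintf(out, "%s\n", status)
    return err
}

// failingWriter simulates a broken output stream.
type failingWriter struct{}

func (failingWriter) Write(p []byte) (int, error) { return 0, io.ErrClosedPipe }

func main() {
    if err := write(failingWriter{}, "Pulling fs layer"); err != nil {
        log.Println("write failed:", err)
    }
}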
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
index af06911d8..7d3c11358 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go
@@ -8,6 +8,4 @@ package term
type Winsize struct {
Height uint16
Width uint16
- x uint16
- y uint16
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
index 2a9964a0d..92a80a308 100644
--- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
+++ b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go
@@ -11,6 +11,6 @@ import "golang.org/x/sys/unix"
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
- ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
+ ws := &Winsize{Height: uws.Row, Width: uws.Col}
return ws, err
}
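With the x and y pixel fields dropped, Winsize now carries only rows and columns. A hedged sketch of the underlying ioctl on a Unix terminal, calling golang.org/x/sys/unix directly because the vendored term package is internal:

//go:build linux || darwin

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    // Same ioctl the vendored GetWinsize wraps; Xpixel/Ypixel are ignored,
    // mirroring the trimmed Winsize struct above.
    ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
}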
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
index 1fc37b14e..01fd1f687 100644
--- a/vendor/github.com/fsouza/go-dockerclient/misc.go
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -17,7 +17,7 @@ import (
//
// See https://goo.gl/mU7yje for more details.
func (c *Client) Version() (*Env, error) {
- return c.VersionWithContext(nil)
+ return c.VersionWithContext(context.TODO())
}
// VersionWithContext returns version information about the docker server.
@@ -48,19 +48,6 @@ type DockerInfo struct {
DriverStatus [][2]string
SystemStatus [][2]string
Plugins PluginsInfo
- MemoryLimit bool
- SwapLimit bool
- KernelMemory bool
- CPUCfsPeriod bool `json:"CpuCfsPeriod"`
- CPUCfsQuota bool `json:"CpuCfsQuota"`
- CPUShares bool
- CPUSet bool
- IPv4Forwarding bool
- BridgeNfIptables bool
- BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
- Debug bool
- OomKillDisable bool
- ExperimentalBuild bool
NFd int
NGoroutines int
SystemTime string
@@ -85,12 +72,34 @@ type DockerInfo struct {
Labels []string
ServerVersion string
ClusterStore string
+ Runtimes map[string]Runtime
ClusterAdvertise string
Isolation string
InitBinary string
DefaultRuntime string
- LiveRestoreEnabled bool
Swarm swarm.Info
+ LiveRestoreEnabled bool
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ OomKillDisable bool
+ ExperimentalBuild bool
+}
+
+// Runtime describes an OCI runtime
+//
+// for more information, see: https://dockr.ly/2NKM8qq
+type Runtime struct {
+ Path string
+ Args []string `json:"runtimeArgs"`
}
// PluginsInfo is a struct with the plugins registered with the docker daemon
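DockerInfo gains a Runtimes map describing the daemon's OCI runtimes. Assuming the client's existing Info() helper and NewClientFromEnv constructor (neither is shown in this hunk), reading it could look roughly like:

package main

import (
    "fmt"
    "log"

    docker "github.com/fsouza/go-dockerclient"
)

func main() {
    client, err := docker.NewClientFromEnv()
    if err != nil {
        log.Fatal(err)
    }
    info, err := client.Info()
    if err != nil {
        log.Fatal(err)
    }
    // Runtimes maps a runtime name to its binary path and arguments.
    for name, rt := range info.Runtimes {
        fmt.Printf("%s -> %s %v\n", name, rt.Path, rt.Args)
    }
    fmt.Println("default runtime:", info.DefaultRuntime)
}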
diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go
index 957015907..088790313 100644
--- a/vendor/github.com/fsouza/go-dockerclient/plugin.go
+++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go
@@ -40,10 +40,10 @@ func (c *Client) InstallPlugins(opts InstallPluginOptions) error {
data: opts.Plugins,
context: opts.Context,
})
- defer resp.Body.Close()
if err != nil {
return err
}
+ resp.Body.Close()
return nil
}
@@ -288,7 +288,6 @@ type EnablePluginOptions struct {
func (c *Client) EnablePlugin(opts EnablePluginOptions) error {
path := "/plugins/" + opts.Name + "/enable?" + queryString(opts)
resp, err := c.do("POST", path, doOptions{context: opts.Context})
- defer resp.Body.Close()
if err != nil {
return err
}
@@ -312,7 +311,6 @@ type DisablePluginOptions struct {
func (c *Client) DisablePlugin(opts DisablePluginOptions) error {
path := "/plugins/" + opts.Name + "/disable"
resp, err := c.do("POST", path, doOptions{context: opts.Context})
- defer resp.Body.Close()
if err != nil {
return err
}
@@ -339,11 +337,12 @@ func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) {
path := "/plugins/create?" + queryString(opts)
resp, err := c.do("POST", path, doOptions{
data: opts.Path,
- context: opts.Context})
- defer resp.Body.Close()
+ context: opts.Context,
+ })
if err != nil {
return "", err
}
+ defer resp.Body.Close()
containerNameBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
@@ -367,10 +366,10 @@ type PushPluginOptions struct {
func (c *Client) PushPlugin(opts PushPluginOptions) error {
path := "/plugins/" + opts.Name + "/push"
resp, err := c.do("POST", path, doOptions{context: opts.Context})
- defer resp.Body.Close()
if err != nil {
return err
}
+ resp.Body.Close()
return nil
}
@@ -394,13 +393,13 @@ func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error {
data: opts.Envs,
context: opts.Context,
})
- defer resp.Body.Close()
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchPlugin{ID: opts.Name}
}
return err
}
+ resp.Body.Close()
return nil
}
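The plugin.go hunks above all fix the same bug: resp.Body.Close() was deferred before the error check, so a failed request (where resp may be nil) could panic. A generic net/http sketch of the corrected ordering:

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
)

func fetch(url string) error {
    resp, err := http.Get(url)
    if err != nil {
        // resp may be nil here, so closing its body would panic;
        // check the error before touching the response.
        return err
    }
    defer resp.Body.Close()
    _, err = io.Copy(io.Discard, resp.Body)
    return err
}

func main() {
    if err := fetch("https://example.com/"); err != nil {
        log.Fatal(err)
    }
    fmt.Println("ok")
}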
diff --git a/vendor/github.com/fsouza/go-dockerclient/registry_auth.go b/vendor/github.com/fsouza/go-dockerclient/registry_auth.go
new file mode 100644
index 000000000..1f60d1e8f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/registry_auth.go
@@ -0,0 +1,10 @@
+// Copyright 2013 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+type registryAuth interface {
+ isEmpty() bool
+ headerKey() string
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
index ca7e23725..d9c4b2acc 100644
--- a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
@@ -176,10 +176,10 @@ type LogsServiceOptions struct {
ErrorStream io.Writer `qs:"-"`
InactivityTimeout time.Duration `qs:"-"`
Tail string
+ Since int64
// Use raw terminal? Usually true when the container contains a TTY.
RawTerminal bool `qs:"-"`
- Since int64
Follow bool
Stdout bool
Stderr bool
diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go
new file mode 100644
index 000000000..a43dfb5a2
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/system.go
@@ -0,0 +1,72 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+)
+
+// VolumeUsageData represents usage data from the docker system api
+// More Info Here https://dockr.ly/2PNzQyO
+type VolumeUsageData struct {
+
+ // The number of containers referencing this volume. This field
+ // is set to `-1` if the reference-count is not available.
+ //
+ // Required: true
+ RefCount int64 `json:"RefCount"`
+
+ // Amount of disk space used by the volume (in bytes). This information
+ // is only available for volumes created with the `"local"` volume
+ // driver. For volumes created with other volume drivers, this field
+ // is set to `-1` ("not available")
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+}
+
+// ImageSummary represents data about what images are
+// currently known to docker
+// More Info Here https://dockr.ly/2PNzQyO
+type ImageSummary struct {
+ Containers int64 `json:"Containers"`
+ Created int64 `json:"Created"`
+ ID string `json:"Id"`
+ Labels map[string]string `json:"Labels"`
+ ParentID string `json:"ParentId"`
+ RepoDigests []string `json:"RepoDigests"`
+ RepoTags []string `json:"RepoTags"`
+ SharedSize int64 `json:"SharedSize"`
+ Size int64 `json:"Size"`
+ VirtualSize int64 `json:"VirtualSize"`
+}
+
+// DiskUsage holds information about what docker is using disk space on.
+// More Info Here https://dockr.ly/2PNzQyO
+type DiskUsage struct {
+ LayersSize int64
+ Images []*ImageSummary
+ Containers []*APIContainers
+ Volumes []*Volume
+}
+
+// DiskUsageOptions only contains a context for canceling.
+type DiskUsageOptions struct {
+ Context context.Context
+}
+
+// DiskUsage returns a *DiskUsage describing what docker is using disk on.
+//
+// More Info Here https://dockr.ly/2PNzQyO
+func (c *Client) DiskUsage(opts DiskUsageOptions) (*DiskUsage, error) {
+ path := "/system/df"
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var du *DiskUsage
+ if err := json.NewDecoder(resp.Body).Decode(&du); err != nil {
+ return nil, err
+ }
+ return du, nil
+}
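A hedged usage sketch for the new DiskUsage call; client construction via NewClientFromEnv is assumed from the existing API rather than shown in this diff:

package main

import (
    "context"
    "fmt"
    "log"

    docker "github.com/fsouza/go-dockerclient"
)

func main() {
    client, err := docker.NewClientFromEnv()
    if err != nil {
        log.Fatal(err)
    }
    // DiskUsage hits /system/df and decodes the layer/image/container/volume summary.
    du, err := client.DiskUsage(docker.DiskUsageOptions{Context: context.Background()})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("layers: %d bytes, images: %d, containers: %d, volumes: %d\n",
        du.LayersSize, len(du.Images), len(du.Containers), len(du.Volumes))
}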
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
index 5f0e2e31e..07661f3d1 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tls.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -38,7 +38,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
- deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
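time.Until(deadline) is equivalent to deadline.Sub(time.Now()); the one-line change above is purely a readability cleanup. For reference:

package main

import (
    "fmt"
    "time"
)

func main() {
    deadline := time.Now().Add(90 * time.Second)
    // The two expressions below compute the same duration; time.Until is
    // the form the diff above switches to.
    fmt.Println(deadline.Sub(time.Now()))
    fmt.Println(time.Until(deadline))
}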
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 000000000..e256a31e0
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 000000000..0e9d6edc0
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+ - 1.3
+ - 1.4
+script:
+ - go test
+ - go build
diff --git a/vendor/github.com/godbus/dbus/.travis.yml b/vendor/github.com/godbus/dbus/.travis.yml
new file mode 100644
index 000000000..9cd57f432
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/.travis.yml
@@ -0,0 +1,46 @@
+dist: precise
+language: go
+go_import_path: github.com/godbus/dbus
+sudo: true
+
+go:
+ - 1.7.3
+ - 1.8.7
+ - 1.9.5
+ - 1.10.1
+ - tip
+
+env:
+ global:
+ matrix:
+ - TARGET=amd64
+ - TARGET=arm64
+ - TARGET=arm
+ - TARGET=386
+ - TARGET=ppc64le
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+ exclude:
+ - go: tip
+ env: TARGET=arm
+ - go: tip
+ env: TARGET=arm64
+ - go: tip
+ env: TARGET=386
+ - go: tip
+ env: TARGET=ppc64le
+
+addons:
+ apt:
+ packages:
+ - dbus
+ - dbus-x11
+
+before_install:
+
+script:
+ - go test -v -race ./... # Run all the tests with the race detector enabled
+ - go vet ./... # go vet is the official Go static analyzer
diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
new file mode 100644
index 000000000..c88f9b2bd
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various
+git tools.
diff --git a/vendor/github.com/godbus/dbus/MAINTAINERS b/vendor/github.com/godbus/dbus/MAINTAINERS
new file mode 100644
index 000000000..27618c9cd
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/MAINTAINERS
@@ -0,0 +1,3 @@
+Brandon Philips <brandon@ifup.org> (@philips)
+Brian Waldon <brian@waldon.cc> (@bcwaldon)
+John Southworth <jsouthwo@brocade.com> (@jsouthworth)
diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown
index d37f4e2ed..fd2964875 100644
--- a/vendor/github.com/godbus/dbus/README.markdown
+++ b/vendor/github.com/godbus/dbus/README.markdown
@@ -14,7 +14,7 @@ D-Bus message bus system.
### Installation
-This packages requires Go 1.1. If you installed it and set up your GOPATH, just run:
+This package requires Go 1.7. If you installed it and set up your GOPATH, just run:
```
go get github.com/godbus/dbus
diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go
index 98017b693..b0dcb54e6 100644
--- a/vendor/github.com/godbus/dbus/auth.go
+++ b/vendor/github.com/godbus/dbus/auth.go
@@ -116,7 +116,6 @@ func (conn *Conn) Auth(methods []Auth) error {
return err
}
go conn.inWorker()
- go conn.outWorker()
return nil
}
}
diff --git a/vendor/github.com/godbus/dbus/auth_anonymous.go b/vendor/github.com/godbus/dbus/auth_anonymous.go
new file mode 100644
index 000000000..75f3ad34d
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth_anonymous.go
@@ -0,0 +1,16 @@
+package dbus
+
+// AuthAnonymous returns an Auth that uses the ANONYMOUS mechanism.
+func AuthAnonymous() Auth {
+ return &authAnonymous{}
+}
+
+type authAnonymous struct{}
+
+func (a *authAnonymous) FirstData() (name, resp []byte, status AuthStatus) {
+ return []byte("ANONYMOUS"), nil, AuthOk
+}
+
+func (a *authAnonymous) HandleData(data []byte) (resp []byte, status AuthStatus) {
+ return nil, AuthError
+}
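AuthAnonymous plugs into the existing Conn.Auth flow. A hedged sketch of opening a private session-bus connection and authenticating with the new mechanism (whether the bus accepts ANONYMOUS depends on its configuration); note that SessionBusPrivate now also accepts the functional options introduced later in this diff, such as WithHandler and WithSignalHandler:

package main

import (
    "log"

    "github.com/godbus/dbus"
)

func main() {
    // A private connection must be authenticated and greeted explicitly.
    conn, err := dbus.SessionBusPrivate()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    if err := conn.Auth([]dbus.Auth{dbus.AuthAnonymous()}); err != nil {
        log.Fatal(err)
    }
    if err := conn.Hello(); err != nil {
        log.Fatal(err)
    }
    log.Println("connected as", conn.Names()[0])
}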
diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go
index ba6e73f60..2cb189012 100644
--- a/vendor/github.com/godbus/dbus/call.go
+++ b/vendor/github.com/godbus/dbus/call.go
@@ -1,9 +1,12 @@
package dbus
import (
+ "context"
"errors"
)
+var errSignature = errors.New("dbus: mismatched signature")
+
// Call represents a pending or completed method call.
type Call struct {
Destination string
@@ -20,9 +23,25 @@ type Call struct {
// Holds the response once the call is done.
Body []interface{}
+
+ // tracks context and canceler
+ ctx context.Context
+ ctxCanceler context.CancelFunc
}
-var errSignature = errors.New("dbus: mismatched signature")
+func (c *Call) Context() context.Context {
+ if c.ctx == nil {
+ return context.Background()
+ }
+
+ return c.ctx
+}
+
+func (c *Call) ContextCancel() {
+ if c.ctxCanceler != nil {
+ c.ctxCanceler()
+ }
+}
// Store stores the body of the reply into the provided pointers. It returns
// an error if the signatures of the body and retvalues don't match, or if
@@ -34,3 +53,8 @@ func (c *Call) Store(retvalues ...interface{}) error {
return Store(c.Body, retvalues...)
}
+
+func (c *Call) done() {
+ c.Done <- c
+ c.ContextCancel()
+}
diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go
index 5720e2ebb..b38920baf 100644
--- a/vendor/github.com/godbus/dbus/conn.go
+++ b/vendor/github.com/godbus/dbus/conn.go
@@ -1,6 +1,7 @@
package dbus
import (
+ "context"
"errors"
"io"
"os"
@@ -14,7 +15,6 @@ var (
systemBusLck sync.Mutex
sessionBus *Conn
sessionBusLck sync.Mutex
- sessionEnvLck sync.Mutex
)
// ErrClosed is the error returned by calls on a closed connection.
@@ -35,23 +35,13 @@ type Conn struct {
unixFD bool
uuid string
- names []string
- namesLck sync.RWMutex
-
- serialLck sync.Mutex
- nextSerial uint32
- serialUsed map[uint32]bool
-
- calls map[uint32]*Call
- callsLck sync.RWMutex
-
- handler Handler
-
- out chan *Message
- closed bool
- outLck sync.RWMutex
-
+ handler Handler
signalHandler SignalHandler
+ serialGen SerialGenerator
+
+ names *nameTracker
+ calls *callTracker
+ outHandler *outputHandler
eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex
@@ -87,32 +77,31 @@ func SessionBus() (conn *Conn, err error) {
}
func getSessionBusAddress() (string, error) {
- sessionEnvLck.Lock()
- defer sessionEnvLck.Unlock()
- address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
- if address != "" && address != "autolaunch:" {
+ if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" {
+ return address, nil
+
+ } else if address := tryDiscoverDbusSessionBusAddress(); address != "" {
+ os.Setenv("DBUS_SESSION_BUS_ADDRESS", address)
return address, nil
}
return getSessionBusPlatformAddress()
}
// SessionBusPrivate returns a new private connection to the session bus.
-func SessionBusPrivate() (*Conn, error) {
+func SessionBusPrivate(opts ...ConnOption) (*Conn, error) {
address, err := getSessionBusAddress()
if err != nil {
return nil, err
}
- return Dial(address)
+ return Dial(address, opts...)
}
// SessionBusPrivate returns a new private connection to the session bus.
+//
+// Deprecated: use SessionBusPrivate with options instead.
func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
- address, err := getSessionBusAddress()
- if err != nil {
- return nil, err
- }
- return DialHandler(address, handler, signalHandler)
+ return SessionBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler))
}
// SystemBus returns a shared connection to the system bus, connecting to it if
@@ -145,53 +134,93 @@ func SystemBus() (conn *Conn, err error) {
}
// SystemBusPrivate returns a new private connection to the system bus.
-func SystemBusPrivate() (*Conn, error) {
- return Dial(getSystemBusPlatformAddress())
+func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
+ return Dial(getSystemBusPlatformAddress(), opts...)
}
// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers.
+//
+// Deprecated: use SystemBusPrivate with options instead.
func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
- return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler)
+ return SystemBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler))
}
// Dial establishes a new private connection to the message bus specified by address.
-func Dial(address string) (*Conn, error) {
+func Dial(address string, opts ...ConnOption) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
- return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler())
+ return newConn(tr, opts...)
}
// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers.
+//
+// Deprecated: use Dial with options instead.
func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) {
- tr, err := getTransport(address)
- if err != nil {
- return nil, err
+ return Dial(address, WithSignalHandler(signalHandler))
+}
+
+// ConnOption is a connection option.
+type ConnOption func(conn *Conn) error
+
+// WithHandler overrides the default handler.
+func WithHandler(handler Handler) ConnOption {
+ return func(conn *Conn) error {
+ conn.handler = handler
+ return nil
+ }
+}
+
+// WithSignalHandler overrides the default signal handler.
+func WithSignalHandler(handler SignalHandler) ConnOption {
+ return func(conn *Conn) error {
+ conn.signalHandler = handler
+ return nil
+ }
+}
+
+// WithSerialGenerator overrides the default signals generator.
+func WithSerialGenerator(gen SerialGenerator) ConnOption {
+ return func(conn *Conn) error {
+ conn.serialGen = gen
+ return nil
}
- return newConn(tr, handler, signalHandler)
}
// NewConn creates a new private *Conn from an already established connection.
-func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
- return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler())
+func NewConn(conn io.ReadWriteCloser, opts ...ConnOption) (*Conn, error) {
+ return newConn(genericTransport{conn}, opts...)
}
// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers.
+//
+// Deprecated: use NewConn with options instead.
func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) {
- return newConn(genericTransport{conn}, handler, signalHandler)
+ return NewConn(genericTransport{conn}, WithHandler(handler), WithSignalHandler(signalHandler))
}
// newConn creates a new *Conn from a transport.
-func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn := new(Conn)
conn.transport = tr
- conn.calls = make(map[uint32]*Call)
- conn.out = make(chan *Message, 10)
- conn.handler = handler
- conn.signalHandler = signalHandler
- conn.nextSerial = 1
- conn.serialUsed = map[uint32]bool{0: true}
+ for _, opt := range opts {
+ if err := opt(conn); err != nil {
+ return nil, err
+ }
+ }
+ conn.calls = newCallTracker()
+ if conn.handler == nil {
+ conn.handler = NewDefaultHandler()
+ }
+ if conn.signalHandler == nil {
+ conn.signalHandler = NewDefaultSignalHandler()
+ }
+ if conn.serialGen == nil {
+ conn.serialGen = newSerialGenerator()
+ }
+ conn.outHandler = &outputHandler{conn: conn}
+ conn.names = newNameTracker()
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
return conn, nil
}
@@ -206,18 +235,7 @@ func (conn *Conn) BusObject() BusObject {
// and the channels passed to Eavesdrop and Signal are closed. This method must
// not be called on shared connections.
func (conn *Conn) Close() error {
- conn.outLck.Lock()
- if conn.closed {
- // inWorker calls Close on read error, the read error may
- // be caused by another caller calling Close to shutdown the
- // dbus connection, a double-close scenario we prevent here.
- conn.outLck.Unlock()
- return nil
- }
- close(conn.out)
- conn.closed = true
- conn.outLck.Unlock()
-
+ conn.outHandler.close()
if term, ok := conn.signalHandler.(Terminator); ok {
term.Terminate()
}
@@ -249,17 +267,9 @@ func (conn *Conn) Eavesdrop(ch chan<- *Message) {
conn.eavesdroppedLck.Unlock()
}
-// getSerial returns an unused serial.
+// GetSerial returns an unused serial.
func (conn *Conn) getSerial() uint32 {
- conn.serialLck.Lock()
- defer conn.serialLck.Unlock()
- n := conn.nextSerial
- for conn.serialUsed[n] {
- n++
- }
- conn.serialUsed[n] = true
- conn.nextSerial = n + 1
- return n
+ return conn.serialGen.GetSerial()
}
// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
@@ -271,10 +281,7 @@ func (conn *Conn) Hello() error {
if err != nil {
return err
}
- conn.namesLck.Lock()
- conn.names = make([]string, 1)
- conn.names[0] = s
- conn.namesLck.Unlock()
+ conn.names.acquireUniqueConnectionName(s)
return nil
}
@@ -283,109 +290,48 @@ func (conn *Conn) Hello() error {
func (conn *Conn) inWorker() {
for {
msg, err := conn.ReadMessage()
- if err == nil {
- conn.eavesdroppedLck.Lock()
- if conn.eavesdropped != nil {
- select {
- case conn.eavesdropped <- msg:
- default:
- }
- conn.eavesdroppedLck.Unlock()
- continue
- }
- conn.eavesdroppedLck.Unlock()
- dest, _ := msg.Headers[FieldDestination].value.(string)
- found := false
- if dest == "" {
- found = true
- } else {
- conn.namesLck.RLock()
- if len(conn.names) == 0 {
- found = true
- }
- for _, v := range conn.names {
- if dest == v {
- found = true
- break
- }
- }
- conn.namesLck.RUnlock()
- }
- if !found {
- // Eavesdropped a message, but no channel for it is registered.
- // Ignore it.
- continue
- }
- switch msg.Type {
- case TypeMethodReply, TypeError:
- serial := msg.Headers[FieldReplySerial].value.(uint32)
- conn.callsLck.Lock()
- if c, ok := conn.calls[serial]; ok {
- if msg.Type == TypeError {
- name, _ := msg.Headers[FieldErrorName].value.(string)
- c.Err = Error{name, msg.Body}
- } else {
- c.Body = msg.Body
- }
- c.Done <- c
- conn.serialLck.Lock()
- delete(conn.serialUsed, serial)
- conn.serialLck.Unlock()
- delete(conn.calls, serial)
- }
- conn.callsLck.Unlock()
- case TypeSignal:
- iface := msg.Headers[FieldInterface].value.(string)
- member := msg.Headers[FieldMember].value.(string)
- // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
- // sender is optional for signals.
- sender, _ := msg.Headers[FieldSender].value.(string)
- if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
- if member == "NameLost" {
- // If we lost the name on the bus, remove it from our
- // tracking list.
- name, ok := msg.Body[0].(string)
- if !ok {
- panic("Unable to read the lost name")
- }
- conn.namesLck.Lock()
- for i, v := range conn.names {
- if v == name {
- conn.names = append(conn.names[:i],
- conn.names[i+1:]...)
- }
- }
- conn.namesLck.Unlock()
- } else if member == "NameAcquired" {
- // If we acquired the name on the bus, add it to our
- // tracking list.
- name, ok := msg.Body[0].(string)
- if !ok {
- panic("Unable to read the acquired name")
- }
- conn.namesLck.Lock()
- conn.names = append(conn.names, name)
- conn.namesLck.Unlock()
- }
- }
- conn.handleSignal(msg)
- case TypeMethodCall:
- go conn.handleCall(msg)
+ if err != nil {
+ if _, ok := err.(InvalidMessageError); !ok {
+ // Some read error occurred (usually EOF); we can't really do
+ // anything but shut everything down and return errors to all
+ // pending replies.
+ conn.Close()
+ conn.calls.finalizeAllWithError(err)
+ return
}
- } else if _, ok := err.(InvalidMessageError); !ok {
- // Some read error occured (usually EOF); we can't really do
- // anything but to shut down all stuff and returns errors to all
- // pending replies.
- conn.Close()
- conn.callsLck.RLock()
- for _, v := range conn.calls {
- v.Err = err
- v.Done <- v
+ // invalid messages are ignored
+ continue
+ }
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
}
- conn.callsLck.RUnlock()
- return
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := dest == "" ||
+ !conn.names.uniqueNameIsKnown() ||
+ conn.names.isKnownName(dest)
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+ switch msg.Type {
+ case TypeError:
+ conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg))
+ case TypeMethodReply:
+ conn.serialGen.RetireSerial(conn.calls.handleReply(msg))
+ case TypeSignal:
+ conn.handleSignal(msg)
+ case TypeMethodCall:
+ go conn.handleCall(msg)
}
- // invalid messages are ignored
+
}
}
@@ -395,6 +341,25 @@ func (conn *Conn) handleSignal(msg *Message) {
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+ if member == "NameLost" {
+ // If we lost the name on the bus, remove it from our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the lost name")
+ }
+ conn.names.loseName(name)
+ } else if member == "NameAcquired" {
+ // If we acquired the name on the bus, add it to our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the acquired name")
+ }
+ conn.names.acquireName(name)
+ }
+ }
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
@@ -408,12 +373,7 @@ func (conn *Conn) handleSignal(msg *Message) {
// connection. The slice is always at least one element long, the first element
// being the unique name of the connection.
func (conn *Conn) Names() []string {
- conn.namesLck.RLock()
- // copy the slice so it can't be modified
- s := make([]string, len(conn.names))
- copy(s, conn.names)
- conn.namesLck.RUnlock()
- return s
+ return conn.names.listKnownNames()
}
// Object returns the object identified by the given destination name and path.
@@ -423,24 +383,17 @@ func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
// outWorker runs in an own goroutine, encoding and sending messages that are
// sent to conn.out.
-func (conn *Conn) outWorker() {
- for msg := range conn.out {
- err := conn.SendMessage(msg)
- conn.callsLck.RLock()
- if err != nil {
- if c := conn.calls[msg.serial]; c != nil {
- c.Err = err
- c.Done <- c
- }
- conn.serialLck.Lock()
- delete(conn.serialUsed, msg.serial)
- conn.serialLck.Unlock()
- } else if msg.Type != TypeMethodCall {
- conn.serialLck.Lock()
- delete(conn.serialUsed, msg.serial)
- conn.serialLck.Unlock()
- }
- conn.callsLck.RUnlock()
+func (conn *Conn) sendMessage(msg *Message) {
+ conn.sendMessageAndIfClosed(msg, func() {})
+}
+
+func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) {
+ err := conn.outHandler.sendAndIfClosed(msg, ifClosed)
+ conn.calls.handleSendError(msg, err)
+ if err != nil {
+ conn.serialGen.RetireSerial(msg.serial)
+ } else if msg.Type != TypeMethodCall {
+ conn.serialGen.RetireSerial(msg.serial)
}
}
@@ -451,8 +404,21 @@ func (conn *Conn) outWorker() {
// once the call is complete. Otherwise, ch is ignored and a Call structure is
// returned of which only the Err member is valid.
func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
- var call *Call
+ return conn.send(context.Background(), msg, ch)
+}
+// SendWithContext acts like Send but takes a context
+func (conn *Conn) SendWithContext(ctx context.Context, msg *Message, ch chan *Call) *Call {
+ return conn.send(ctx, msg, ch)
+}
+
+func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call {
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ var call *Call
+ ctx, canceler := context.WithCancel(ctx)
msg.serial = conn.getSerial()
if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
@@ -468,26 +434,23 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
call.Method = iface + "." + member
call.Args = msg.Body
call.Done = ch
- conn.callsLck.Lock()
- conn.calls[msg.serial] = call
- conn.callsLck.Unlock()
- conn.outLck.RLock()
- if conn.closed {
- call.Err = ErrClosed
- call.Done <- call
- } else {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
+ call.ctx = ctx
+ call.ctxCanceler = canceler
+ conn.calls.track(msg.serial, call)
+ go func() {
+ <-ctx.Done()
+ conn.calls.handleSendError(msg, ctx.Err())
+ }()
+ conn.sendMessageAndIfClosed(msg, func() {
+ conn.calls.handleSendError(msg, ErrClosed)
+ canceler()
+ })
} else {
- conn.outLck.RLock()
- if conn.closed {
+ canceler()
+ call = &Call{Err: nil}
+ conn.sendMessageAndIfClosed(msg, func() {
call = &Call{Err: ErrClosed}
- } else {
- conn.out <- msg
- call = &Call{Err: nil}
- }
- conn.outLck.RUnlock()
+ })
}
return call
}
@@ -520,11 +483,7 @@ func (conn *Conn) sendError(err error, dest string, serial uint32) {
if len(e.Body) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
}
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
+ conn.sendMessage(msg)
}
// sendReply creates a method reply message corresponding to the parameters and
@@ -542,11 +501,7 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- msg
- }
- conn.outLck.RUnlock()
+ conn.sendMessage(msg)
}
func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) {
@@ -681,3 +636,212 @@ func getKey(s, key string) string {
}
return ""
}
+
+type outputHandler struct {
+ conn *Conn
+ sendLck sync.Mutex
+ closed struct {
+ isClosed bool
+ lck sync.RWMutex
+ }
+}
+
+func (h *outputHandler) sendAndIfClosed(msg *Message, ifClosed func()) error {
+ h.closed.lck.RLock()
+ defer h.closed.lck.RUnlock()
+ if h.closed.isClosed {
+ ifClosed()
+ return nil
+ }
+ h.sendLck.Lock()
+ defer h.sendLck.Unlock()
+ return h.conn.SendMessage(msg)
+}
+
+func (h *outputHandler) close() {
+ h.closed.lck.Lock()
+ defer h.closed.lck.Unlock()
+ h.closed.isClosed = true
+}
+
+type serialGenerator struct {
+ lck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+}
+
+func newSerialGenerator() *serialGenerator {
+ return &serialGenerator{
+ serialUsed: map[uint32]bool{0: true},
+ nextSerial: 1,
+ }
+}
+
+func (gen *serialGenerator) GetSerial() uint32 {
+ gen.lck.Lock()
+ defer gen.lck.Unlock()
+ n := gen.nextSerial
+ for gen.serialUsed[n] {
+ n++
+ }
+ gen.serialUsed[n] = true
+ gen.nextSerial = n + 1
+ return n
+}
+
+func (gen *serialGenerator) RetireSerial(serial uint32) {
+ gen.lck.Lock()
+ defer gen.lck.Unlock()
+ delete(gen.serialUsed, serial)
+}
+
+type nameTracker struct {
+ lck sync.RWMutex
+ unique string
+ names map[string]struct{}
+}
+
+func newNameTracker() *nameTracker {
+ return &nameTracker{names: map[string]struct{}{}}
+}
+func (tracker *nameTracker) acquireUniqueConnectionName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ tracker.unique = name
+}
+func (tracker *nameTracker) acquireName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ tracker.names[name] = struct{}{}
+}
+func (tracker *nameTracker) loseName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ delete(tracker.names, name)
+}
+
+func (tracker *nameTracker) uniqueNameIsKnown() bool {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ return tracker.unique != ""
+}
+func (tracker *nameTracker) isKnownName(name string) bool {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ _, ok := tracker.names[name]
+ return ok || name == tracker.unique
+}
+func (tracker *nameTracker) listKnownNames() []string {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ out := make([]string, 0, len(tracker.names)+1)
+ out = append(out, tracker.unique)
+ for k := range tracker.names {
+ out = append(out, k)
+ }
+ return out
+}
+
+type callTracker struct {
+ calls map[uint32]*Call
+ lck sync.RWMutex
+}
+
+func newCallTracker() *callTracker {
+ return &callTracker{calls: map[uint32]*Call{}}
+}
+
+func (tracker *callTracker) track(sn uint32, call *Call) {
+ tracker.lck.Lock()
+ tracker.calls[sn] = call
+ tracker.lck.Unlock()
+}
+
+func (tracker *callTracker) handleReply(msg *Message) uint32 {
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ tracker.lck.RLock()
+ _, ok := tracker.calls[serial]
+ tracker.lck.RUnlock()
+ if ok {
+ tracker.finalizeWithBody(serial, msg.Body)
+ }
+ return serial
+}
+
+func (tracker *callTracker) handleDBusError(msg *Message) uint32 {
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ tracker.lck.RLock()
+ _, ok := tracker.calls[serial]
+ tracker.lck.RUnlock()
+ if ok {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ tracker.finalizeWithError(serial, Error{name, msg.Body})
+ }
+ return serial
+}
+
+func (tracker *callTracker) handleSendError(msg *Message, err error) {
+ if err == nil {
+ return
+ }
+ tracker.lck.RLock()
+ _, ok := tracker.calls[msg.serial]
+ tracker.lck.RUnlock()
+ if ok {
+ tracker.finalizeWithError(msg.serial, err)
+ }
+}
+
+// finalize is the only finalizer that does not signal the call's Done channel.
+func (tracker *callTracker) finalize(sn uint32) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ c.ContextCancel()
+ }
+ return
+}
+
+func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) {
+ tracker.lck.Lock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ }
+ tracker.lck.Unlock()
+ if ok {
+ c.Body = body
+ c.done()
+ }
+ return
+}
+
+func (tracker *callTracker) finalizeWithError(sn uint32, err error) {
+ tracker.lck.Lock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ }
+ tracker.lck.Unlock()
+ if ok {
+ c.Err = err
+ c.done()
+ }
+ return
+}
+
+func (tracker *callTracker) finalizeAllWithError(err error) {
+ tracker.lck.Lock()
+ closedCalls := make([]*Call, 0, len(tracker.calls))
+ for sn := range tracker.calls {
+ closedCalls = append(closedCalls, tracker.calls[sn])
+ }
+ tracker.calls = map[uint32]*Call{}
+ tracker.lck.Unlock()
+ for _, call := range closedCalls {
+ call.Err = err
+ call.done()
+ }
+}
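
The hunks above replace the old outWorker/outLck machinery with an outputHandler whose sendAndIfClosed either writes the message or, when the connection is already closed, runs a caller-supplied callback. Below is a minimal standalone sketch of that pattern (not part of this patch; names and types are simplified) that shows why callers such as Send and Emit can report ErrClosed without racing against Close:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// errClosed stands in for dbus.ErrClosed in this self-contained sketch.
var errClosed = errors.New("connection closed by user")

// outHandler mirrors the shape of the patch's outputHandler: a closed flag
// behind an RWMutex plus a separate mutex that serializes actual writes.
type outHandler struct {
	sendLck   sync.Mutex
	closedLck sync.RWMutex
	closed    bool
	write     func(msg string) error // stands in for conn.SendMessage
}

// sendAndIfClosed performs the write, or calls ifClosed instead when the
// handler has already been closed. Holding the read lock while writing means
// close() cannot slip in between the check and the send.
func (h *outHandler) sendAndIfClosed(msg string, ifClosed func()) error {
	h.closedLck.RLock()
	defer h.closedLck.RUnlock()
	if h.closed {
		ifClosed()
		return nil
	}
	h.sendLck.Lock()
	defer h.sendLck.Unlock()
	return h.write(msg)
}

func (h *outHandler) close() {
	h.closedLck.Lock()
	defer h.closedLck.Unlock()
	h.closed = true
}

func main() {
	h := &outHandler{write: func(msg string) error {
		fmt.Println("sent:", msg)
		return nil
	}}
	_ = h.sendAndIfClosed("Hello", func() { fmt.Println("dropped:", errClosed) })
	h.close()
	_ = h.sendAndIfClosed("MethodCall", func() { fmt.Println("dropped:", errClosed) })
}
```
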
diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go
index c015f80ce..6e2e40202 100644
--- a/vendor/github.com/godbus/dbus/conn_darwin.go
+++ b/vendor/github.com/godbus/dbus/conn_darwin.go
@@ -31,3 +31,7 @@ func getSystemBusPlatformAddress() string {
}
return defaultSystemBusAddress
}
+
+func tryDiscoverDbusSessionBusAddress() string {
+ return ""
+}
diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go
index 254c9f2ef..289044a44 100644
--- a/vendor/github.com/godbus/dbus/conn_other.go
+++ b/vendor/github.com/godbus/dbus/conn_other.go
@@ -6,12 +6,14 @@ import (
"bytes"
"errors"
"fmt"
+ "io/ioutil"
"os"
"os/exec"
+ "os/user"
+ "path"
+ "strings"
)
-const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
-
func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("dbus-launch")
b, err := cmd.CombinedOutput()
@@ -33,10 +35,57 @@ func getSessionBusPlatformAddress() (string, error) {
return addr, nil
}
-func getSystemBusPlatformAddress() string {
- address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
- if address != "" {
- return fmt.Sprintf("unix:path=%s", address)
+// tryDiscoverDbusSessionBusAddress tries to discover an existing dbus session
+// and return the value of its DBUS_SESSION_BUS_ADDRESS.
+// It tries different techniques employed by different operating systems,
+// returning the first valid address it finds, or an empty string.
+//
+// * /run/user/<uid>/bus: if this exists, it *is* the bus socket. Present on
+//   Ubuntu 18.04.
+// * /run/user/<uid>/dbus-session: if this exists, it can be parsed for the bus
+//   address. Present on Ubuntu 16.04.
+//
+// See https://dbus.freedesktop.org/doc/dbus-launch.1.html
+func tryDiscoverDbusSessionBusAddress() string {
+ if runtimeDirectory, err := getRuntimeDirectory(); err == nil {
+
+ if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
+ // if /run/user/<uid>/bus exists, that file itself
+ // *is* the unix socket, so return its path
+ return fmt.Sprintf("unix:path=%s", runUserBusFile)
+ }
+ if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
+ // if /run/user/<uid>/dbus-session exists, it's a
+			// text file containing the address of the socket, e.g.:
+ // DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG
+
+ if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil {
+ fileContent := string(f)
+
+ prefix := "DBUS_SESSION_BUS_ADDRESS="
+
+ if strings.HasPrefix(fileContent, prefix) {
+ address := strings.TrimRight(strings.TrimPrefix(fileContent, prefix), "\n\r")
+ return address
+ }
+ }
+ }
+ }
+ return ""
+}
+
+func getRuntimeDirectory() (string, error) {
+ if currentUser, err := user.Current(); err != nil {
+ return "", err
+ } else {
+ return fmt.Sprintf("/run/user/%s", currentUser.Uid), nil
+ }
+}
+
+func fileExists(filename string) bool {
+ if _, err := os.Stat(filename); !os.IsNotExist(err) {
+ return true
+ } else {
+ return false
}
- return defaultSystemBusAddress
}
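
For reference, the discovery order described in the comment above can be exercised on its own. The sketch below (not part of the patch) mirrors the same two lookups; the resulting address is what one would hand to something like dbus.Dial or SessionBusPrivate. Paths and behaviour follow the diff, everything else is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/user"
	"path"
	"strings"
)

// discoverSessionBusAddress mirrors the lookup order from the diff:
// /run/user/<uid>/bus is itself the socket; /run/user/<uid>/dbus-session
// is a text file containing DBUS_SESSION_BUS_ADDRESS=<address>.
func discoverSessionBusAddress() string {
	u, err := user.Current()
	if err != nil {
		return ""
	}
	runtimeDir := fmt.Sprintf("/run/user/%s", u.Uid)

	if busSocket := path.Join(runtimeDir, "bus"); fileExists(busSocket) {
		return "unix:path=" + busSocket
	}
	if sessionFile := path.Join(runtimeDir, "dbus-session"); fileExists(sessionFile) {
		if b, err := ioutil.ReadFile(sessionFile); err == nil {
			const prefix = "DBUS_SESSION_BUS_ADDRESS="
			if s := string(b); strings.HasPrefix(s, prefix) {
				return strings.TrimRight(strings.TrimPrefix(s, prefix), "\r\n")
			}
		}
	}
	return ""
}

func fileExists(name string) bool {
	_, err := os.Stat(name)
	return !os.IsNotExist(err)
}

func main() {
	fmt.Println("session bus address:", discoverSessionBusAddress())
}
```
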
diff --git a/vendor/github.com/godbus/dbus/conn_unix.go b/vendor/github.com/godbus/dbus/conn_unix.go
new file mode 100644
index 000000000..4cba8ae8e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_unix.go
@@ -0,0 +1,18 @@
+//+build !windows,!solaris,!darwin
+
+package dbus
+
+import (
+	"fmt"
+	"os"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return fmt.Sprintf("unix:path=%s", address)
+ }
+ return defaultSystemBusAddress
+}
\ No newline at end of file
diff --git a/vendor/github.com/godbus/dbus/conn_windows.go b/vendor/github.com/godbus/dbus/conn_windows.go
new file mode 100644
index 000000000..4291e4519
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_windows.go
@@ -0,0 +1,15 @@
+//+build windows
+
+package dbus
+
+import "os"
+
+const defaultSystemBusAddress = "tcp:host=127.0.0.1,port=12434"
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return address
+ }
+ return defaultSystemBusAddress
+}
diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go
index ef50dcab9..5c27d3b51 100644
--- a/vendor/github.com/godbus/dbus/decoder.go
+++ b/vendor/github.com/godbus/dbus/decoder.go
@@ -191,7 +191,14 @@ func (dec *decoder) decode(s string, depth int) interface{} {
length := dec.decode("u", depth).(uint32)
v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
// Even for empty arrays, the correct padding must be included
- dec.align(alignment(typeFor(s[1:])))
+ align := alignment(typeFor(s[1:]))
+		// Special case for arrays of structs:
+		// structs decode as a slice of interface{} values,
+		// but the D-Bus alignment of a struct (always 8) does not match this.
+ //but the dbus alignment does not match this
+ align = 8
+ }
+ dec.align(align)
spos := dec.pos
for dec.pos < spos+int(length) {
ev := dec.decode(s[1:], depth+1)
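
The special case relies on the D-Bus wire-format rule that STRUCT (and DICT_ENTRY) values are always aligned on 8-byte boundaries regardless of their members, while the reflect-based element type used for the slice would suggest a smaller alignment. A small sketch of the spec's alignment table follows; it is illustrative, not the library's own code:

```go
package main

import "fmt"

// alignmentOf returns the D-Bus alignment, in bytes, for the type denoted by
// the first character of a signature, per the wire-format specification.
// Structs '(' and dict entries '{' always align to 8 independently of their
// members, which is the property the decoder's array special case relies on.
func alignmentOf(sig string) int {
	switch sig[0] {
	case 'y', 'g', 'v': // BYTE, SIGNATURE, VARIANT
		return 1
	case 'n', 'q': // INT16, UINT16
		return 2
	case 'b', 'i', 'u', 'h', 's', 'o', 'a': // BOOLEAN, INT32, UINT32, UNIX_FD, STRING, OBJECT_PATH, ARRAY
		return 4
	case 'x', 't', 'd', '(', '{': // INT64, UINT64, DOUBLE, STRUCT, DICT_ENTRY
		return 8
	}
	return 1
}

func main() {
	// For "a(us)" the decoder aligns the element region to alignmentOf("(us)") == 8
	// after reading the 4-byte array length.
	for _, sig := range []string{"u", "s", "(us)", "a(us)"} {
		fmt.Printf("alignment(%q) = %d\n", sig, alignmentOf(sig))
	}
}
```
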
diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go
index e81f73ac5..81dbcc7e4 100644
--- a/vendor/github.com/godbus/dbus/default_handler.go
+++ b/vendor/github.com/godbus/dbus/default_handler.go
@@ -21,6 +21,8 @@ func newIntrospectIntf(h *defaultHandler) *exportedIntf {
//NewDefaultHandler returns an instance of the default
//call handler. This is useful if you want to implement only
//one of the two handlers but not both.
+//
+// Deprecated: this is the default value; don't use it directly, it will be unexported.
func NewDefaultHandler() *defaultHandler {
h := &defaultHandler{
objects: make(map[ObjectPath]*exportedObj),
@@ -161,6 +163,7 @@ func newExportedObject() *exportedObj {
}
type exportedObj struct {
+ mu sync.RWMutex
interfaces map[string]*exportedIntf
}
@@ -168,19 +171,27 @@ func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
if name == "" {
return obj, true
}
+ obj.mu.RLock()
+ defer obj.mu.RUnlock()
intf, exists := obj.interfaces[name]
return intf, exists
}
func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) {
+ obj.mu.Lock()
+ defer obj.mu.Unlock()
obj.interfaces[name] = iface
}
func (obj *exportedObj) DeleteInterface(name string) {
+ obj.mu.Lock()
+ defer obj.mu.Unlock()
delete(obj.interfaces, name)
}
func (obj *exportedObj) LookupMethod(name string) (Method, bool) {
+ obj.mu.RLock()
+ defer obj.mu.RUnlock()
for _, intf := range obj.interfaces {
method, exists := intf.LookupMethod(name)
if exists {
@@ -220,8 +231,12 @@ func (obj *exportedIntf) isFallbackInterface() bool {
//NewDefaultSignalHandler returns an instance of the default
//signal handler. This is useful if you want to implement only
//one of the two handlers but not both.
+//
+// Deprecated: this is the default value; don't use it directly, it will be unexported.
func NewDefaultSignalHandler() *defaultSignalHandler {
- return &defaultSignalHandler{}
+ return &defaultSignalHandler{
+ closeChan: make(chan struct{}),
+ }
}
func isDefaultSignalHandler(handler SignalHandler) bool {
@@ -231,32 +246,47 @@ func isDefaultSignalHandler(handler SignalHandler) bool {
type defaultSignalHandler struct {
sync.RWMutex
- closed bool
- signals []chan<- *Signal
+ closed bool
+ signals []chan<- *Signal
+ closeChan chan struct{}
}
func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
- go func() {
- sh.RLock()
- defer sh.RUnlock()
- if sh.closed {
+ sh.RLock()
+ defer sh.RUnlock()
+ if sh.closed {
+ return
+ }
+ for _, ch := range sh.signals {
+ select {
+ case ch <- signal:
+ case <-sh.closeChan:
return
+ default:
+ go func() {
+ select {
+ case ch <- signal:
+ case <-sh.closeChan:
+ return
+ }
+ }()
}
- for _, ch := range sh.signals {
- ch <- signal
- }
- }()
+ }
}
func (sh *defaultSignalHandler) Init() error {
sh.Lock()
sh.signals = make([]chan<- *Signal, 0)
+ sh.closeChan = make(chan struct{})
sh.Unlock()
return nil
}
func (sh *defaultSignalHandler) Terminate() {
sh.Lock()
+ if !sh.closed {
+ close(sh.closeChan)
+ }
sh.closed = true
for _, ch := range sh.signals {
close(ch)
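
The new DeliverSignal first attempts a non-blocking send and only falls back to a per-signal goroutine when the subscriber's buffer is full; both paths abort once closeChan is closed, so a slow consumer never blocks the reader loop and never leaks after Terminate. A standalone sketch of that pattern (simplified, not part of the patch):

```go
package main

import (
	"fmt"
	"time"
)

// deliver mirrors the pattern in the new DeliverSignal: try a non-blocking
// send; if the subscriber's buffer is full, hand the send off to a goroutine
// that also gives up when closeChan is closed.
func deliver(ch chan<- string, closeChan <-chan struct{}, signal string) {
	select {
	case ch <- signal:
	case <-closeChan:
	default:
		go func() {
			select {
			case ch <- signal:
			case <-closeChan:
			}
		}()
	}
}

func main() {
	signals := make(chan string, 1)
	closeChan := make(chan struct{})

	deliver(signals, closeChan, "first")  // fits in the buffer
	deliver(signals, closeChan, "second") // buffer full: delivered by a goroutine

	fmt.Println(<-signals)
	fmt.Println(<-signals)

	close(closeChan) // Terminate: any still-pending delivery goroutines exit
	time.Sleep(10 * time.Millisecond)
}
```
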
diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go
index aae970881..95d0e2958 100644
--- a/vendor/github.com/godbus/dbus/export.go
+++ b/vendor/github.com/godbus/dbus/export.go
@@ -170,11 +170,8 @@ func (conn *Conn) handleCall(msg *Message) {
reply.Body[i] = ret[i]
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
- conn.outLck.RLock()
- if !conn.closed {
- conn.out <- reply
- }
- conn.outLck.RUnlock()
+
+ conn.sendMessage(reply)
}
}
@@ -207,12 +204,14 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
- conn.outLck.RLock()
- defer conn.outLck.RUnlock()
- if conn.closed {
+
+ var closed bool
+ conn.sendMessageAndIfClosed(msg, func() {
+ closed = true
+ })
+ if closed {
return ErrClosed
}
- conn.out <- msg
return nil
}
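
With Emit now routed through sendMessageAndIfClosed, a closed connection is still surfaced as ErrClosed, only it is detected via the callback rather than by inspecting conn.closed under outLck. A hedged usage sketch; the object path, interface and arguments are made up:

```go
package main

import (
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Emit a signal; names and payload here are purely illustrative.
	err = conn.Emit("/com/example/App", "com.example.App.SomethingHappened", "payload", uint32(42))
	if err == dbus.ErrClosed {
		// Emit reports a closed connection as ErrClosed, set by the
		// ifClosed callback in the refactored send path.
		log.Println("connection already closed")
	} else if err != nil {
		log.Fatal(err)
	}
}
```
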
diff --git a/vendor/github.com/godbus/dbus/go.mod b/vendor/github.com/godbus/dbus/go.mod
new file mode 100644
index 000000000..bdcd12598
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/go.mod
@@ -0,0 +1 @@
+module github.com/godbus/dbus
diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go
index 6d95583d7..f27ffe144 100644
--- a/vendor/github.com/godbus/dbus/object.go
+++ b/vendor/github.com/godbus/dbus/object.go
@@ -1,6 +1,7 @@
package dbus
import (
+ "context"
"errors"
"strings"
)
@@ -9,7 +10,11 @@ import (
// invoked.
type BusObject interface {
Call(method string, flags Flags, args ...interface{}) *Call
+ CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call
Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ AddMatchSignal(iface, member string, options ...MatchOption) *Call
+ RemoveMatchSignal(iface, member string, options ...MatchOption) *Call
GetProperty(p string) (Variant, error)
Destination() string
Path() ObjectPath
@@ -24,16 +29,73 @@ type Object struct {
// Call calls a method with (*Object).Go and waits for its reply.
func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
- return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+ return <-o.createCall(context.Background(), method, flags, make(chan *Call, 1), args...).Done
}
-// AddMatchSignal subscribes BusObject to signals from specified interface and
-// method (member).
-func (o *Object) AddMatchSignal(iface, member string) *Call {
- return o.Call(
+// CallWithContext acts like Call but takes a context
+func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call {
+ return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done
+}
+
+// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers.
+// For full list of available options consult
+// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
+type MatchOption struct {
+ key string
+ value string
+}
+
+// WithMatchOption creates match option with given key and value
+func WithMatchOption(key, value string) MatchOption {
+ return MatchOption{key, value}
+}
+
+// WithMatchObjectPath creates match option that filters events based on given path
+func WithMatchObjectPath(path ObjectPath) MatchOption {
+ return MatchOption{"path", string(path)}
+}
+
+func formatMatchOptions(options []MatchOption) string {
+ items := make([]string, 0, len(options))
+ for _, option := range options {
+ items = append(items, option.key+"='"+option.value+"'")
+ }
+
+ return strings.Join(items, ",")
+}
+
+// AddMatchSignal subscribes BusObject to signals from specified interface,
+// method (member). Additional filter rules can be added via WithMatch* option constructors.
+// Note: To filter events by object path you have to specify this path via an option.
+func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call {
+ base := []MatchOption{
+ {"type", "signal"},
+ {"interface", iface},
+ {"member", member},
+ }
+
+ options = append(base, options...)
+ return o.conn.BusObject().Call(
"org.freedesktop.DBus.AddMatch",
0,
- "type='signal',interface='"+iface+"',member='"+member+"'",
+ formatMatchOptions(options),
+ )
+}
+
+// RemoveMatchSignal unsubscribes BusObject from signals from specified interface,
+// method (member). Additional filter rules can be added via WithMatch* option constructors
+func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call {
+ base := []MatchOption{
+ {"type", "signal"},
+ {"interface", iface},
+ {"member", member},
+ }
+
+ options = append(base, options...)
+ return o.conn.BusObject().Call(
+ "org.freedesktop.DBus.RemoveMatch",
+ 0,
+ formatMatchOptions(options),
)
}
@@ -49,6 +111,18 @@ func (o *Object) AddMatchSignal(iface, member string) *Call {
// If the method parameter contains a dot ('.'), the part before the last dot
// specifies the interface on which the method is called.
func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ return o.createCall(context.Background(), method, flags, ch, args...)
+}
+
+// GoWithContext acts like Go but takes a context
+func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ return o.createCall(ctx, method, flags, ch, args...)
+}
+
+func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ if ctx == nil {
+ panic("nil context")
+ }
iface := ""
i := strings.LastIndex(method, ".")
if i != -1 {
@@ -76,28 +150,28 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Object).Go")
}
+ ctx, cancel := context.WithCancel(ctx)
call := &Call{
Destination: o.dest,
Path: o.path,
Method: method,
Args: args,
Done: ch,
+ ctxCanceler: cancel,
+ ctx: ctx,
}
- o.conn.callsLck.Lock()
- o.conn.calls[msg.serial] = call
- o.conn.callsLck.Unlock()
- o.conn.outLck.RLock()
- if o.conn.closed {
- call.Err = ErrClosed
- call.Done <- call
- } else {
- o.conn.out <- msg
- }
- o.conn.outLck.RUnlock()
+ o.conn.calls.track(msg.serial, call)
+ o.conn.sendMessageAndIfClosed(msg, func() {
+ o.conn.calls.handleSendError(msg, ErrClosed)
+ cancel()
+ })
+ go func() {
+ <-ctx.Done()
+ o.conn.calls.handleSendError(msg, ctx.Err())
+ }()
+
return call
}
- o.conn.outLck.RLock()
- defer o.conn.outLck.RUnlock()
done := make(chan *Call, 1)
call := &Call{
Err: nil,
@@ -107,11 +181,9 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface
call.Done <- call
close(done)
}()
- if o.conn.closed {
+ o.conn.sendMessageAndIfClosed(msg, func() {
call.Err = ErrClosed
- return call
- }
- o.conn.out <- msg
+ })
return call
}
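
Together, the new MatchOption helpers and the *WithContext variants change how callers subscribe to signals and bound synchronous calls. A usage sketch against the bus daemon (org.freedesktop.DBus is a real well-known name; the timeout value is arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Subscribe to NameOwnerChanged, restricting the match rule to the bus
	// daemon's object path via the new MatchOption helpers.
	busObj := conn.BusObject()
	if call := busObj.AddMatchSignal("org.freedesktop.DBus", "NameOwnerChanged",
		dbus.WithMatchObjectPath("/org/freedesktop/DBus")); call.Err != nil {
		log.Fatal(call.Err)
	}

	// CallWithContext bounds a synchronous call; the pending call is
	// finalized with ctx.Err() if the deadline expires first.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var names []string
	if err := busObj.CallWithContext(ctx, "org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		log.Fatal(err)
	}
	fmt.Println("names on the bus:", len(names))
}
```
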
diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go
index 091948aef..01166f0bd 100644
--- a/vendor/github.com/godbus/dbus/server_interfaces.go
+++ b/vendor/github.com/godbus/dbus/server_interfaces.go
@@ -87,3 +87,13 @@ type SignalHandler interface {
type DBusError interface {
DBusError() (string, []interface{})
}
+
+// SerialGenerator is responsible for serials generation.
+//
+// Different approaches for the serial generation can be used,
+// maintaining a map guarded with a mutex (the standard way) or
+// simply increment an atomic counter.
+type SerialGenerator interface {
+ GetSerial() uint32
+ RetireSerial(serial uint32)
+}
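
The doc comment above mentions an atomic counter as an alternative to the mutex-guarded map. A minimal sketch of a generator satisfying this interface (illustrative only: it never reuses serials, and it ignores 32-bit wrap-around, where serial 0 would have to be skipped):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// atomicSerialGenerator is a sketch of the "atomic counter" approach named in
// the SerialGenerator doc comment. RetireSerial is a no-op because serials
// are simply never handed out twice.
type atomicSerialGenerator struct {
	last uint32 // last serial handed out; starts at 0, so the first serial is 1
}

func (g *atomicSerialGenerator) GetSerial() uint32 {
	// Note: a production implementation would have to skip 0 on wrap-around,
	// since 0 is not a valid D-Bus serial.
	return atomic.AddUint32(&g.last, 1)
}

func (g *atomicSerialGenerator) RetireSerial(serial uint32) {
	// Nothing to do: this generator does not track in-flight serials.
}

func main() {
	gen := &atomicSerialGenerator{}
	fmt.Println(gen.GetSerial(), gen.GetSerial(), gen.GetSerial()) // 1 2 3
}
```
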
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go
index 3fad859a6..718a1ff02 100644
--- a/vendor/github.com/godbus/dbus/transport_generic.go
+++ b/vendor/github.com/godbus/dbus/transport_generic.go
@@ -11,7 +11,7 @@ var nativeEndian binary.ByteOrder
func detectEndianness() binary.ByteOrder {
var x uint32 = 0x01020304
- if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
return binary.BigEndian
}
return binary.LittleEndian
diff --git a/vendor/github.com/godbus/dbus/transport_nonce_tcp.go b/vendor/github.com/godbus/dbus/transport_nonce_tcp.go
new file mode 100644
index 000000000..697739efa
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_nonce_tcp.go
@@ -0,0 +1,39 @@
+//+build !windows
+
+package dbus
+
+import (
+ "errors"
+ "io/ioutil"
+ "net"
+)
+
+func init() {
+ transports["nonce-tcp"] = newNonceTcpTransport
+}
+
+func newNonceTcpTransport(keys string) (transport, error) {
+ host := getKey(keys, "host")
+ port := getKey(keys, "port")
+ noncefile := getKey(keys, "noncefile")
+ if host == "" || port == "" || noncefile == "" {
+ return nil, errors.New("dbus: unsupported address (must set host, port and noncefile)")
+ }
+ protocol, err := tcpFamily(keys)
+ if err != nil {
+ return nil, err
+ }
+ socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
+ if err != nil {
+ return nil, err
+ }
+ b, err := ioutil.ReadFile(noncefile)
+ if err != nil {
+ return nil, err
+ }
+ _, err = socket.Write(b)
+ if err != nil {
+ return nil, err
+ }
+ return NewConn(socket)
+}
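
A connection string for the new transport follows the usual key=value form with host, port and noncefile; the transport writes the nonce read from noncefile to the socket before the normal handshake. A hedged usage sketch (all address values below are invented):

```go
package main

import (
	"log"

	"github.com/godbus/dbus"
)

func main() {
	// A real nonce-tcp bus publishes its own host, port and noncefile.
	conn, err := dbus.Dial("nonce-tcp:host=127.0.0.1,port=55556,noncefile=/tmp/dbus-nonce")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Dial only establishes the transport; authenticate and register as usual.
	if err := conn.Auth(nil); err != nil {
		log.Fatal(err)
	}
	if err := conn.Hello(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected via nonce-tcp as", conn.Names()[0])
}
```
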
diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go
index e56d5ca90..f000c6b5d 100644
--- a/vendor/github.com/godbus/dbus/transport_unix.go
+++ b/vendor/github.com/godbus/dbus/transport_unix.go
@@ -31,6 +31,7 @@ func (o *oobReader) Read(b []byte) (n int, err error) {
type unixTransport struct {
*net.UnixConn
+ rdr *oobReader
hasUnixFDs bool
}
@@ -79,10 +80,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// To be sure that all bytes of out-of-band data are read, we use a special
// reader that uses ReadUnix on the underlying connection instead of Read
// and gathers the out-of-band data in a buffer.
- rd := &oobReader{conn: t.UnixConn}
+ if t.rdr == nil {
+ t.rdr = &oobReader{conn: t.UnixConn}
+ } else {
+ t.rdr.oob = nil
+ }
+
// read the first 16 bytes (the part of the header that has a constant size),
// from which we can figure out the length of the rest of the message
- if _, err := io.ReadFull(rd, csheader[:]); err != nil {
+ if _, err := io.ReadFull(t.rdr, csheader[:]); err != nil {
return nil, err
}
switch csheader[0] {
@@ -104,7 +110,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// decode headers and look for unix fds
headerdata := make([]byte, hlen+4)
copy(headerdata, csheader[12:])
- if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
+ if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil {
return nil, err
}
dec := newDecoder(bytes.NewBuffer(headerdata), order)
@@ -122,7 +128,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
all := make([]byte, 16+hlen+blen)
copy(all, csheader[:])
copy(all[16:], headerdata[4:])
- if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
+ if _, err := io.ReadFull(t.rdr, all[16+hlen:]); err != nil {
return nil, err
}
if unixfds != 0 {
@@ -130,7 +136,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
return nil, errors.New("dbus: got unix fds on unsupported transport")
}
// read the fds from the OOB data
- scms, err := syscall.ParseSocketControlMessage(rd.oob)
+ scms, err := syscall.ParseSocketControlMessage(t.rdr.oob)
if err != nil {
return nil, err
}
@@ -148,11 +154,23 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// substitute the values in the message body (which are indices for the
// array receiver via OOB) with the actual values
for i, v := range msg.Body {
- if j, ok := v.(UnixFDIndex); ok {
+ switch v.(type) {
+ case UnixFDIndex:
+ j := v.(UnixFDIndex)
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
msg.Body[i] = UnixFD(fds[j])
+ case []UnixFDIndex:
+ idxArray := v.([]UnixFDIndex)
+ fdArray := make([]UnixFD, len(idxArray))
+ for k, j := range idxArray {
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ fdArray[k] = UnixFD(fds[j])
+ }
+ msg.Body[i] = fdArray
}
}
return msg, nil
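
The reader changes reuse a single oobReader across messages and now also map slices of UnixFDIndex back to real descriptors. A hedged sketch of file-descriptor passing from the sending side (destination, object path and method name are invented):

```go
package main

import (
	"log"
	"os"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}
	if !conn.SupportsUnixFDs() {
		log.Fatal("transport does not support unix fd passing")
	}

	f, err := os.Open("/etc/hostname")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// A dbus.UnixFD argument travels as SCM_RIGHTS ancillary data; on the
	// receiving side, ReadMessage maps UnixFDIndex values (single or, with
	// this patch, inside a slice) back to real descriptors.
	obj := conn.Object("com.example.FDSink", "/com/example/FDSink")
	call := obj.Call("com.example.FDSink.Take", 0, dbus.UnixFD(f.Fd()))
	if call.Err != nil {
		log.Fatal(call.Err)
	}
}
```
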
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 000000000..3d97fc7a2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people. For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+# Organization's name
+# Individual's name <submission email address>
+# Individual's name <submission email address> <email2> <emailN>
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 000000000..1b4f6c208
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov <anton.povarov@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Clayton Coleman <ccoleman@redhat.com>
+Denis Smirnov <denis.smirnov.91@gmail.com>
+DongYun Kang <ceram1000@gmail.com>
+Dwayne Schultz <dschultz@pivotal.io>
+Georg Apitz <gapitz@pivotal.io>
+Gustav Paul <gustav.paul@gmail.com>
+Johan Brandhorst <johan.brandhorst@gmail.com>
+John Shahid <jvshahid@gmail.com>
+John Tuley <john@tuley.org>
+Laurent <laurent@adyoulike.com>
+Patrick Lee <patrick@dropbox.com>
+Peter Edge <peter.edge@gmail.com>
+Roger Johansson <rogeralsing@gmail.com>
+Sam Nguyen <sam.nguyen@sendgrid.com>
+Sergio Arbeo <serabe@gmail.com>
+Stephen J Day <stephen.day@docker.com>
+Tamir Duberstein <tamird@gmail.com>
+Todd Eisenberger <teisenberger@dropbox.com>
+Tormod Erevik Lea <tormodlea@gmail.com>
+Vyacheslav Kim <kane@sendgrid.com>
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README
deleted file mode 100644
index 405429a95..000000000
--- a/vendor/github.com/gogo/protobuf/README
+++ /dev/null
@@ -1,298 +0,0 @@
-Protocol Buffers for Go with Gadgets
-
-GoGoProtobuf http://github.com/gogo/protobuf extends
-GoProtobuf http://github.com/golang/protobuf
-
-Copyright (c) 2013, The GoGo Authors. All rights reserved.
-
-
-# Go support for Protocol Buffers
-
-Google's data interchange format.
-Copyright 2010 The Go Authors.
-https://github.com/golang/protobuf
-
-This package and the code it generates requires at least Go 1.6.
-
-This software implements Go bindings for protocol buffers. For
-information about protocol buffers themselves, see
- https://developers.google.com/protocol-buffers/
-
-## Installation ##
-
-To use this software, you must:
-- Install the standard C++ implementation of protocol buffers from
- https://developers.google.com/protocol-buffers/
-- Of course, install the Go compiler and tools from
- https://golang.org/
- See
- https://golang.org/doc/install
- for details or, if you are using gccgo, follow the instructions at
- https://golang.org/doc/install/gccgo
-- Grab the code from the repository and install the proto package.
- The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
- The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
- defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
- compiler, protoc, to find it.
-
-This software has two parts: a 'protocol compiler plugin' that
-generates Go source files that, once compiled, can access and manage
-protocol buffers; and a library that implements run-time support for
-encoding (marshaling), decoding (unmarshaling), and accessing protocol
-buffers.
-
-There is support for gRPC in Go using protocol buffers.
-See the note at the bottom of this file for details.
-
-There are no insertion points in the plugin.
-
-GoGoProtobuf provides extensions for protocol buffers and GoProtobuf
-see http://github.com/gogo/protobuf/gogoproto/doc.go
-
-## Using protocol buffers with Go ##
-
-Once the software is installed, there are two steps to using it.
-First you must compile the protocol buffer definitions and then import
-them, with the support library, into your program.
-
-To compile the protocol buffer definition, run protoc with the --gogo_out
-parameter set to the directory you want to output the Go code to.
-
- protoc --gogo_out=. *.proto
-
-The generated files will be suffixed .pb.go. See the Test code below
-for an example using such a file.
-
-## Packages and input paths ##
-
-The protocol buffer language has a concept of "packages" which does not
-correspond well to the Go notion of packages. In generated Go code,
-each source `.proto` file is associated with a single Go package. The
-name and import path for this package is specified with the `go_package`
-proto option:
-
- option go_package = "github.com/gogo/protobuf/types";
-
-The protocol buffer compiler will attempt to derive a package name and
-import path if a `go_package` option is not present, but it is
-best to always specify one explicitly.
-
-There is a one-to-one relationship between source `.proto` files and
-generated `.pb.go` files, but any number of `.pb.go` files may be
-contained in the same Go package.
-
-The output name of a generated file is produced by replacing the
-`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
-However, the output directory is selected in one of two ways. Let
-us say we have `inputs/x.proto` with a `go_package` option of
-`github.com/golang/protobuf/p`. The corresponding output file may
-be:
-
-- Relative to the import path:
-
- protoc --gogo_out=. inputs/x.proto
- # writes ./github.com/gogo/protobuf/p/x.pb.go
-
- (This can work well with `--gogo_out=$GOPATH`.)
-
-- Relative to the input file:
-
- protoc --gogo_out=paths=source_relative:. inputs/x.proto
- # generate ./inputs/x.pb.go
-
-## Generated code ##
-
-The package comment for the proto library contains text describing
-the interface provided in Go for protocol buffers. Here is an edited
-version.
-
-If you are using any gogo.proto extensions you will need to specify the
-proto_path to include the descriptor.proto and gogo.proto.
-gogo.proto is located in github.com/gogo/protobuf/gogoproto
-This should be fine, since your import is the same.
-descriptor.proto is located in either github.com/gogo/protobuf/protobuf
-or code.google.com/p/protobuf/trunk/src/
-Its import is google/protobuf/descriptor.proto so it might need some help.
-
- protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto
-
-==========
-
-The proto package converts data structures to and from the
-wire format of protocol buffers. It works in concert with the
-Go source code generated for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- Helpers for getting values are superseded by the
- GetFoo methods and their use is deprecated.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed with the enum's type name. Enum types have
- a String method, and a Enum method to assist in message construction.
- - Nested groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-Consider file test.proto, containing
-
-```proto
- syntax = "proto2";
- package example;
-
- enum FOO { X = 17; };
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- }
-```
-
-To create and play with a Test object from the example package,
-
-```go
- package main
-
- import (
- "log"
-
- "github.com/gogo/protobuf/proto"
- "path/to/example"
- )
-
- func main() {
- test := &example.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &example.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // etc.
- }
-```
-
-
-## Parameters ##
-
-To pass extra parameters to the plugin, use a comma-separated
-parameter list separated from the output directory by a colon:
-
-
- protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto
-
-- `paths=(import | source_relative)` - specifies how the paths of
- generated files are structured. See the "Packages and imports paths"
- section above. The default is `import`.
-- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
- load. The only plugin in this repo is `grpc`.
-- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
- associated with Go package quux/shme. This is subject to the
- import_prefix parameter.
-
-The following parameters are deprecated and should not be used:
-
-- `import_prefix=xxx` - a prefix that is added onto the beginning of
- all imports.
-- `import_path=foo/bar` - used as the package if no input files
- declare `go_package`. If it contains slashes, everything up to the
- rightmost slash is ignored.
-
-## gRPC Support ##
-
-If a proto file specifies RPC services, protoc-gen-go can be instructed to
-generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
-the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
-the --go_out argument to protoc:
-
- protoc --gogo_out=plugins=grpc:. *.proto
-
-## Compatibility ##
-
-The library and the generated code are expected to be stable over time.
-However, we reserve the right to make breaking changes without notice for the
-following reasons:
-
-- Security. A security issue in the specification or implementation may come to
- light whose resolution requires breaking compatibility. We reserve the right
- to address such security issues.
-- Unspecified behavior. There are some aspects of the Protocol Buffers
- specification that are undefined. Programs that depend on such unspecified
- behavior may break in future releases.
-- Specification errors or changes. If it becomes necessary to address an
- inconsistency, incompleteness, or change in the Protocol Buffers
- specification, resolving the issue could affect the meaning or legality of
- existing programs. We reserve the right to address such issues, including
- updating the implementations.
-- Bugs. If the library has a bug that violates the specification, a program
- that depends on the buggy behavior may break if the bug is fixed. We reserve
- the right to fix such bugs.
-- Adding methods or fields to generated structs. These may conflict with field
- names that already exist in a schema, causing applications to break. When the
- code generator encounters a field in the schema that would collide with a
- generated field or method name, the code generator will append an underscore
- to the generated field or method name.
-- Adding, removing, or changing methods or fields in generated structs that
- start with `XXX`. These parts of the generated code are exported out of
- necessity, but should not be considered part of the public API.
-- Adding, removing, or changing unexported symbols in generated code.
-
-Any breaking changes outside of these will be announced 6 months in advance to
-protobuf@googlegroups.com.
-
-You should, whenever possible, use generated code created by the `protoc-gen-go`
-tool built at the same commit as the `proto` package. The `proto` package
-declares package-level constants in the form `ProtoPackageIsVersionX`.
-Application code and generated code may depend on one of these constants to
-ensure that compilation will fail if the available version of the proto library
-is too old. Whenever we make a change to the generated code that requires newer
-library support, in the same commit we will increment the version number of the
-generated code and declare a new package-level constant whose name incorporates
-the latest version number. Removing a compatibility constant is considered a
-breaking change and would be subject to the announcement policy stated above.
-
-The `protoc-gen-go/generator` package exposes a plugin interface,
-which is used by the gRPC code generation. This interface is not
-supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
deleted file mode 100644
index f8a3aee5b..000000000
--- a/vendor/github.com/gogo/protobuf/Readme.md
+++ /dev/null
@@ -1,159 +0,0 @@
-# Protocol Buffers for Go with Gadgets
-
-[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf)
-[![GoDoc](https://godoc.org/github.com/gogo/protobuf?status.svg)](http://godoc.org/github.com/gogo/protobuf)
-
-gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features.
-
-This code generation is used to achieve:
-
- - fast marshalling and unmarshalling
- - more canonical Go structures
- - goprotobuf compatibility
- - less typing by optionally generating extra helper code
- - peace of mind by optionally generating test and benchmark code
- - other serialization formats
-
-Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this
-<a href="https://github.com/gogo/protobuf/issues/191">issue</a>
-
-## Users
-
-These projects use gogoprotobuf:
-
- - <a href="http://godoc.org/github.com/coreos/etcd">etcd</a> - <a href="https://blog.gopheracademy.com/advent-2015/etcd-distributed-key-value-store-with-grpc-http2/">blog</a> - <a href="https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/etcdserver.proto">sample proto file</a>
- - <a href="https://www.spacemonkey.com/">spacemonkey</a> - <a href="https://www.spacemonkey.com/blog/posts/go-space-monkey">blog</a>
- - <a href="http://badoo.com">badoo</a> - <a href="https://github.com/badoo/lsd/blob/32061f501c5eca9c76c596d790b450501ba27b2f/proto/lsd.proto">sample proto file</a>
- - <a href="https://github.com/mesos/mesos-go">mesos-go</a> - <a href="https://github.com/mesos/mesos-go/blob/f9e5fb7c2f50ab5f23299f26b6b07c5d6afdd252/api/v0/mesosproto/authentication.proto">sample proto file</a>
- - <a href="https://github.com/mozilla-services/heka">heka</a> - <a href="https://github.com/mozilla-services/heka/commit/eb72fbf7d2d28249fbaf8d8dc6607f4eb6f03351">the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com</a>
- - <a href="https://github.com/cockroachdb/cockroach">cockroachdb</a> - <a href="https://github.com/cockroachdb/cockroach/blob/651d54d393e391a30154e9117ab4b18d9ee6d845/roachpb/metadata.proto">sample proto file</a>
- - <a href="https://github.com/jbenet/go-ipfs">go-ipfs</a> - <a href="https://github.com/ipfs/go-ipfs/blob/2b6da0c024f28abeb16947fb452787196a6b56a2/merkledag/pb/merkledag.proto">sample proto file</a>
- - <a href="https://github.com/philhofer/rkive">rkive-go</a> - <a href="https://github.com/philhofer/rkive/blob/e5dd884d3ea07b341321073882ae28aa16dd11be/rpbc/riak_dt.proto">sample proto file</a>
- - <a href="https://www.dropbox.com">dropbox</a>
- - <a href="https://srclib.org/">srclib</a> - <a href="https://github.com/sourcegraph/srclib/blob/6538858f0c410cac5c63440317b8d009e889d3fb/graph/def.proto">sample proto file</a>
- - <a href="http://www.adyoulike.com/">adyoulike</a>
- - <a href="http://www.cloudfoundry.org/">cloudfoundry</a> - <a href="https://github.com/cloudfoundry/bbs/blob/d673710b8c4211037805129944ee4c5373d6588a/models/events.proto">sample proto file</a>
- - <a href="http://kubernetes.io/">kubernetes</a> - <a href="https://github.com/kubernetes/kubernetes/tree/88d8628137f94ee816aaa6606ae8cd045dee0bff/cmd/libs/go2idl">go2idl built on top of gogoprotobuf</a>
- - <a href="https://dgraph.io/">dgraph</a> - <a href="https://github.com/dgraph-io/dgraph/releases/tag/v0.4.3">release notes</a> - <a href="https://discuss.dgraph.io/t/gogoprotobuf-is-extremely-fast/639">benchmarks</a></a>
- - <a href="https://github.com/centrifugal/centrifugo">centrifugo</a> - <a href="https://forum.golangbridge.org/t/centrifugo-real-time-messaging-websocket-or-sockjs-server-v1-5-0-released/2861">release notes</a> - <a href="https://medium.com/@fzambia/centrifugo-protobuf-inside-json-outside-21d39bdabd68#.o3icmgjqd">blog</a>
- - <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a>
- - <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a>
- - <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a>
- - <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/master/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions
- - <a href="https://containerd.io/">containerd</a> - <a href="https://github.com/containerd/containerd/tree/master/cmd/protoc-gen-gogoctrd">vanity command with custom field names</a> that conforms to the golang convention.
- - <a href="https://github.com/heroiclabs/nakama">nakama</a>
- - <a href="https://github.com/src-d/proteus">proteus</a>
- - <a href="https://github.com/go-graphite">carbonzipper stack</a>
- - <a href="https://sendgrid.com/">sendgrid</a>
- - <a href="https://github.com/zero-os/0-stor">zero-os/0-stor</a>
- - <a href="https://github.com/spacemeshos/go-spacemesh">go-spacemesh</a>
- - <a href="https://github.com/weaveworks/cortex">cortex</a> - <a href="https://github.com/weaveworks/cortex/blob/fee02a59729d3771ef888f7bf0fd050e1197c56e/pkg/ingester/client/cortex.proto">sample proto file</a>
- - <a href="http://skywalking.apache.org/">Apache SkyWalking APM</a> - Istio telemetry receiver based on Mixer bypass protocol
-
-Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
-
-### Mentioned
-
- - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
- - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a>
- - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
- - <a href="http://agniva.me/go/2017/11/18/gogoproto.html">Go faster with gogoproto - Agniva De Sarker</a>
- - <a href="https://www.youtube.com/watch?v=CY9T020HLP8">Evolution of protobuf (Gource Visualization) - Landon Wilkins</a>
- - <a href="https://fosdem.org/2018/schedule/event/gopherjs/">Creating GopherJS Apps with gRPC-Web - Johan Brandhorst</a>
- - <a href="https://jbrandhorst.com/post/gogoproto/">So you want to use GoGo Protobuf - Johan Brandhorst</a>
- - <a href="https://jbrandhorst.com/post/grpc-errors/">Advanced gRPC Error Usage - Johan Brandhorst</a>
- - <a href="https://www.udemy.com/grpc-golang/?couponCode=GITHUB10">gRPC Golang Course on Udemy - Stephane Maarek</a>
-
-## Getting Started
-
-There are several ways to use gogoprotobuf, but for all you need to install go and protoc.
-After that you can choose:
-
- - Speed
- - More Speed and more generated code
- - Most Speed and most customization
-
-### Installation
-
-To install it, you must first have Go (at least version 1.6.3 or 1.9 if you are using gRPC) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)).
-Latest patch versions of 1.9 and 1.10 are continuously tested.
-
-Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
-Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested.
-
-### Speed
-
-Install the protoc-gen-gofast binary
-
- go get github.com/gogo/protobuf/protoc-gen-gofast
-
-Use it to generate faster marshaling and unmarshaling go code for your protocol buffers.
-
- protoc --gofast_out=. myproto.proto
-
-This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
-
-### More Speed and more generated code
-
-Fields without pointers cause less time in the garbage collector.
-More code generation results in more convenient methods.
-
-Other binaries are also included:
-
- protoc-gen-gogofast (same as gofast, but imports gogoprotobuf)
- protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields)
- protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods)
-
-Installing any of these binaries is easy. Simply run:
-
- go get github.com/gogo/protobuf/proto
- go get github.com/gogo/protobuf/{binary}
- go get github.com/gogo/protobuf/gogoproto
-
-These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). You can also use your own binary.
-
-To generate the code, you also need to set the include path properly.
-
- protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=. myproto.proto
-
-To use proto files from "google/protobuf" you need to add additional args to protoc.
-
- protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=\
- Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\
- Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\
- Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
- Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. \
- myproto.proto
-
-Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen".
-
-### Most Speed and most customization
-
-Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize.
-gogoprotobuf also offers more serialization formats and generation of tests and even more methods.
-
-Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
-
-Install protoc-gen-gogo:
-
- go get github.com/gogo/protobuf/proto
- go get github.com/gogo/protobuf/jsonpb
- go get github.com/gogo/protobuf/protoc-gen-gogo
- go get github.com/gogo/protobuf/gogoproto
-
-## GRPC
-
-It works the same as golang/protobuf, simply specify the plugin.
-Here is an example using gofast:
-
- protoc --gofast_out=plugins=grpc:. my.proto
-
-See [https://github.com/gogo/grpc-example](https://github.com/gogo/grpc-example) for an example of using gRPC with gogoprotobuf and the wider grpc-ecosystem.
-
-
-## License
-This software is licensed under the 3-Clause BSD License
-("BSD License 2.0", "Revised BSD License", "New BSD License", or "Modified BSD License").
-
-
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
deleted file mode 100644
index 081c86fa8..000000000
--- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package gogoproto provides extensions for protocol buffers to achieve:
-
- - fast marshalling and unmarshalling.
- - peace of mind by optionally generating test and benchmark code.
- - more canonical Go structures.
- - less typing by optionally generating extra helper code.
- - goprotobuf compatibility
-
-More Canonical Go Structures
-
-A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
-You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
-Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
-
- - nullable, if false, a field is generated without a pointer (see warning below).
- - embed, if true, the field is generated as an embedded field.
- - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
- - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
- - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
- - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
- - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
-
-Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
-
-Let us look at:
-
- github.com/gogo/protobuf/test/example/example.proto
-
-for a quicker overview.
-
-The following message:
-
- package test;
-
- import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
- message A {
- optional string Description = 1 [(gogoproto.nullable) = false];
- optional int64 Number = 2 [(gogoproto.nullable) = false];
- optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
- }
-
-Will generate a go struct which looks a lot like this:
-
- type A struct {
- Description string
- Number int64
- Id github_com_gogo_protobuf_test_custom.Uuid
- }
-
-You will see there are no pointers, since all fields are non-nullable.
-You will also see a custom type which marshals to a string.
-Be warned it is your responsibility to test your custom types thoroughly.
-You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
-
-Next we will embed the message A in message B.
-
- message B {
- optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
- repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
- }
-
-See below that A is embedded in B.
-
- type B struct {
- A
- G []github_com_gogo_protobuf_test_custom.Uint128
- }
-
-Also see the repeated custom type.
-
- type Uint128 [2]uint64
-
-Next we will create a custom name for one of our fields.
-
- message C {
- optional int64 size = 1 [(gogoproto.customname) = "MySize"];
- }
-
-See below that the field's name is MySize and not Size.
-
- type C struct {
- MySize *int64
- }
-
-This is useful when a protocol buffer message has a field name which conflicts with a generated method.
-As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
-Using customname you can fix this error without changing the field name.
-This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
-
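-The cast extensions are not shown above. As a rough sketch, casttype could be used like this (the import path and type are placeholders, not a real package):
-
-	message D {
-		// The cast target is a placeholder; any Go type castable to uint64 works.
-		optional uint64 counter = 1 [(gogoproto.casttype) = "github.com/example/mytypes.MyUint64"];
-	}
-
-castkey and castvalue are applied in the same way to the key and value types of a map field.
-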
-Gogoprotobuf also makes some more subtle changes; these could be changed back:
-
-  - the generated package names for imports do not have the extra /filename.pb,
-    but are the import paths specified in the .proto file.
-
-Gogoprotobuf has also lost some features, which should be brought back over time:
-
-  - Marshalling and unmarshalling with reflect and without the unsafe package;
-    this requires work in pointer_reflect.go.
-
-Why nullable breaks the protocol buffer specification:
-
-The protocol buffer specification states that you should be able to tell whether a
-field is set or unset. With the option nullable=false this feature is lost,
-since your non-nullable fields will always be set. It can be seen as a layer on top of
-protocol buffers, where before and after marshalling all non-nullable fields are set
-and they cannot be unset.
-
-Goprotobuf Compatibility:
-
-Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
-Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
-The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf (a file-wide sketch follows the list below):
-
- - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
- - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
-  - goproto_enum_stringer (experimental), if false, the enum is generated without the default String method; this is useful if you would rather use enum_stringer, or write your own String method.
-  - goproto_getters, if false, the message is generated without get methods; this is useful if you would rather use face.
-  - goproto_stringer, if false, the message is generated without the default String method; this is useful if you would rather use stringer, or write your own String method.
- - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
- - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
- - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
-
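-Most of these exist both per message (or per enum) and file-wide with an _all suffix. A minimal sketch of the file-wide form (the particular options and values are only illustrative):
-
-	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-	option (gogoproto.goproto_enum_prefix_all) = false;
-	option (gogoproto.goproto_getters_all) = false;
-	option (gogoproto.goproto_stringer_all) = false;
-	option (gogoproto.goproto_unrecognized_all) = false;
-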
-Less Typing and Peace of Mind are explained in the godoc of their respective plugin folders:
-
- - github.com/gogo/protobuf/plugin/<extension_name>
-
-If you do not use any of these extensions, the generated code
-will be the same as if goprotobuf had generated it.
-
-The most complete way to see examples is to look at
-
- github.com/gogo/protobuf/test/thetest.proto
-
-Gogoprototest is a separate project,
-because we want to keep gogoprotobuf independent of goprotobuf,
-but we still want to test it thoroughly.
-
-*/
-package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
deleted file mode 100644
index 0057f8e1b..000000000
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: gogo.proto
-
-package gogoproto // import "github.com/gogo/protobuf/gogoproto"
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 62001,
- Name: "gogoproto.goproto_enum_prefix",
- Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoEnumStringer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 62021,
- Name: "gogoproto.goproto_enum_stringer",
- Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
- Filename: "gogo.proto",
-}
-
-var E_EnumStringer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 62022,
- Name: "gogoproto.enum_stringer",
- Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
- Filename: "gogo.proto",
-}
-
-var E_EnumCustomname = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 62023,
- Name: "gogoproto.enum_customname",
- Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
- Filename: "gogo.proto",
-}
-
-var E_Enumdecl = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 62024,
- Name: "gogoproto.enumdecl",
- Tag: "varint,62024,opt,name=enumdecl",
- Filename: "gogo.proto",
-}
-
-var E_EnumvalueCustomname = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.EnumValueOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 66001,
- Name: "gogoproto.enumvalue_customname",
- Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoGettersAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63001,
- Name: "gogoproto.goproto_getters_all",
- Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63002,
- Name: "gogoproto.goproto_enum_prefix_all",
- Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoStringerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63003,
- Name: "gogoproto.goproto_stringer_all",
- Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
- Filename: "gogo.proto",
-}
-
-var E_VerboseEqualAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63004,
- Name: "gogoproto.verbose_equal_all",
- Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
- Filename: "gogo.proto",
-}
-
-var E_FaceAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63005,
- Name: "gogoproto.face_all",
- Tag: "varint,63005,opt,name=face_all,json=faceAll",
- Filename: "gogo.proto",
-}
-
-var E_GostringAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63006,
- Name: "gogoproto.gostring_all",
- Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
- Filename: "gogo.proto",
-}
-
-var E_PopulateAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63007,
- Name: "gogoproto.populate_all",
- Tag: "varint,63007,opt,name=populate_all,json=populateAll",
- Filename: "gogo.proto",
-}
-
-var E_StringerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63008,
- Name: "gogoproto.stringer_all",
- Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
- Filename: "gogo.proto",
-}
-
-var E_OnlyoneAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63009,
- Name: "gogoproto.onlyone_all",
- Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
- Filename: "gogo.proto",
-}
-
-var E_EqualAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63013,
- Name: "gogoproto.equal_all",
- Tag: "varint,63013,opt,name=equal_all,json=equalAll",
- Filename: "gogo.proto",
-}
-
-var E_DescriptionAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63014,
- Name: "gogoproto.description_all",
- Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
- Filename: "gogo.proto",
-}
-
-var E_TestgenAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63015,
- Name: "gogoproto.testgen_all",
- Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
- Filename: "gogo.proto",
-}
-
-var E_BenchgenAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63016,
- Name: "gogoproto.benchgen_all",
- Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
- Filename: "gogo.proto",
-}
-
-var E_MarshalerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63017,
- Name: "gogoproto.marshaler_all",
- Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
- Filename: "gogo.proto",
-}
-
-var E_UnmarshalerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63018,
- Name: "gogoproto.unmarshaler_all",
- Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
- Filename: "gogo.proto",
-}
-
-var E_StableMarshalerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63019,
- Name: "gogoproto.stable_marshaler_all",
- Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
- Filename: "gogo.proto",
-}
-
-var E_SizerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63020,
- Name: "gogoproto.sizer_all",
- Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63021,
- Name: "gogoproto.goproto_enum_stringer_all",
- Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
- Filename: "gogo.proto",
-}
-
-var E_EnumStringerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63022,
- Name: "gogoproto.enum_stringer_all",
- Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
- Filename: "gogo.proto",
-}
-
-var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63023,
- Name: "gogoproto.unsafe_marshaler_all",
- Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
- Filename: "gogo.proto",
-}
-
-var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63024,
- Name: "gogoproto.unsafe_unmarshaler_all",
- Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63025,
- Name: "gogoproto.goproto_extensions_map_all",
- Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63026,
- Name: "gogoproto.goproto_unrecognized_all",
- Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
- Filename: "gogo.proto",
-}
-
-var E_GogoprotoImport = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63027,
- Name: "gogoproto.gogoproto_import",
- Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
- Filename: "gogo.proto",
-}
-
-var E_ProtosizerAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63028,
- Name: "gogoproto.protosizer_all",
- Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
- Filename: "gogo.proto",
-}
-
-var E_CompareAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63029,
- Name: "gogoproto.compare_all",
- Tag: "varint,63029,opt,name=compare_all,json=compareAll",
- Filename: "gogo.proto",
-}
-
-var E_TypedeclAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63030,
- Name: "gogoproto.typedecl_all",
- Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll",
- Filename: "gogo.proto",
-}
-
-var E_EnumdeclAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63031,
- Name: "gogoproto.enumdecl_all",
- Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoRegistration = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63032,
- Name: "gogoproto.goproto_registration",
- Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration",
- Filename: "gogo.proto",
-}
-
-var E_MessagenameAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63033,
- Name: "gogoproto.messagename_all",
- Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63034,
- Name: "gogoproto.goproto_sizecache_all",
- Tag: "varint,63034,opt,name=goproto_sizecache_all,json=goprotoSizecacheAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FileOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 63035,
- Name: "gogoproto.goproto_unkeyed_all",
- Tag: "varint,63035,opt,name=goproto_unkeyed_all,json=goprotoUnkeyedAll",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoGetters = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64001,
- Name: "gogoproto.goproto_getters",
- Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoStringer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64003,
- Name: "gogoproto.goproto_stringer",
- Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
- Filename: "gogo.proto",
-}
-
-var E_VerboseEqual = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64004,
- Name: "gogoproto.verbose_equal",
- Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
- Filename: "gogo.proto",
-}
-
-var E_Face = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64005,
- Name: "gogoproto.face",
- Tag: "varint,64005,opt,name=face",
- Filename: "gogo.proto",
-}
-
-var E_Gostring = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64006,
- Name: "gogoproto.gostring",
- Tag: "varint,64006,opt,name=gostring",
- Filename: "gogo.proto",
-}
-
-var E_Populate = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64007,
- Name: "gogoproto.populate",
- Tag: "varint,64007,opt,name=populate",
- Filename: "gogo.proto",
-}
-
-var E_Stringer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 67008,
- Name: "gogoproto.stringer",
- Tag: "varint,67008,opt,name=stringer",
- Filename: "gogo.proto",
-}
-
-var E_Onlyone = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64009,
- Name: "gogoproto.onlyone",
- Tag: "varint,64009,opt,name=onlyone",
- Filename: "gogo.proto",
-}
-
-var E_Equal = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64013,
- Name: "gogoproto.equal",
- Tag: "varint,64013,opt,name=equal",
- Filename: "gogo.proto",
-}
-
-var E_Description = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64014,
- Name: "gogoproto.description",
- Tag: "varint,64014,opt,name=description",
- Filename: "gogo.proto",
-}
-
-var E_Testgen = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64015,
- Name: "gogoproto.testgen",
- Tag: "varint,64015,opt,name=testgen",
- Filename: "gogo.proto",
-}
-
-var E_Benchgen = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64016,
- Name: "gogoproto.benchgen",
- Tag: "varint,64016,opt,name=benchgen",
- Filename: "gogo.proto",
-}
-
-var E_Marshaler = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64017,
- Name: "gogoproto.marshaler",
- Tag: "varint,64017,opt,name=marshaler",
- Filename: "gogo.proto",
-}
-
-var E_Unmarshaler = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64018,
- Name: "gogoproto.unmarshaler",
- Tag: "varint,64018,opt,name=unmarshaler",
- Filename: "gogo.proto",
-}
-
-var E_StableMarshaler = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64019,
- Name: "gogoproto.stable_marshaler",
- Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
- Filename: "gogo.proto",
-}
-
-var E_Sizer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64020,
- Name: "gogoproto.sizer",
- Tag: "varint,64020,opt,name=sizer",
- Filename: "gogo.proto",
-}
-
-var E_UnsafeMarshaler = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64023,
- Name: "gogoproto.unsafe_marshaler",
- Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
- Filename: "gogo.proto",
-}
-
-var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64024,
- Name: "gogoproto.unsafe_unmarshaler",
- Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64025,
- Name: "gogoproto.goproto_extensions_map",
- Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoUnrecognized = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64026,
- Name: "gogoproto.goproto_unrecognized",
- Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
- Filename: "gogo.proto",
-}
-
-var E_Protosizer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64028,
- Name: "gogoproto.protosizer",
- Tag: "varint,64028,opt,name=protosizer",
- Filename: "gogo.proto",
-}
-
-var E_Compare = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64029,
- Name: "gogoproto.compare",
- Tag: "varint,64029,opt,name=compare",
- Filename: "gogo.proto",
-}
-
-var E_Typedecl = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64030,
- Name: "gogoproto.typedecl",
- Tag: "varint,64030,opt,name=typedecl",
- Filename: "gogo.proto",
-}
-
-var E_Messagename = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64033,
- Name: "gogoproto.messagename",
- Tag: "varint,64033,opt,name=messagename",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoSizecache = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64034,
- Name: "gogoproto.goproto_sizecache",
- Tag: "varint,64034,opt,name=goproto_sizecache,json=goprotoSizecache",
- Filename: "gogo.proto",
-}
-
-var E_GoprotoUnkeyed = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MessageOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 64035,
- Name: "gogoproto.goproto_unkeyed",
- Tag: "varint,64035,opt,name=goproto_unkeyed,json=goprotoUnkeyed",
- Filename: "gogo.proto",
-}
-
-var E_Nullable = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 65001,
- Name: "gogoproto.nullable",
- Tag: "varint,65001,opt,name=nullable",
- Filename: "gogo.proto",
-}
-
-var E_Embed = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 65002,
- Name: "gogoproto.embed",
- Tag: "varint,65002,opt,name=embed",
- Filename: "gogo.proto",
-}
-
-var E_Customtype = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65003,
- Name: "gogoproto.customtype",
- Tag: "bytes,65003,opt,name=customtype",
- Filename: "gogo.proto",
-}
-
-var E_Customname = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65004,
- Name: "gogoproto.customname",
- Tag: "bytes,65004,opt,name=customname",
- Filename: "gogo.proto",
-}
-
-var E_Jsontag = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65005,
- Name: "gogoproto.jsontag",
- Tag: "bytes,65005,opt,name=jsontag",
- Filename: "gogo.proto",
-}
-
-var E_Moretags = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65006,
- Name: "gogoproto.moretags",
- Tag: "bytes,65006,opt,name=moretags",
- Filename: "gogo.proto",
-}
-
-var E_Casttype = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65007,
- Name: "gogoproto.casttype",
- Tag: "bytes,65007,opt,name=casttype",
- Filename: "gogo.proto",
-}
-
-var E_Castkey = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65008,
- Name: "gogoproto.castkey",
- Tag: "bytes,65008,opt,name=castkey",
- Filename: "gogo.proto",
-}
-
-var E_Castvalue = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 65009,
- Name: "gogoproto.castvalue",
- Tag: "bytes,65009,opt,name=castvalue",
- Filename: "gogo.proto",
-}
-
-var E_Stdtime = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 65010,
- Name: "gogoproto.stdtime",
- Tag: "varint,65010,opt,name=stdtime",
- Filename: "gogo.proto",
-}
-
-var E_Stdduration = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 65011,
- Name: "gogoproto.stdduration",
- Tag: "varint,65011,opt,name=stdduration",
- Filename: "gogo.proto",
-}
-
-var E_Wktpointer = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.FieldOptions)(nil),
- ExtensionType: (*bool)(nil),
- Field: 65012,
- Name: "gogoproto.wktpointer",
- Tag: "varint,65012,opt,name=wktpointer",
- Filename: "gogo.proto",
-}
-
-func init() {
- proto.RegisterExtension(E_GoprotoEnumPrefix)
- proto.RegisterExtension(E_GoprotoEnumStringer)
- proto.RegisterExtension(E_EnumStringer)
- proto.RegisterExtension(E_EnumCustomname)
- proto.RegisterExtension(E_Enumdecl)
- proto.RegisterExtension(E_EnumvalueCustomname)
- proto.RegisterExtension(E_GoprotoGettersAll)
- proto.RegisterExtension(E_GoprotoEnumPrefixAll)
- proto.RegisterExtension(E_GoprotoStringerAll)
- proto.RegisterExtension(E_VerboseEqualAll)
- proto.RegisterExtension(E_FaceAll)
- proto.RegisterExtension(E_GostringAll)
- proto.RegisterExtension(E_PopulateAll)
- proto.RegisterExtension(E_StringerAll)
- proto.RegisterExtension(E_OnlyoneAll)
- proto.RegisterExtension(E_EqualAll)
- proto.RegisterExtension(E_DescriptionAll)
- proto.RegisterExtension(E_TestgenAll)
- proto.RegisterExtension(E_BenchgenAll)
- proto.RegisterExtension(E_MarshalerAll)
- proto.RegisterExtension(E_UnmarshalerAll)
- proto.RegisterExtension(E_StableMarshalerAll)
- proto.RegisterExtension(E_SizerAll)
- proto.RegisterExtension(E_GoprotoEnumStringerAll)
- proto.RegisterExtension(E_EnumStringerAll)
- proto.RegisterExtension(E_UnsafeMarshalerAll)
- proto.RegisterExtension(E_UnsafeUnmarshalerAll)
- proto.RegisterExtension(E_GoprotoExtensionsMapAll)
- proto.RegisterExtension(E_GoprotoUnrecognizedAll)
- proto.RegisterExtension(E_GogoprotoImport)
- proto.RegisterExtension(E_ProtosizerAll)
- proto.RegisterExtension(E_CompareAll)
- proto.RegisterExtension(E_TypedeclAll)
- proto.RegisterExtension(E_EnumdeclAll)
- proto.RegisterExtension(E_GoprotoRegistration)
- proto.RegisterExtension(E_MessagenameAll)
- proto.RegisterExtension(E_GoprotoSizecacheAll)
- proto.RegisterExtension(E_GoprotoUnkeyedAll)
- proto.RegisterExtension(E_GoprotoGetters)
- proto.RegisterExtension(E_GoprotoStringer)
- proto.RegisterExtension(E_VerboseEqual)
- proto.RegisterExtension(E_Face)
- proto.RegisterExtension(E_Gostring)
- proto.RegisterExtension(E_Populate)
- proto.RegisterExtension(E_Stringer)
- proto.RegisterExtension(E_Onlyone)
- proto.RegisterExtension(E_Equal)
- proto.RegisterExtension(E_Description)
- proto.RegisterExtension(E_Testgen)
- proto.RegisterExtension(E_Benchgen)
- proto.RegisterExtension(E_Marshaler)
- proto.RegisterExtension(E_Unmarshaler)
- proto.RegisterExtension(E_StableMarshaler)
- proto.RegisterExtension(E_Sizer)
- proto.RegisterExtension(E_UnsafeMarshaler)
- proto.RegisterExtension(E_UnsafeUnmarshaler)
- proto.RegisterExtension(E_GoprotoExtensionsMap)
- proto.RegisterExtension(E_GoprotoUnrecognized)
- proto.RegisterExtension(E_Protosizer)
- proto.RegisterExtension(E_Compare)
- proto.RegisterExtension(E_Typedecl)
- proto.RegisterExtension(E_Messagename)
- proto.RegisterExtension(E_GoprotoSizecache)
- proto.RegisterExtension(E_GoprotoUnkeyed)
- proto.RegisterExtension(E_Nullable)
- proto.RegisterExtension(E_Embed)
- proto.RegisterExtension(E_Customtype)
- proto.RegisterExtension(E_Customname)
- proto.RegisterExtension(E_Jsontag)
- proto.RegisterExtension(E_Moretags)
- proto.RegisterExtension(E_Casttype)
- proto.RegisterExtension(E_Castkey)
- proto.RegisterExtension(E_Castvalue)
- proto.RegisterExtension(E_Stdtime)
- proto.RegisterExtension(E_Stdduration)
- proto.RegisterExtension(E_Wktpointer)
-}
-
-func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_b95f77e237336c7c) }
-
-var fileDescriptor_gogo_b95f77e237336c7c = []byte{
- // 1328 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
- 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
- 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
- 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
- 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
- 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
- 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
- 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
- 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
- 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
- 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
- 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
- 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
- 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
- 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
- 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
- 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
- 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
- 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
- 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
- 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
- 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
- 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
- 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
- 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
- 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
- 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
- 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
- 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
- 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
- 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
- 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
- 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
- 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
- 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
- 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
- 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
- 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
- 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
- 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
- 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
- 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
- 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
- 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
- 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
- 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
- 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
- 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
- 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
- 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
- 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
- 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
- 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
- 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
- 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
- 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
- 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
- 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
- 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
- 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
- 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
- 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
- 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
- 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
- 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
- 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
- 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
- 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
- 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
- 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
- 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
- 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
- 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
- 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
- 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
- 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
- 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
- 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
- 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
- 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
- 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
- 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
- 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
deleted file mode 100644
index b80c85653..000000000
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
+++ /dev/null
@@ -1,144 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto2";
-package gogoproto;
-
-import "google/protobuf/descriptor.proto";
-
-option java_package = "com.google.protobuf";
-option java_outer_classname = "GoGoProtos";
-option go_package = "github.com/gogo/protobuf/gogoproto";
-
-extend google.protobuf.EnumOptions {
- optional bool goproto_enum_prefix = 62001;
- optional bool goproto_enum_stringer = 62021;
- optional bool enum_stringer = 62022;
- optional string enum_customname = 62023;
- optional bool enumdecl = 62024;
-}
-
-extend google.protobuf.EnumValueOptions {
- optional string enumvalue_customname = 66001;
-}
-
-extend google.protobuf.FileOptions {
- optional bool goproto_getters_all = 63001;
- optional bool goproto_enum_prefix_all = 63002;
- optional bool goproto_stringer_all = 63003;
- optional bool verbose_equal_all = 63004;
- optional bool face_all = 63005;
- optional bool gostring_all = 63006;
- optional bool populate_all = 63007;
- optional bool stringer_all = 63008;
- optional bool onlyone_all = 63009;
-
- optional bool equal_all = 63013;
- optional bool description_all = 63014;
- optional bool testgen_all = 63015;
- optional bool benchgen_all = 63016;
- optional bool marshaler_all = 63017;
- optional bool unmarshaler_all = 63018;
- optional bool stable_marshaler_all = 63019;
-
- optional bool sizer_all = 63020;
-
- optional bool goproto_enum_stringer_all = 63021;
- optional bool enum_stringer_all = 63022;
-
- optional bool unsafe_marshaler_all = 63023;
- optional bool unsafe_unmarshaler_all = 63024;
-
- optional bool goproto_extensions_map_all = 63025;
- optional bool goproto_unrecognized_all = 63026;
- optional bool gogoproto_import = 63027;
- optional bool protosizer_all = 63028;
- optional bool compare_all = 63029;
- optional bool typedecl_all = 63030;
- optional bool enumdecl_all = 63031;
-
- optional bool goproto_registration = 63032;
- optional bool messagename_all = 63033;
-
- optional bool goproto_sizecache_all = 63034;
- optional bool goproto_unkeyed_all = 63035;
-}
-
-extend google.protobuf.MessageOptions {
- optional bool goproto_getters = 64001;
- optional bool goproto_stringer = 64003;
- optional bool verbose_equal = 64004;
- optional bool face = 64005;
- optional bool gostring = 64006;
- optional bool populate = 64007;
- optional bool stringer = 67008;
- optional bool onlyone = 64009;
-
- optional bool equal = 64013;
- optional bool description = 64014;
- optional bool testgen = 64015;
- optional bool benchgen = 64016;
- optional bool marshaler = 64017;
- optional bool unmarshaler = 64018;
- optional bool stable_marshaler = 64019;
-
- optional bool sizer = 64020;
-
- optional bool unsafe_marshaler = 64023;
- optional bool unsafe_unmarshaler = 64024;
-
- optional bool goproto_extensions_map = 64025;
- optional bool goproto_unrecognized = 64026;
-
- optional bool protosizer = 64028;
- optional bool compare = 64029;
-
- optional bool typedecl = 64030;
-
- optional bool messagename = 64033;
-
- optional bool goproto_sizecache = 64034;
- optional bool goproto_unkeyed = 64035;
-}
-
-extend google.protobuf.FieldOptions {
- optional bool nullable = 65001;
- optional bool embed = 65002;
- optional string customtype = 65003;
- optional string customname = 65004;
- optional string jsontag = 65005;
- optional string moretags = 65006;
- optional string casttype = 65007;
- optional string castkey = 65008;
- optional string castvalue = 65009;
-
- optional bool stdtime = 65010;
- optional bool stdduration = 65011;
- optional bool wktpointer = 65012;
-
-}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
deleted file mode 100644
index 390d4e4be..000000000
--- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gogoproto
-
-import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
-import proto "github.com/gogo/protobuf/proto"
-
-func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Embed, false)
-}
-
-func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Nullable, true)
-}
-
-func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Stdtime, false)
-}
-
-func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Stdduration, false)
-}
-
-func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
-}
-
-func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
-}
-
-func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
-}
-
-func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
-}
-
-func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
-}
-
-func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
-}
-
-func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
-}
-
-func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
-}
-
-func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
-}
-
-func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
- return (IsStdTime(field) || IsStdDuration(field) ||
- IsStdDouble(field) || IsStdFloat(field) ||
- IsStdInt64(field) || IsStdUInt64(field) ||
- IsStdInt32(field) || IsStdUInt32(field) ||
- IsStdBool(field) ||
- IsStdString(field) || IsStdBytes(field))
-}
-
-func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
- return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
-}
-
-func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
- nullable := IsNullable(field)
- if field.IsMessage() || IsCustomType(field) {
- return nullable
- }
- if proto3 {
- return false
- }
- return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
-}
-
-func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
- typ := GetCustomType(field)
- if len(typ) > 0 {
- return true
- }
- return false
-}
-
-func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
- typ := GetCastType(field)
- if len(typ) > 0 {
- return true
- }
- return false
-}
-
-func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
- typ := GetCastKey(field)
- if len(typ) > 0 {
- return true
- }
- return false
-}
-
-func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
- typ := GetCastValue(field)
- if len(typ) > 0 {
- return true
- }
- return false
-}
-
-func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
- return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
-}
-
-func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
-}
-
-func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Customtype)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Casttype)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Castkey)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Castvalue)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
- name := GetCustomName(field)
- if len(name) > 0 {
- return true
- }
- return false
-}
-
-func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
- name := GetEnumCustomName(field)
- if len(name) > 0 {
- return true
- }
- return false
-}
-
-func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
- name := GetEnumValueCustomName(field)
- if len(name) > 0 {
- return true
- }
- return false
-}
-
-func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Customname)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_EnumCustomname)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
- if field == nil {
- return ""
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
- if err == nil && v.(*string) != nil {
- return *(v.(*string))
- }
- }
- return ""
-}
-
-func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
- if field == nil {
- return nil
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Jsontag)
- if err == nil && v.(*string) != nil {
- return (v.(*string))
- }
- }
- return nil
-}
-
-func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
- if field == nil {
- return nil
- }
- if field.Options != nil {
- v, err := proto.GetExtension(field.Options, E_Moretags)
- if err == nil && v.(*string) != nil {
- return (v.(*string))
- }
- }
- return nil
-}
-
-type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
-
-func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
- return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
-}
-
-func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
-}
-
-func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
-}
-
-func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
-}
-
-func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
-}
-
-func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
-}
-
-func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
-}
-
-func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
-}
-
-func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
-}
-
-func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
-}
-
-func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
-}
-
-func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
-}
-
-func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
-}
-
-func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
-}
-
-func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
-}
-
-func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
-}
-
-func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
-}
-
-func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
-}
-
-func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
- return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
-}
-
-func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
- return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
-}
-
-func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
-}
-
-func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
-}
-
-func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
-}
-
-func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
-}
-
-func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
- return file.GetSyntax() == "proto3"
-}
-
-func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
- return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
-}
-
-func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
-}
-
-func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
- return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
-}
-
-func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
-}
-
-func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
-}
-
-func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
- return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
-}
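
The removed helpers above all resolve an option the same way: a message-level gogoproto option wins, otherwise the file-level *_all option applies, otherwise a hardcoded default (true for the goproto_* toggles, false for the opt-in generators). A minimal sketch of that precedence, using plain *bool values instead of the real extension descriptors:

package main

import "fmt"

// resolve mirrors the proto.GetBoolExtension fallback used by the deleted
// helpers: an explicit setting wins, otherwise the supplied default applies.
func resolve(explicit *bool, def bool) bool {
	if explicit != nil {
		return *explicit
	}
	return def
}

func main() {
	fileAll := true  // e.g. option (gogoproto.marshaler_all) = true; at file scope
	var msgOpt *bool // no per-message override
	// message option -> file-wide *_all option -> hardcoded default
	fmt.Println(resolve(msgOpt, resolve(&fileAll, false))) // true
}
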
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 000000000..00d65f327
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C test_proto
+ make -C proto3_proto
+ make
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
index d9aa3c42d..63b0f08be 100644
--- a/vendor/github.com/gogo/protobuf/proto/decode.go
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
- // x -= 0x80 << 63 // Always zero.
return 0, errOverflow
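
DecodeVarint is part of the exported Buffer API shown in this hunk, so the behaviour is easy to check directly; the dropped line was a dead commented-out statement and the overflow return is unchanged. A small usage sketch against the vendored package:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	// 0xAC 0x02 is the classic two-byte varint encoding of 300.
	b := proto.NewBuffer([]byte{0xAC, 0x02})
	x, err := b.DecodeVarint()
	fmt.Println(x, err) // 300 <nil>
}
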
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/proto/deprecated.go
index 6057c8522..35b882c09 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -1,6 +1,7 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -28,25 +29,35 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-syntax = "proto3";
+package proto
-package google.protobuf;
+import "errors"
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "EmptyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option cc_enable_arenas = true;
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-// A generic empty message that you can re-use to avoid defining duplicated
-// empty messages in your APIs. A typical example is to use it as the request
-// or the response type of an API method. For instance:
-//
-// service Foo {
-// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
-// }
-//
-// The JSON representation for `Empty` is empty JSON object `{}`.
-message Empty {}
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
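
The new deprecated.go keeps the old MessageSet entry points and the Stats API compiling, but they are inert: GetStats always returns a zero value and the MessageSet helpers fail unconditionally. A quick check of the new behaviour, assuming the vendored import path:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	_, err := proto.MarshalMessageSet(map[int32]proto.Extension{})
	fmt.Println(err)              // proto: not implemented
	fmt.Println(proto.GetStats()) // zero-valued Stats; no counters are kept anymore
}
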
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
index 44ebd457c..686bd2a09 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -544,7 +544,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
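
The SetExtension change is purely diagnostic: the %T verbs put both the offending value's dynamic type and the expected extension type into the error, which the old fixed string did not. A tiny illustration of the resulting message (the two values below are placeholders, not the library's own types):

package main

import "fmt"

func main() {
	var got interface{} = int32(7) // value passed to SetExtension
	var want interface{} = ""      // extension.ExtensionType for a string extension
	err := fmt.Errorf("proto: bad extension value type. got: %T, want: %T", got, want)
	fmt.Println(err) // proto: bad extension value type. got: int32, want: string
}
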
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index b2271d0b7..d17f80209 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -341,26 +341,6 @@ type Message interface {
ProtoMessage()
}
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
index 3b6ca41d5..f48a75676 100644
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@ package proto
*/
import (
- "bytes"
- "encoding/json"
"errors"
- "fmt"
- "reflect"
- "sort"
- "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- return marshalMessageSet(exts, false)
-}
-
-// marshalMessageSet implements the function above, with an option to turn deterministic output on or off during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var u marshalInfo
- siz := u.sizeMessageSet(exts)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, exts, deterministic)
-
- case map[int32]Extension:
- // This is an old-style extension map.
- // Wrap it in a new-style XXX_InternalExtensions.
- ie := XXX_InternalExtensions{
- p: &struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }{
- extensionMap: exts,
- },
- }
-
- var u marshalInfo
- siz := u.sizeMessageSet(&ie)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, &ie, deterministic)
-
- default:
- return nil, errors.New("proto: not an extension map")
- }
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var mu sync.Locker
- m, mu = exts.extensionsRead()
- if m != nil {
- // Keep the extensions map locked until we're done marshaling to prevent
- // races between marshaling and unmarshaling the lazily-{en,de}coded
- // values.
- mu.Lock()
- defer mu.Unlock()
- }
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
-
- if i > 0 && b.Len() > 1 {
- b.WriteByte(',')
- }
-
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index 04dcb8d9e..c9e5fa020 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -391,9 +391,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
- if collectStats {
- stats.Chit++
- }
return sprop
}
@@ -406,14 +403,8 @@ func GetProperties(t reflect.Type) *StructProperties {
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
return prop
}
- if collectStats {
- stats.Cmiss++
- }
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
index ba58c49a4..9b1538d05 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -491,7 +491,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
- fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
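
math.MaxInt32 and the old 1<<31 - 1 literal are the same untyped constant; the substitution only makes the sentinel's intent explicit for the oneof sort order described in the comment. A one-line check:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(1<<31-1 == math.MaxInt32, math.MaxInt32) // true 2147483647
}
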
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
index e6b15c76c..bb2622f28 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -138,7 +138,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
- return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
@@ -2142,7 +2142,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
- if len(b) <= 0 {
+ if len(b) == 0 {
goto bad
}
x = uint64(b[0])
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
deleted file mode 100644
index b6cc7cb2e..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
+++ /dev/null
@@ -1,154 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "AnyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-message Any {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- string type_url = 1;
-
- // Must be a valid serialized protocol buffer of the above specified type.
- bytes value = 2;
-}
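
The deleted comment block already sketches the Go packing pattern; for reference, the same round trip is runnable with the golang/protobuf ptypes helpers and a well-known wrapper type as the payload (those package choices are an assumption made for a self-contained example; a generated message type would normally be used):

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Pack: the helper derives the type URL from the message's full name.
	any, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.StringValue

	// Unpack: the target type must match the trailing type name in the URL.
	out := &wrappers.StringValue{}
	if err := ptypes.UnmarshalAny(any, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Value) // hello
}
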
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto
deleted file mode 100644
index 67c1ddbd9..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto
+++ /dev/null
@@ -1,210 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-import "google/protobuf/source_context.proto";
-import "google/protobuf/type.proto";
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "ApiProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option go_package = "types";
-
-// Api is a light-weight descriptor for an API Interface.
-//
-// Interfaces are also described as "protocol buffer services" in some contexts,
-// such as by the "service" keyword in a .proto file, but they are different
-// from API Services, which represent a concrete implementation of an interface
-// as opposed to simply a description of methods and bindings. They are also
-// sometimes simply referred to as "APIs" in other contexts, such as the name of
-// this message itself. See https://cloud.google.com/apis/design/glossary for
-// detailed terminology.
-message Api {
-
- // The fully qualified name of this interface, including package name
- // followed by the interface's simple name.
- string name = 1;
-
- // The methods of this interface, in unspecified order.
- repeated Method methods = 2;
-
- // Any metadata attached to the interface.
- repeated Option options = 3;
-
- // A version string for this interface. If specified, must have the form
- // `major-version.minor-version`, as in `1.10`. If the minor version is
- // omitted, it defaults to zero. If the entire version field is empty, the
- // major version is derived from the package name, as outlined below. If the
- // field is not empty, the version in the package name will be verified to be
- // consistent with what is provided here.
- //
- // The versioning schema uses [semantic
- // versioning](http://semver.org) where the major version number
- // indicates a breaking change and the minor version an additive,
- // non-breaking change. Both version numbers are signals to users
- // what to expect from different versions, and should be carefully
- // chosen based on the product plan.
- //
- // The major version is also reflected in the package name of the
- // interface, which must end in `v<major-version>`, as in
- // `google.feature.v1`. For major versions 0 and 1, the suffix can
- // be omitted. Zero major versions must only be used for
- // experimental, non-GA interfaces.
- //
- //
- string version = 4;
-
- // Source context for the protocol buffer service represented by this
- // message.
- SourceContext source_context = 5;
-
- // Included interfaces. See [Mixin][].
- repeated Mixin mixins = 6;
-
- // The source syntax of the service.
- Syntax syntax = 7;
-}
-
-// Method represents a method of an API interface.
-message Method {
-
- // The simple name of this method.
- string name = 1;
-
- // A URL of the input message type.
- string request_type_url = 2;
-
- // If true, the request is streamed.
- bool request_streaming = 3;
-
- // The URL of the output message type.
- string response_type_url = 4;
-
- // If true, the response is streamed.
- bool response_streaming = 5;
-
- // Any metadata attached to the method.
- repeated Option options = 6;
-
- // The source syntax of this method.
- Syntax syntax = 7;
-}
-
-// Declares an API Interface to be included in this interface. The including
-// interface must redeclare all the methods from the included interface, but
-// documentation and options are inherited as follows:
-//
-// - If after comment and whitespace stripping, the documentation
-// string of the redeclared method is empty, it will be inherited
-// from the original method.
-//
-// - Each annotation belonging to the service config (http,
-// visibility) which is not set in the redeclared method will be
-// inherited.
-//
-// - If an http annotation is inherited, the path pattern will be
-// modified as follows. Any version prefix will be replaced by the
-// version of the including interface plus the [root][] path if
-// specified.
-//
-// Example of a simple mixin:
-//
-// package google.acl.v1;
-// service AccessControl {
-// // Get the underlying ACL object.
-// rpc GetAcl(GetAclRequest) returns (Acl) {
-// option (google.api.http).get = "/v1/{resource=**}:getAcl";
-// }
-// }
-//
-// package google.storage.v2;
-// service Storage {
-// rpc GetAcl(GetAclRequest) returns (Acl);
-//
-// // Get a data record.
-// rpc GetData(GetDataRequest) returns (Data) {
-// option (google.api.http).get = "/v2/{resource=**}";
-// }
-// }
-//
-// Example of a mixin configuration:
-//
-// apis:
-// - name: google.storage.v2.Storage
-// mixins:
-// - name: google.acl.v1.AccessControl
-//
-// The mixin construct implies that all methods in `AccessControl` are
-// also declared with same name and request/response types in
-// `Storage`. A documentation generator or annotation processor will
-// see the effective `Storage.GetAcl` method after inheriting
-// documentation and annotations as follows:
-//
-// service Storage {
-// // Get the underlying ACL object.
-// rpc GetAcl(GetAclRequest) returns (Acl) {
-// option (google.api.http).get = "/v2/{resource=**}:getAcl";
-// }
-// ...
-// }
-//
-// Note how the version in the path pattern changed from `v1` to `v2`.
-//
-// If the `root` field in the mixin is specified, it should be a
-// relative path under which inherited HTTP paths are placed. Example:
-//
-// apis:
-// - name: google.storage.v2.Storage
-// mixins:
-// - name: google.acl.v1.AccessControl
-// root: acls
-//
-// This implies the following inherited HTTP annotation:
-//
-// service Storage {
-// // Get the underlying ACL object.
-// rpc GetAcl(GetAclRequest) returns (Acl) {
-// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
-// }
-// ...
-// }
-message Mixin {
- // The fully qualified name of the interface which is included.
- string name = 1;
-
- // If non-empty specifies a path under which inherited HTTP paths
- // are rooted.
- string root = 2;
-}
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
deleted file mode 100644
index e85c852fc..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
+++ /dev/null
@@ -1,167 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Author: kenton@google.com (Kenton Varda)
-//
-// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
-// change.
-//
-// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
-// just a program that reads a CodeGeneratorRequest from stdin and writes a
-// CodeGeneratorResponse to stdout.
-//
-// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
-// of dealing with the raw protocol defined here.
-//
-// A plugin executable needs only to be placed somewhere in the path. The
-// plugin should be named "protoc-gen-$NAME", and will then be used when the
-// flag "--${NAME}_out" is passed to protoc.
-
-syntax = "proto2";
-package google.protobuf.compiler;
-option java_package = "com.google.protobuf.compiler";
-option java_outer_classname = "PluginProtos";
-
-option go_package = "plugin_go";
-
-import "google/protobuf/descriptor.proto";
-
-// The version number of protocol compiler.
-message Version {
- optional int32 major = 1;
- optional int32 minor = 2;
- optional int32 patch = 3;
- // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
- // be empty for mainline stable releases.
- optional string suffix = 4;
-}
-
-// An encoded CodeGeneratorRequest is written to the plugin's stdin.
-message CodeGeneratorRequest {
- // The .proto files that were explicitly listed on the command-line. The
- // code generator should generate code only for these files. Each file's
- // descriptor will be included in proto_file, below.
- repeated string file_to_generate = 1;
-
- // The generator parameter passed on the command-line.
- optional string parameter = 2;
-
- // FileDescriptorProtos for all files in files_to_generate and everything
- // they import. The files will appear in topological order, so each file
- // appears before any file that imports it.
- //
- // protoc guarantees that all proto_files will be written after
- // the fields above, even though this is not technically guaranteed by the
- // protobuf wire format. This theoretically could allow a plugin to stream
- // in the FileDescriptorProtos and handle them one by one rather than read
- // the entire set into memory at once. However, as of this writing, this
- // is not similarly optimized on protoc's end -- it will store all fields in
- // memory at once before sending them to the plugin.
- //
- // Type names of fields and extensions in the FileDescriptorProto are always
- // fully qualified.
- repeated FileDescriptorProto proto_file = 15;
-
- // The version number of protocol compiler.
- optional Version compiler_version = 3;
-
-}
-
-// The plugin writes an encoded CodeGeneratorResponse to stdout.
-message CodeGeneratorResponse {
- // Error message. If non-empty, code generation failed. The plugin process
- // should exit with status code zero even if it reports an error in this way.
- //
- // This should be used to indicate errors in .proto files which prevent the
- // code generator from generating correct code. Errors which indicate a
- // problem in protoc itself -- such as the input CodeGeneratorRequest being
- // unparseable -- should be reported by writing a message to stderr and
- // exiting with a non-zero status code.
- optional string error = 1;
-
- // Represents a single generated file.
- message File {
- // The file name, relative to the output directory. The name must not
- // contain "." or ".." components and must be relative, not be absolute (so,
- // the file cannot lie outside the output directory). "/" must be used as
- // the path separator, not "\".
- //
- // If the name is omitted, the content will be appended to the previous
- // file. This allows the generator to break large files into small chunks,
- // and allows the generated text to be streamed back to protoc so that large
- // files need not reside completely in memory at one time. Note that as of
- // this writing protoc does not optimize for this -- it will read the entire
- // CodeGeneratorResponse before writing files to disk.
- optional string name = 1;
-
- // If non-empty, indicates that the named file should already exist, and the
- // content here is to be inserted into that file at a defined insertion
- // point. This feature allows a code generator to extend the output
- // produced by another code generator. The original generator may provide
- // insertion points by placing special annotations in the file that look
- // like:
- // @@protoc_insertion_point(NAME)
- // The annotation can have arbitrary text before and after it on the line,
- // which allows it to be placed in a comment. NAME should be replaced with
- // an identifier naming the point -- this is what other generators will use
- // as the insertion_point. Code inserted at this point will be placed
- // immediately above the line containing the insertion point (thus multiple
- // insertions to the same point will come out in the order they were added).
- // The double-@ is intended to make it unlikely that the generated code
- // could contain things that look like insertion points by accident.
- //
- // For example, the C++ code generator places the following line in the
- // .pb.h files that it generates:
- // // @@protoc_insertion_point(namespace_scope)
- // This line appears within the scope of the file's package namespace, but
- // outside of any particular class. Another plugin can then specify the
- // insertion_point "namespace_scope" to generate additional classes or
- // other declarations that should be placed in this scope.
- //
- // Note that if the line containing the insertion point begins with
- // whitespace, the same whitespace will be added to every line of the
- // inserted text. This is useful for languages like Python, where
- // indentation matters. In these languages, the insertion point comment
- // should be indented the same amount as any inserted code will need to be
- // in order to work correctly in that context.
- //
- // The code generator that generates the initial file and the one which
- // inserts into it must both run as part of a single invocation of protoc.
- // Code generators are executed in the order in which they appear on the
- // command line.
- //
- // If |insertion_point| is present, |name| must also be present.
- optional string insertion_point = 2;
-
- // The file contents.
- optional string content = 15;
- }
- repeated File file = 15;
-}
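
The removed header comments describe the whole plugin contract: protoc serializes a CodeGeneratorRequest to the plugin's stdin and expects a serialized CodeGeneratorResponse on stdout, with protoc-level failures reported on stderr and a non-zero exit. A minimal skeleton under that contract, using the generated plugin types (the protoc-gen-gogo/plugin import path is an assumption; golang/protobuf ships the same messages under protoc-gen-go/plugin):

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/gogo/protobuf/proto"
	plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin"
)

func main() {
	// protoc hands the plugin a serialized CodeGeneratorRequest on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Emit one trivial file per requested .proto; a real generator would walk
	// req.ProtoFile, which carries the full FileDescriptorProtos.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.FileToGenerate {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The response goes back to protoc on stdout, also as a binary proto.
	out, err := proto.Marshal(resp)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	os.Stdout.Write(out)
}
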
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
deleted file mode 100644
index 1598ad7c1..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
+++ /dev/null
@@ -1,872 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Author: kenton@google.com (Kenton Varda)
-// Based on original Protocol Buffers design by
-// Sanjay Ghemawat, Jeff Dean, and others.
-//
-// The messages in this file describe the definitions found in .proto files.
-// A valid .proto file can be translated directly to a FileDescriptorProto
-// without any other information (e.g. without reading its imports).
-
-
-syntax = "proto2";
-
-package google.protobuf;
-option go_package = "descriptor";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DescriptorProtos";
-option csharp_namespace = "Google.Protobuf.Reflection";
-option objc_class_prefix = "GPB";
-option cc_enable_arenas = true;
-
-// descriptor.proto must be optimized for speed because reflection-based
-// algorithms don't work during bootstrapping.
-option optimize_for = SPEED;
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-message FileDescriptorSet {
- repeated FileDescriptorProto file = 1;
-}
-
-// Describes a complete .proto file.
-message FileDescriptorProto {
- optional string name = 1; // file name, relative to root of source tree
- optional string package = 2; // e.g. "foo", "foo.bar", etc.
-
- // Names of files imported by this file.
- repeated string dependency = 3;
- // Indexes of the public imported files in the dependency list above.
- repeated int32 public_dependency = 10;
- // Indexes of the weak imported files in the dependency list.
- // For Google-internal migration only. Do not use.
- repeated int32 weak_dependency = 11;
-
- // All top-level definitions in this file.
- repeated DescriptorProto message_type = 4;
- repeated EnumDescriptorProto enum_type = 5;
- repeated ServiceDescriptorProto service = 6;
- repeated FieldDescriptorProto extension = 7;
-
- optional FileOptions options = 8;
-
- // This field contains optional information about the original source code.
- // You may safely remove this entire field without harming runtime
- // functionality of the descriptors -- the information is needed only by
- // development tools.
- optional SourceCodeInfo source_code_info = 9;
-
- // The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
- optional string syntax = 12;
-}
-
-// Describes a message type.
-message DescriptorProto {
- optional string name = 1;
-
- repeated FieldDescriptorProto field = 2;
- repeated FieldDescriptorProto extension = 6;
-
- repeated DescriptorProto nested_type = 3;
- repeated EnumDescriptorProto enum_type = 4;
-
- message ExtensionRange {
- optional int32 start = 1;
- optional int32 end = 2;
-
- optional ExtensionRangeOptions options = 3;
- }
- repeated ExtensionRange extension_range = 5;
-
- repeated OneofDescriptorProto oneof_decl = 8;
-
- optional MessageOptions options = 7;
-
- // Range of reserved tag numbers. Reserved tag numbers may not be used by
- // fields or extension ranges in the same message. Reserved ranges may
- // not overlap.
- message ReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Exclusive.
- }
- repeated ReservedRange reserved_range = 9;
- // Reserved field names, which may not be used by fields in the same message.
- // A given name may only be reserved once.
- repeated string reserved_name = 10;
-}
-
-message ExtensionRangeOptions {
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-// Describes a field within a message.
-message FieldDescriptorProto {
- enum Type {
- // 0 is reserved for errors.
- // Order is weird for historical reasons.
- TYPE_DOUBLE = 1;
- TYPE_FLOAT = 2;
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
- // negative values are likely.
- TYPE_INT64 = 3;
- TYPE_UINT64 = 4;
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
- // negative values are likely.
- TYPE_INT32 = 5;
- TYPE_FIXED64 = 6;
- TYPE_FIXED32 = 7;
- TYPE_BOOL = 8;
- TYPE_STRING = 9;
- // Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
- // implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
- TYPE_GROUP = 10;
- TYPE_MESSAGE = 11; // Length-delimited aggregate.
-
- // New in version 2.
- TYPE_BYTES = 12;
- TYPE_UINT32 = 13;
- TYPE_ENUM = 14;
- TYPE_SFIXED32 = 15;
- TYPE_SFIXED64 = 16;
- TYPE_SINT32 = 17; // Uses ZigZag encoding.
- TYPE_SINT64 = 18; // Uses ZigZag encoding.
- };
-
- enum Label {
- // 0 is reserved for errors
- LABEL_OPTIONAL = 1;
- LABEL_REQUIRED = 2;
- LABEL_REPEATED = 3;
- };
-
- optional string name = 1;
- optional int32 number = 3;
- optional Label label = 4;
-
- // If type_name is set, this need not be set. If both this and type_name
- // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
- optional Type type = 5;
-
- // For message and enum types, this is the name of the type. If the name
- // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
- // rules are used to find the type (i.e. first the nested types within this
- // message are searched, then within the parent, on up to the root
- // namespace).
- optional string type_name = 6;
-
- // For extensions, this is the name of the type being extended. It is
- // resolved in the same manner as type_name.
- optional string extendee = 2;
-
- // For numeric types, contains the original text representation of the value.
- // For booleans, "true" or "false".
- // For strings, contains the default text contents (not escaped in any way).
- // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
- optional string default_value = 7;
-
- // If set, gives the index of a oneof in the containing type's oneof_decl
- // list. This field is a member of that oneof.
- optional int32 oneof_index = 9;
-
- // JSON name of this field. The value is set by protocol compiler. If the
- // user has set a "json_name" option on this field, that option's value
- // will be used. Otherwise, it's deduced from the field's name by converting
- // it to camelCase.
- optional string json_name = 10;
-
- optional FieldOptions options = 8;
-}
-
-// Describes a oneof.
-message OneofDescriptorProto {
- optional string name = 1;
- optional OneofOptions options = 2;
-}
-
-// Describes an enum type.
-message EnumDescriptorProto {
- optional string name = 1;
-
- repeated EnumValueDescriptorProto value = 2;
-
- optional EnumOptions options = 3;
-
- // Range of reserved numeric values. Reserved values may not be used by
- // entries in the same enum. Reserved ranges may not overlap.
- //
- // Note that this is distinct from DescriptorProto.ReservedRange in that it
- // is inclusive such that it can appropriately represent the entire int32
- // domain.
- message EnumReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Inclusive.
- }
-
- // Range of reserved numeric values. Reserved numeric values may not be used
- // by enum values in the same enum declaration. Reserved ranges may not
- // overlap.
- repeated EnumReservedRange reserved_range = 4;
-
- // Reserved enum value names, which may not be reused. A given name may only
- // be reserved once.
- repeated string reserved_name = 5;
-}
-
-// Describes a value within an enum.
-message EnumValueDescriptorProto {
- optional string name = 1;
- optional int32 number = 2;
-
- optional EnumValueOptions options = 3;
-}
-
-// Describes a service.
-message ServiceDescriptorProto {
- optional string name = 1;
- repeated MethodDescriptorProto method = 2;
-
- optional ServiceOptions options = 3;
-}
-
-// Describes a method of a service.
-message MethodDescriptorProto {
- optional string name = 1;
-
- // Input and output type names. These are resolved in the same way as
- // FieldDescriptorProto.type_name, but must refer to a message type.
- optional string input_type = 2;
- optional string output_type = 3;
-
- optional MethodOptions options = 4;
-
- // Identifies if client streams multiple client messages
- optional bool client_streaming = 5 [default=false];
- // Identifies if server streams multiple server messages
- optional bool server_streaming = 6 [default=false];
-}
-
-
-// ===================================================================
-// Options
-
-// Each of the definitions above may have "options" attached. These are
-// just annotations which may cause code to be generated slightly differently
-// or may contain hints for code that manipulates protocol messages.
-//
-// Clients may define custom options as extensions of the *Options messages.
-// These extensions may not yet be known at parsing time, so the parser cannot
-// store the values in them. Instead it stores them in a field in the *Options
-// message called uninterpreted_option. This field must have the same name
-// across all *Options messages. We then use this field to populate the
-// extensions when we build a descriptor, at which point all protos have been
-// parsed and so all extensions are known.
-//
-// Extension numbers for custom options may be chosen as follows:
-// * For options which will only be used within a single application or
-// organization, or for experimental options, use field numbers 50000
-// through 99999. It is up to you to ensure that you do not use the
-// same number for multiple options.
-// * For options which will be published and used publicly by multiple
-// independent entities, e-mail protobuf-global-extension-registry@google.com
-// to reserve extension numbers. Simply provide your project name (e.g.
-// Objective-C plugin) and your project website (if available) -- there's no
-// need to explain how you intend to use them. Usually you only need one
-// extension number. You can declare multiple options with only one extension
-// number by putting them in a sub-message. See the Custom Options section of
-// the docs for examples:
-// https://developers.google.com/protocol-buffers/docs/proto#options
-// If this turns out to be popular, a web service will be set up
-// to automatically assign option numbers.
-
-
-message FileOptions {
-
- // Sets the Java package where classes generated from this .proto will be
- // placed. By default, the proto package is used, but this is often
- // inappropriate because proto packages do not normally start with backwards
- // domain names.
- optional string java_package = 1;
-
-
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
- optional string java_outer_classname = 8;
-
- // If set true, then the Java code generator will generate a separate .java
- // file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
- // generated to contain the file's getDescriptor() method as well as any
- // top-level extensions defined in the file.
- optional bool java_multiple_files = 10 [default=false];
-
- // This option does nothing.
- optional bool java_generate_equals_and_hash = 20 [deprecated=true];
-
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect when used with the lite runtime.
- optional bool java_string_check_utf8 = 27 [default=false];
-
-
- // Generated classes can be optimized for speed or code size.
- enum OptimizeMode {
- SPEED = 1; // Generate complete code for parsing, serialization,
- // etc.
- CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
- LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
- }
- optional OptimizeMode optimize_for = 9 [default=SPEED];
-
- // Sets the Go package where structs generated from this .proto will be
- // placed. If omitted, the Go package will be derived from the following:
- // - The basename of the package import path, if provided.
- // - Otherwise, the package statement in the .proto file, if present.
- // - Otherwise, the basename of the .proto file, without extension.
- optional string go_package = 11;
-
-
-
- // Should generic services be generated in each language? "Generic" services
- // are not specific to any particular RPC system. They are generated by the
- // main code generators in each language (without additional plugins).
- // Generic services were the only kind of service generation supported by
- // early versions of google.protobuf.
- //
- // Generic services are now considered deprecated in favor of using plugins
- // that generate code specific to your particular RPC system. Therefore,
- // these default to false. Old code which depends on generic services should
- // explicitly set them to true.
- optional bool cc_generic_services = 16 [default=false];
- optional bool java_generic_services = 17 [default=false];
- optional bool py_generic_services = 18 [default=false];
- optional bool php_generic_services = 42 [default=false];
-
- // Is this file deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for everything in the file, or it will be completely ignored; in the very
- // least, this is a formalization for deprecating files.
- optional bool deprecated = 23 [default=false];
-
- // Enables the use of arenas for the proto messages in this file. This applies
- // only to generated classes for C++.
- optional bool cc_enable_arenas = 31 [default=false];
-
-
- // Sets the objective c class prefix which is prepended to all objective c
- // generated classes from this .proto. There is no default.
- optional string objc_class_prefix = 36;
-
- // Namespace for generated classes; defaults to the package.
- optional string csharp_namespace = 37;
-
- // By default Swift generators will take the proto package and CamelCase it
- // replacing '.' with underscore and use that to prefix the types/symbols
- // defined. When this option is provided, they will use this value instead
- // to prefix the types/symbols defined.
- optional string swift_prefix = 39;
-
- // Sets the php class prefix which is prepended to all php generated classes
- // from this .proto. Default is empty.
- optional string php_class_prefix = 40;
-
- // Use this option to change the namespace of php generated classes. Default
- // is empty. When this option is empty, the package name will be used for
- // determining the namespace.
- optional string php_namespace = 41;
-
- // The parser stores options it doesn't recognize here.
- // See the documentation for the "Options" section above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message.
- // See the documentation for the "Options" section above.
- extensions 1000 to max;
-
- //reserved 38;
-}
-
-message MessageOptions {
- // Set true to use the old proto1 MessageSet wire format for extensions.
- // This is provided for backwards-compatibility with the MessageSet wire
- // format. You should not use this for any other reason: It's less
- // efficient, has fewer features, and is more complicated.
- //
- // The message must be defined exactly as follows:
- // message Foo {
- // option message_set_wire_format = true;
- // extensions 4 to max;
- // }
- // Note that the message cannot have any defined fields; MessageSets only
- // have extensions.
- //
- // All extensions of your type must be singular messages; e.g. they cannot
- // be int32s, enums, or repeated messages.
- //
- // Because this is an option, the above two restrictions are not enforced by
- // the protocol compiler.
- optional bool message_set_wire_format = 1 [default=false];
-
- // Disables the generation of the standard "descriptor()" accessor, which can
- // conflict with a field of the same name. This is meant to make migration
- // from proto1 easier; new code should avoid fields named "descriptor".
- optional bool no_standard_descriptor_accessor = 2 [default=false];
-
- // Is this message deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for the message, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating messages.
- optional bool deprecated = 3 [default=false];
-
- // Whether the message is an automatically generated map entry type for the
- // maps field.
- //
- // For maps fields:
- // map<KeyType, ValueType> map_field = 1;
- // The parsed descriptor looks like:
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
- //
- // Implementations may choose not to generate the map_entry=true message, but
- // use a native map in the target language to hold the keys and values.
-  // The reflection APIs in such implementations still need to work as
- // if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
- optional bool map_entry = 7;
-
- //reserved 8; // javalite_serializable
- //reserved 9; // javanano_as_lite
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message FieldOptions {
- // The ctype option instructs the C++ code generator to use a different
- // representation of the field than it normally would. See the specific
- // options below. This option is not yet implemented in the open source
- // release -- sorry, we'll try to include it in a future version!
- optional CType ctype = 1 [default = STRING];
- enum CType {
- // Default mode.
- STRING = 0;
-
- CORD = 1;
-
- STRING_PIECE = 2;
- }
- // The packed option can be enabled for repeated primitive fields to enable
- // a more efficient representation on the wire. Rather than repeatedly
- // writing the tag and type for each element, the entire array is encoded as
-  // a single length-delimited blob. In proto3, only explicitly setting it to
- // false will avoid using packed encoding.
- optional bool packed = 2;
-
- // The jstype option determines the JavaScript type used for values of the
- // field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
-  // is represented as a JavaScript string, which avoids loss of precision that
-  // can happen when a large value is converted to a JavaScript floating point number.
- // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
- // use the JavaScript "number" type. The behavior of the default option
- // JS_NORMAL is implementation dependent.
- //
- // This option is an enum to permit additional types to be added, e.g.
- // goog.math.Integer.
- optional JSType jstype = 6 [default = JS_NORMAL];
- enum JSType {
- // Use the default type.
- JS_NORMAL = 0;
-
- // Use JavaScript strings.
- JS_STRING = 1;
-
- // Use JavaScript numbers.
- JS_NUMBER = 2;
- }
-
- // Should this field be parsed lazily? Lazy applies only to message-type
- // fields. It means that when the outer message is initially parsed, the
- // inner message's contents will not be parsed but instead stored in encoded
- // form. The inner message will actually be parsed when it is first accessed.
- //
- // This is only a hint. Implementations are free to choose whether to use
- // eager or lazy parsing regardless of the value of this option. However,
- // setting this option true suggests that the protocol author believes that
- // using lazy parsing on this field is worth the additional bookkeeping
- // overhead typically needed to implement it.
- //
- // This option does not affect the public interface of any generated code;
- // all method signatures remain the same. Furthermore, thread-safety of the
- // interface is not affected by this option; const methods remain safe to
- // call from multiple threads concurrently, while non-const methods continue
- // to require exclusive access.
- //
- //
- // Note that implementations may choose not to check required fields within
- // a lazy sub-message. That is, calling IsInitialized() on the outer message
- // may return true even if the inner message has missing required fields.
- // This is necessary because otherwise the inner message would have to be
- // parsed in order to perform the check, defeating the purpose of lazy
- // parsing. An implementation which chooses not to check required fields
- // must be consistent about it. That is, for any particular sub-message, the
- // implementation must either *always* check its required fields, or *never*
- // check its required fields, regardless of whether or not the message has
- // been parsed.
- optional bool lazy = 5 [default=false];
-
- // Is this field deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for accessors, or it will be completely ignored; at the very least, this
- // is a formalization for deprecating fields.
- optional bool deprecated = 3 [default=false];
-
- // For Google-internal migration only. Do not use.
- optional bool weak = 10 [default=false];
-
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-
- //reserved 4; // removed jtype
-}
-
-message OneofOptions {
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message EnumOptions {
-
- // Set this option to true to allow mapping different tag names to the same
- // value.
- optional bool allow_alias = 2;
-
- // Is this enum deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for the enum, or it will be completely ignored; at the very least, this
- // is a formalization for deprecating enums.
- optional bool deprecated = 3 [default=false];
-
- //reserved 5; // javanano_as_lite
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message EnumValueOptions {
- // Is this enum value deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for the enum value, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating enum values.
- optional bool deprecated = 1 [default=false];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message ServiceOptions {
-
- // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
- // framework. We apologize for hoarding these numbers to ourselves, but
- // we were already using them long before we decided to release Protocol
- // Buffers.
-
- // Is this service deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for the service, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating services.
- optional bool deprecated = 33 [default=false];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message MethodOptions {
-
- // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
- // framework. We apologize for hoarding these numbers to ourselves, but
- // we were already using them long before we decided to release Protocol
- // Buffers.
-
- // Is this method deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-  // for the method, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating methods.
- optional bool deprecated = 33 [default=false];
-
- // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-  // or neither? HTTP-based RPC implementations may choose the GET verb for safe
-  // methods, and the PUT verb for idempotent methods, instead of the default POST.
- enum IdempotencyLevel {
- IDEMPOTENCY_UNKNOWN = 0;
- NO_SIDE_EFFECTS = 1; // implies idempotent
- IDEMPOTENT = 2; // idempotent, but may have side effects
- }
- optional IdempotencyLevel idempotency_level =
- 34 [default=IDEMPOTENCY_UNKNOWN];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-
-// A message representing an option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-message UninterpretedOption {
- // The name of the uninterpreted option. Each string represents a segment in
- // a dot-separated name. is_extension is true iff a segment represents an
- // extension (denoted with parentheses in options specs in .proto files).
- // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
- // "foo.(bar.baz).qux".
- message NamePart {
- required string name_part = 1;
- required bool is_extension = 2;
- }
- repeated NamePart name = 2;
-
- // The value of the uninterpreted option, in whatever type the tokenizer
- // identified it as during parsing. Exactly one of these should be set.
- optional string identifier_value = 3;
- optional uint64 positive_int_value = 4;
- optional int64 negative_int_value = 5;
- optional double double_value = 6;
- optional bytes string_value = 7;
- optional string aggregate_value = 8;
-}
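
The NamePart encoding described above is easy to check end to end. Below is a minimal, self-contained Go sketch (not part of the vendored sources; the namePart type and render helper are illustrative stand-ins for the generated message) that joins the example segments back into the dotted option name:

    package main

    import (
        "fmt"
        "strings"
    )

    // namePart mirrors UninterpretedOption.NamePart for illustration only.
    type namePart struct {
        NamePart    string
        IsExtension bool
    }

    // render joins NamePart segments back into a dotted option name, wrapping
    // extension segments in parentheses as described in the comment above.
    func render(parts []namePart) string {
        segs := make([]string, 0, len(parts))
        for _, p := range parts {
            if p.IsExtension {
                segs = append(segs, "("+p.NamePart+")")
            } else {
                segs = append(segs, p.NamePart)
            }
        }
        return strings.Join(segs, ".")
    }

    func main() {
        // The example from the NamePart comment above.
        fmt.Println(render([]namePart{{"foo", false}, {"bar.baz", true}, {"qux", false}}))
        // Output: foo.(bar.baz).qux
    }
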
-
-// ===================================================================
-// Optional source code info
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-message SourceCodeInfo {
- // A Location identifies a piece of source code in a .proto file which
- // corresponds to a particular definition. This information is intended
- // to be useful to IDEs, code indexers, documentation generators, and similar
- // tools.
- //
- // For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
- // Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
- // We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
- //
- // Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
-  //   does not mean that it is a descendant. For example, a "group" defines
- // both a type and a field in a single declaration. Thus, the locations
- // corresponding to the type and field and their components will overlap.
- // - Code which tries to interpret locations should probably be designed to
- // ignore those that it doesn't understand, as more types of locations could
- // be recorded in the future.
- repeated Location location = 1;
- message Location {
- // Identifies which part of the FileDescriptorProto was defined at this
- // location.
- //
- // Each element is a field number or an index. They form a path from
-    // the root FileDescriptorProto to the place where the definition appears. For
- // example, this path:
- // [ 4, 3, 2, 7, 1 ]
- // refers to:
- // file.message_type(3) // 4, 3
- // .field(7) // 2, 7
- // .name() // 1
- // This is because FileDescriptorProto.message_type has field number 4:
- // repeated DescriptorProto message_type = 4;
- // and DescriptorProto.field has field number 2:
- // repeated FieldDescriptorProto field = 2;
- // and FieldDescriptorProto.name has field number 1:
- // optional string name = 1;
- //
- // Thus, the above path gives the location of a field name. If we removed
- // the last element:
- // [ 4, 3, 2, 7 ]
- // this path refers to the whole field declaration (from the beginning
- // of the label to the terminating semicolon).
- repeated int32 path = 1 [packed=true];
-
- // Always has exactly three or four elements: start line, start column,
- // end line (optional, otherwise assumed same as start line), end column.
- // These are packed into a single field for efficiency. Note that line
- // and column numbers are zero-based -- typically you will want to add
- // 1 to each before displaying to a user.
- repeated int32 span = 2 [packed=true];
-
- // If this SourceCodeInfo represents a complete declaration, these are any
- // comments appearing before and after the declaration which appear to be
- // attached to the declaration.
- //
- // A series of line comments appearing on consecutive lines, with no other
- // tokens appearing on those lines, will be treated as a single comment.
- //
- // leading_detached_comments will keep paragraphs of comments that appear
- // before (but not connected to) the current element. Each paragraph,
- // separated by empty lines, will be one comment element in the repeated
- // field.
- //
- // Only the comment content is provided; comment markers (e.g. //) are
- // stripped out. For block comments, leading whitespace and an asterisk
- // will be stripped from the beginning of each line other than the first.
- // Newlines are included in the output.
- //
- // Examples:
- //
- // optional int32 foo = 1; // Comment attached to foo.
- // // Comment attached to bar.
- // optional int32 bar = 2;
- //
- // optional string baz = 3;
- // // Comment attached to baz.
- // // Another line attached to baz.
- //
- // // Comment attached to qux.
- // //
- // // Another line attached to qux.
- // optional double qux = 4;
- //
- // // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
- // // both.
- //
- // // Detached comment for corge paragraph 2.
- //
- // optional string corge = 5;
- // /* Block comment attached
- // * to corge. Leading asterisks
- // * will be removed. */
- // /* Block comment attached to
- // * grault. */
- // optional int32 grault = 6;
- //
- // // ignored detached comments.
- optional string leading_comments = 3;
- optional string trailing_comments = 4;
- repeated string leading_detached_comments = 6;
- }
-}
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-message GeneratedCodeInfo {
- // An Annotation connects some span of text in generated code to an element
- // of its generating .proto file.
- repeated Annotation annotation = 1;
- message Annotation {
- // Identifies the element in the original source .proto file. This field
- // is formatted the same as SourceCodeInfo.Location.path.
- repeated int32 path = 1 [packed=true];
-
- // Identifies the filesystem path to the original source .proto.
- optional string source_file = 2;
-
- // Identifies the starting offset in bytes in the generated code
- // that relates to the identified object.
- optional int32 begin = 3;
-
- // Identifies the ending offset in bytes in the generated code that
-    // relates to the identified object. The end offset should be one past
- // the last relevant byte (so the length of the text = end - begin).
- optional int32 end = 4;
- }
-}
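
To make the Location path encoding concrete, here is a small, self-contained Go sketch (illustrative only; describePath and the constants are local helpers, not part of this package) that interprets the worked example path [4, 3, 2, 7, 1] the same way the comment does:

    package main

    import "fmt"

    // Field numbers taken from the definitions above:
    // FileDescriptorProto.message_type = 4, DescriptorProto.field = 2,
    // FieldDescriptorProto.name = 1.
    const (
        fileMessageType = 4
        messageField    = 2
        fieldName       = 1
    )

    // describePath renders a SourceCodeInfo.Location path of alternating field
    // numbers and indexes as an accessor chain. It only handles the shapes used
    // in the worked example above.
    func describePath(path []int32) string {
        out := "file"
        for i := 0; i+1 < len(path); i += 2 {
            switch path[i] {
            case fileMessageType:
                out += fmt.Sprintf(".message_type(%d)", path[i+1])
            case messageField:
                out += fmt.Sprintf(".field(%d)", path[i+1])
            default:
                out += fmt.Sprintf(".field_%d(%d)", path[i], path[i+1])
            }
        }
        // A trailing odd element is a bare field number, e.g. name = 1.
        if len(path)%2 == 1 && path[len(path)-1] == fieldName {
            out += ".name()"
        }
        return out
    }

    func main() {
        fmt.Println(describePath([]int32{4, 3, 2, 7, 1}))
        // Output: file.message_type(3).field(7).name()
    }
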
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
deleted file mode 100644
index 8bbaa8b62..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
+++ /dev/null
@@ -1,117 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-//     } else if (duration.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-message Duration {
-
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- int64 seconds = 1;
-
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- int32 nanos = 2;
-}
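
The sign-normalization rule from Example 1 above translates directly into Go. The sketch below is illustrative only: it uses a local duration struct rather than the generated Duration type.

    package main

    import "fmt"

    // duration mirrors the seconds/nanos layout of google.protobuf.Duration;
    // it is a local illustrative type, not the generated one.
    type duration struct {
        Seconds int64
        Nanos   int32
    }

    // sub computes end - start, applying the normalization from Example 1:
    // Seconds and Nanos must end up with the same sign (or Nanos may be zero).
    func sub(endSec, startSec int64, endNanos, startNanos int32) duration {
        d := duration{Seconds: endSec - startSec, Nanos: endNanos - startNanos}
        if d.Seconds < 0 && d.Nanos > 0 {
            d.Seconds++
            d.Nanos -= 1000000000
        } else if d.Seconds > 0 && d.Nanos < 0 {
            d.Seconds--
            d.Nanos += 1000000000
        }
        return d
    }

    func main() {
        // 5.25s minus 2.75s = 2.5s => {Seconds:2 Nanos:500000000}.
        fmt.Printf("%+v\n", sub(5, 2, 250000000, 750000000))
    }
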
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
deleted file mode 100644
index 121619814..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
+++ /dev/null
@@ -1,252 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "FieldMaskProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option go_package = "types";
-
-// `FieldMask` represents a set of symbolic field paths, for example:
-//
-// paths: "f.a"
-// paths: "f.b.d"
-//
-// Here `f` represents a field in some root message, `a` and `b`
-// fields in the message found in `f`, and `d` a field found in the
-// message in `f.b`.
-//
-// Field masks are used to specify a subset of fields that should be
-// returned by a get operation or modified by an update operation.
-// Field masks also have a custom JSON encoding (see below).
-//
-// # Field Masks in Projections
-//
-// When used in the context of a projection, a response message or
-// sub-message is filtered by the API to only contain those fields as
-// specified in the mask. For example, if the mask in the previous
-// example is applied to a response message as follows:
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// x : 2
-// }
-// y : 13
-// }
-// z: 8
-//
-// The result will not contain specific values for fields x, y and z
-// (their values will be set to the default, and omitted in proto text
-// output):
-//
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// }
-// }
-//
-// A repeated field is not allowed except at the last position of a
-// paths string.
-//
-// If a FieldMask object is not present in a get operation, the
-// operation applies to all fields (as if a FieldMask of all fields
-// had been specified).
-//
-// Note that a field mask does not necessarily apply to the
-// top-level response message. In case of a REST get operation, the
-// field mask applies directly to the response, but in case of a REST
-// list operation, the mask instead applies to each individual message
-// in the returned resource list. In case of a REST custom method,
-// other definitions may be used. Where the mask applies will be
-// clearly documented together with its declaration in the API. In
-// any case, the effect on the returned resource/resources is required
-// behavior for APIs.
-//
-// # Field Masks in Update Operations
-//
-// A field mask in update operations specifies which fields of the
-// targeted resource are going to be updated. The API is required
-// to only change the values of the fields as specified in the mask
-// and leave the others untouched. If a resource is passed in to
-// describe the updated values, the API ignores the values of all
-// fields not covered by the mask.
-//
-// If a repeated field is specified for an update operation, the existing
-// repeated values in the target resource will be overwritten by the new values.
-// Note that a repeated field is only allowed in the last position of a `paths`
-// string.
-//
-// If a sub-message is specified in the last position of the field mask for an
-// update operation, then the existing sub-message in the target resource is
-// overwritten. Given the target message:
-//
-// f {
-// b {
-// d : 1
-// x : 2
-// }
-// c : 1
-// }
-//
-// And an update message:
-//
-// f {
-// b {
-// d : 10
-// }
-// }
-//
-// then if the field mask is:
-//
-// paths: "f.b"
-//
-// then the result will be:
-//
-// f {
-// b {
-// d : 10
-// }
-// c : 1
-// }
-//
-// However, if the update mask was:
-//
-// paths: "f.b.d"
-//
-// then the result would be:
-//
-// f {
-// b {
-// d : 10
-// x : 2
-// }
-// c : 1
-// }
-//
-// In order to reset a field's value to the default, the field must
-// be in the mask and set to the default value in the provided resource.
-// Hence, in order to reset all fields of a resource, provide a default
-// instance of the resource and set all fields in the mask, or do
-// not provide a mask as described below.
-//
-// If a field mask is not present on update, the operation applies to
-// all fields (as if a field mask of all fields has been specified).
-// Note that in the presence of schema evolution, this may mean that
-// fields the client does not know and has therefore not filled into
-// the request will be reset to their default. If this is unwanted
-// behavior, a specific service may require a client to always specify
-// a field mask, producing an error if not.
-//
-// As with get operations, the location of the resource which
-// describes the updated values in the request message depends on the
-// operation kind. In any case, the effect of the field mask is
-// required to be honored by the API.
-//
-// ## Considerations for HTTP REST
-//
-// The HTTP kind of an update operation which uses a field mask must
-// be set to PATCH instead of PUT in order to satisfy HTTP semantics
-// (PUT must only be used for full updates).
-//
-// # JSON Encoding of Field Masks
-//
-// In JSON, a field mask is encoded as a single string where paths are
-// separated by a comma. Field names in each path are converted
-// to/from lower-camel naming conventions.
-//
-// As an example, consider the following message declarations:
-//
-// message Profile {
-// User user = 1;
-// Photo photo = 2;
-// }
-// message User {
-// string display_name = 1;
-// string address = 2;
-// }
-//
-// In proto a field mask for `Profile` may look like this:
-//
-// mask {
-// paths: "user.display_name"
-// paths: "photo"
-// }
-//
-// In JSON, the same mask is represented as below:
-//
-// {
-// mask: "user.displayName,photo"
-// }
-//
-// # Field Masks and Oneof Fields
-//
-// Field masks treat fields in oneofs just as regular fields. Consider the
-// following message:
-//
-// message SampleMessage {
-// oneof test_oneof {
-// string name = 4;
-// SubMessage sub_message = 9;
-// }
-// }
-//
-// The field mask can be:
-//
-// mask {
-// paths: "name"
-// }
-//
-// Or:
-//
-// mask {
-// paths: "sub_message"
-// }
-//
-// Note that oneof type names ("test_oneof" in this case) cannot be used in
-// paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of all the API methods that have a FieldMask type
-// field in the request should verify the included field paths, and return an
-// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
-message FieldMask {
- // The set of field mask paths.
- repeated string paths = 1;
-}
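
The JSON encoding rules above (paths joined by commas, field names converted to lowerCamel) fit in a few lines of Go. This is a sketch of the stated rules only; toLowerCamel and maskToJSON are local helpers, not an API of the vendored packages.

    package main

    import (
        "fmt"
        "strings"
    )

    // toLowerCamel converts a snake_case segment ("display_name") to the
    // lowerCamel form used by the JSON encoding ("displayName").
    func toLowerCamel(s string) string {
        parts := strings.Split(s, "_")
        for i := 1; i < len(parts); i++ {
            if parts[i] != "" {
                parts[i] = strings.ToUpper(parts[i][:1]) + parts[i][1:]
            }
        }
        return strings.Join(parts, "")
    }

    // maskToJSON renders a list of FieldMask paths as the single
    // comma-separated JSON string described above. Dots separate nested
    // fields, so each dotted component is converted independently.
    func maskToJSON(paths []string) string {
        out := make([]string, 0, len(paths))
        for _, p := range paths {
            segs := strings.Split(p, ".")
            for i, seg := range segs {
                segs[i] = toLowerCamel(seg)
            }
            out = append(out, strings.Join(segs, "."))
        }
        return strings.Join(out, ",")
    }

    func main() {
        fmt.Println(maskToJSON([]string{"user.display_name", "photo"}))
        // Output: user.displayName,photo
    }
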
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
deleted file mode 100644
index 4f78641fa..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "StructProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-message Struct {
- // Unordered map of dynamically typed values.
- map<string, Value> fields = 1;
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of these
-// variants; absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-message Value {
- // The kind of value.
- oneof kind {
- // Represents a null value.
- NullValue null_value = 1;
- // Represents a double value.
- double number_value = 2;
- // Represents a string value.
- string string_value = 3;
- // Represents a boolean value.
- bool bool_value = 4;
- // Represents a structured value.
- Struct struct_value = 5;
- // Represents a repeated `Value`.
- ListValue list_value = 6;
- }
-}
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-enum NullValue {
- // Null value.
- NULL_VALUE = 0;
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-message ListValue {
- // Repeated field of dynamically typed values.
- repeated Value values = 1;
-}
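
As a rough illustration of how dynamically typed data lines up with the Value variants above, the following self-contained Go sketch classifies JSON-style Go values by the variant they would occupy. kindOf is a local helper and deliberately does not construct the generated types.

    package main

    import "fmt"

    // kindOf reports which Value variant (as defined above) a dynamically
    // typed Go value would map to. It covers the JSON-style types that
    // Struct/Value are designed to carry.
    func kindOf(v interface{}) string {
        switch v.(type) {
        case nil:
            return "null_value"
        case float64:
            return "number_value"
        case string:
            return "string_value"
        case bool:
            return "bool_value"
        case map[string]interface{}:
            return "struct_value"
        case []interface{}:
            return "list_value"
        default:
            return "unsupported"
        }
    }

    func main() {
        doc := map[string]interface{}{
            "name":  "example",
            "count": float64(3),
            "tags":  []interface{}{"a", "b"},
            "extra": nil,
        }
        for k, v := range doc {
            fmt.Printf("%s -> %s\n", k, kindOf(v))
        }
    }
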
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
deleted file mode 100644
index 150468b5e..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-message Timestamp {
-
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
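
In the spirit of the POSIX examples above, a Go time.Time splits into the same seconds/nanos pair, and the RFC 3339 JSON shape falls out of the standard-library formatter. A minimal sketch using only the standard library (the generated Timestamp type is not involved):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Split a Go time.Time into the seconds/nanos pair used by Timestamp.
        now := time.Now().UTC()
        seconds := now.Unix()            // whole seconds since the Unix epoch
        nanos := int32(now.Nanosecond()) // always in [0, 999999999]

        fmt.Printf("seconds=%d nanos=%d\n", seconds, nanos)

        // The JSON mapping described above is RFC 3339 with a "Z" suffix;
        // time.RFC3339Nano produces the same shape for UTC times.
        fmt.Println(now.Format(time.RFC3339Nano))
    }
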
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto
deleted file mode 100644
index fcd15bfd7..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto
+++ /dev/null
@@ -1,187 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-import "google/protobuf/any.proto";
-import "google/protobuf/source_context.proto";
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TypeProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option go_package = "types";
-
-// A protocol buffer message type.
-message Type {
- // The fully qualified message name.
- string name = 1;
- // The list of fields.
- repeated Field fields = 2;
- // The list of types appearing in `oneof` definitions in this type.
- repeated string oneofs = 3;
- // The protocol buffer options.
- repeated Option options = 4;
- // The source context.
- SourceContext source_context = 5;
- // The source syntax.
- Syntax syntax = 6;
-}
-
-// A single field of a message type.
-message Field {
- // Basic field types.
- enum Kind {
- // Field type unknown.
- TYPE_UNKNOWN = 0;
- // Field type double.
- TYPE_DOUBLE = 1;
- // Field type float.
- TYPE_FLOAT = 2;
- // Field type int64.
- TYPE_INT64 = 3;
- // Field type uint64.
- TYPE_UINT64 = 4;
- // Field type int32.
- TYPE_INT32 = 5;
- // Field type fixed64.
- TYPE_FIXED64 = 6;
- // Field type fixed32.
- TYPE_FIXED32 = 7;
- // Field type bool.
- TYPE_BOOL = 8;
- // Field type string.
- TYPE_STRING = 9;
- // Field type group. Proto2 syntax only, and deprecated.
- TYPE_GROUP = 10;
- // Field type message.
- TYPE_MESSAGE = 11;
- // Field type bytes.
- TYPE_BYTES = 12;
- // Field type uint32.
- TYPE_UINT32 = 13;
- // Field type enum.
- TYPE_ENUM = 14;
- // Field type sfixed32.
- TYPE_SFIXED32 = 15;
- // Field type sfixed64.
- TYPE_SFIXED64 = 16;
- // Field type sint32.
- TYPE_SINT32 = 17;
- // Field type sint64.
- TYPE_SINT64 = 18;
- };
-
- // Whether a field is optional, required, or repeated.
- enum Cardinality {
- // For fields with unknown cardinality.
- CARDINALITY_UNKNOWN = 0;
- // For optional fields.
- CARDINALITY_OPTIONAL = 1;
- // For required fields. Proto2 syntax only.
- CARDINALITY_REQUIRED = 2;
- // For repeated fields.
- CARDINALITY_REPEATED = 3;
- };
-
- // The field type.
- Kind kind = 1;
- // The field cardinality.
- Cardinality cardinality = 2;
- // The field number.
- int32 number = 3;
- // The field name.
- string name = 4;
- // The field type URL, without the scheme, for message or enumeration
- // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`.
- string type_url = 6;
- // The index of the field type in `Type.oneofs`, for message or enumeration
- // types. The first type has index 1; zero means the type is not in the list.
- int32 oneof_index = 7;
- // Whether to use alternative packed wire representation.
- bool packed = 8;
- // The protocol buffer options.
- repeated Option options = 9;
- // The field JSON name.
- string json_name = 10;
- // The string value of the default value of this field. Proto2 syntax only.
- string default_value = 11;
-}
-
-// Enum type definition.
-message Enum {
- // Enum type name.
- string name = 1;
- // Enum value definitions.
- repeated EnumValue enumvalue = 2;
- // Protocol buffer options.
- repeated Option options = 3;
- // The source context.
- SourceContext source_context = 4;
- // The source syntax.
- Syntax syntax = 5;
-}
-
-// Enum value definition.
-message EnumValue {
- // Enum value name.
- string name = 1;
- // Enum value number.
- int32 number = 2;
- // Protocol buffer options.
- repeated Option options = 3;
-}
-
-// A protocol buffer option, which can be attached to a message, field,
-// enumeration, etc.
-message Option {
- // The option's name. For protobuf built-in options (options defined in
- // descriptor.proto), this is the short name. For example, `"map_entry"`.
- // For custom options, it should be the fully-qualified name. For example,
- // `"google.api.http"`.
- string name = 1;
- // The option's value packed in an Any message. If the value is a primitive,
- // the corresponding wrapper type defined in google/protobuf/wrappers.proto
- // should be used. If the value is an enum, it should be stored as an int32
- // value using the google.protobuf.Int32Value type.
- Any value = 2;
-}
-
-// The syntax in which a protocol buffer element is defined.
-enum Syntax {
- // Syntax `proto2`.
- SYNTAX_PROTO2 = 0;
- // Syntax `proto3`.
- SYNTAX_PROTO3 = 1;
-}
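
To show how a concrete declaration such as `repeated int32 page_numbers = 2 [packed=true];` would surface through the Type/Field model above, here is an illustrative Go sketch. The kind, cardinality and field types are local stand-ins, with enum numbers matching TYPE_INT32 = 5 and CARDINALITY_REPEATED = 3 from the definitions above:

    package main

    import "fmt"

    // Local mirrors of the Field enums above, for illustration only.
    type kind int32
    type cardinality int32

    const (
        typeInt32           kind        = 5
        cardinalityRepeated cardinality = 3
    )

    // field mirrors the parts of google.protobuf.Field used in this example.
    type field struct {
        Kind        kind
        Cardinality cardinality
        Number      int32
        Name        string
        Packed      bool
        JSONName    string
    }

    func main() {
        f := field{
            Kind:        typeInt32,
            Cardinality: cardinalityRepeated,
            Number:      2,
            Name:        "page_numbers",
            Packed:      true,
            JSONName:    "pageNumbers",
        }
        fmt.Printf("%+v\n", f)
    }
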
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
deleted file mode 100644
index c5632e5ca..000000000
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
+++ /dev/null
@@ -1,118 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "types";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "WrappersProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// Wrapper message for `double`.
-//
-// The JSON representation for `DoubleValue` is JSON number.
-message DoubleValue {
- // The double value.
- double value = 1;
-}
-
-// Wrapper message for `float`.
-//
-// The JSON representation for `FloatValue` is JSON number.
-message FloatValue {
- // The float value.
- float value = 1;
-}
-
-// Wrapper message for `int64`.
-//
-// The JSON representation for `Int64Value` is JSON string.
-message Int64Value {
- // The int64 value.
- int64 value = 1;
-}
-
-// Wrapper message for `uint64`.
-//
-// The JSON representation for `UInt64Value` is JSON string.
-message UInt64Value {
- // The uint64 value.
- uint64 value = 1;
-}
-
-// Wrapper message for `int32`.
-//
-// The JSON representation for `Int32Value` is JSON number.
-message Int32Value {
- // The int32 value.
- int32 value = 1;
-}
-
-// Wrapper message for `uint32`.
-//
-// The JSON representation for `UInt32Value` is JSON number.
-message UInt32Value {
- // The uint32 value.
- uint32 value = 1;
-}
-
-// Wrapper message for `bool`.
-//
-// The JSON representation for `BoolValue` is JSON `true` and `false`.
-message BoolValue {
- // The bool value.
- bool value = 1;
-}
-
-// Wrapper message for `string`.
-//
-// The JSON representation for `StringValue` is JSON string.
-message StringValue {
- // The string value.
- string value = 1;
-}
-
-// Wrapper message for `bytes`.
-//
-// The JSON representation for `BytesValue` is JSON string.
-message BytesValue {
- // The bytes value.
- bytes value = 1;
-}
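
The point of these wrappers, distinguishing an absent field from its zero value, is easiest to see with a pointer-shaped sketch in Go. int32Value below is a local stand-in for Int32Value, not the generated type:

    package main

    import "fmt"

    // int32Value is a local stand-in for google.protobuf.Int32Value: a message
    // wrapper whose presence (non-nil pointer) is distinguishable from zero.
    type int32Value struct {
        Value int32
    }

    // describe shows the three states a wrapped field can express, which a
    // plain proto3 int32 cannot: absent, explicitly zero, and non-zero.
    func describe(v *int32Value) string {
        if v == nil {
            return "unset"
        }
        return fmt.Sprintf("set to %d", v.Value)
    }

    func main() {
        fmt.Println(describe(nil))                   // unset
        fmt.Println(describe(&int32Value{Value: 0})) // set to 0
        fmt.Println(describe(&int32Value{Value: 7})) // set to 7
    }
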
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
deleted file mode 100644
index a85bf1984..000000000
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package descriptor provides functions for obtaining protocol buffer
-// descriptors for generated Go types.
-//
-// These functions cannot go in package proto because they depend on the
-// generated protobuf descriptor messages, which themselves depend on proto.
-package descriptor
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
-
- "github.com/gogo/protobuf/proto"
-)
-
-// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
-func extractFile(gz []byte) (*FileDescriptorProto, error) {
- r, err := gzip.NewReader(bytes.NewReader(gz))
- if err != nil {
- return nil, fmt.Errorf("failed to open gzip reader: %v", err)
- }
- defer r.Close()
-
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
- }
-
- fd := new(FileDescriptorProto)
- if err := proto.Unmarshal(b, fd); err != nil {
- return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
- }
-
- return fd, nil
-}
-
-// Message is a proto.Message with a method to return its descriptor.
-//
-// Message types generated by the protocol compiler always satisfy
-// the Message interface.
-type Message interface {
- proto.Message
- Descriptor() ([]byte, []int)
-}
-
-// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
-// describing the given message.
-func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
- gz, path := msg.Descriptor()
- fd, err := extractFile(gz)
- if err != nil {
- panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
- }
-
- md = fd.MessageType[path[0]]
- for _, i := range path[1:] {
- md = md.NestedType[i]
- }
- return fd, md
-}
-
-// Is this field a scalar numeric type?
-func (field *FieldDescriptorProto) IsScalar() bool {
- if field.Type == nil {
- return false
- }
- switch *field.Type {
- case FieldDescriptorProto_TYPE_DOUBLE,
- FieldDescriptorProto_TYPE_FLOAT,
- FieldDescriptorProto_TYPE_INT64,
- FieldDescriptorProto_TYPE_UINT64,
- FieldDescriptorProto_TYPE_INT32,
- FieldDescriptorProto_TYPE_FIXED64,
- FieldDescriptorProto_TYPE_FIXED32,
- FieldDescriptorProto_TYPE_BOOL,
- FieldDescriptorProto_TYPE_UINT32,
- FieldDescriptorProto_TYPE_ENUM,
- FieldDescriptorProto_TYPE_SFIXED32,
- FieldDescriptorProto_TYPE_SFIXED64,
- FieldDescriptorProto_TYPE_SINT32,
- FieldDescriptorProto_TYPE_SINT64:
- return true
- default:
- return false
- }
-}
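
A short usage sketch for the ForMessage helper deleted above, assuming the import path shown in the diff header. DescriptorProto is generated elsewhere in the same package and satisfies the Message interface, so it can serve as the message being introspected:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    func main() {
        // DescriptorProto is itself generated by protoc-gen-gogo, so it has a
        // Descriptor() method and satisfies the Message interface ForMessage needs.
        fd, md := descriptor.ForMessage(&descriptor.DescriptorProto{})
        fmt.Println("file:", fd.GetName())    // e.g. "descriptor.proto"
        fmt.Println("message:", md.GetName()) // e.g. "DescriptorProto"
    }
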
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
deleted file mode 100644
index 44f893b77..000000000
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,2806 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: descriptor.proto
-
-package descriptor
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-type FieldDescriptorProto_Type int32
-
-const (
- // 0 is reserved for errors.
- // Order is weird for historical reasons.
- FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
- FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
- // negative values are likely.
- FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
- FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
- // negative values are likely.
- FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
- FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
- FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
- FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
- FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
- // Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
- // implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
- FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
- FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
- // New in version 2.
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
- FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
- FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
- FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
- FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
- FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
- FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
-)
-
-var FieldDescriptorProto_Type_name = map[int32]string{
- 1: "TYPE_DOUBLE",
- 2: "TYPE_FLOAT",
- 3: "TYPE_INT64",
- 4: "TYPE_UINT64",
- 5: "TYPE_INT32",
- 6: "TYPE_FIXED64",
- 7: "TYPE_FIXED32",
- 8: "TYPE_BOOL",
- 9: "TYPE_STRING",
- 10: "TYPE_GROUP",
- 11: "TYPE_MESSAGE",
- 12: "TYPE_BYTES",
- 13: "TYPE_UINT32",
- 14: "TYPE_ENUM",
- 15: "TYPE_SFIXED32",
- 16: "TYPE_SFIXED64",
- 17: "TYPE_SINT32",
- 18: "TYPE_SINT64",
-}
-var FieldDescriptorProto_Type_value = map[string]int32{
- "TYPE_DOUBLE": 1,
- "TYPE_FLOAT": 2,
- "TYPE_INT64": 3,
- "TYPE_UINT64": 4,
- "TYPE_INT32": 5,
- "TYPE_FIXED64": 6,
- "TYPE_FIXED32": 7,
- "TYPE_BOOL": 8,
- "TYPE_STRING": 9,
- "TYPE_GROUP": 10,
- "TYPE_MESSAGE": 11,
- "TYPE_BYTES": 12,
- "TYPE_UINT32": 13,
- "TYPE_ENUM": 14,
- "TYPE_SFIXED32": 15,
- "TYPE_SFIXED64": 16,
- "TYPE_SINT32": 17,
- "TYPE_SINT64": 18,
-}
-
-func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
- p := new(FieldDescriptorProto_Type)
- *p = x
- return p
-}
-func (x FieldDescriptorProto_Type) String() string {
- return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
-}
-func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
- if err != nil {
- return err
- }
- *x = FieldDescriptorProto_Type(value)
- return nil
-}
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 0}
-}
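
The generated type above follows the standard proto2 enum helper pattern: Enum() returns a pointer for use in optional fields, String() resolves through FieldDescriptorProto_Type_name, and UnmarshalJSON accepts a quoted symbolic name (or a number) via FieldDescriptorProto_Type_value. An illustrative sketch, not part of the removed file:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Enum() yields *FieldDescriptorProto_Type, which is what the optional
	// proto2 Type field expects.
	f := &descriptor.FieldDescriptorProto{
		Name: proto.String("id"),
		Type: descriptor.FieldDescriptorProto_TYPE_INT64.Enum(),
	}
	fmt.Println(f.GetType()) // TYPE_INT64, via FieldDescriptorProto_Type_name

	// UnmarshalJSON maps a quoted name back to its numeric value.
	var t descriptor.FieldDescriptorProto_Type
	if err := t.UnmarshalJSON([]byte(`"TYPE_SINT32"`)); err != nil {
		panic(err)
	}
	fmt.Println(int32(t)) // 17
}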
-
-type FieldDescriptorProto_Label int32
-
-const (
- // 0 is reserved for errors
- FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
- FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
- FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
-)
-
-var FieldDescriptorProto_Label_name = map[int32]string{
- 1: "LABEL_OPTIONAL",
- 2: "LABEL_REQUIRED",
- 3: "LABEL_REPEATED",
-}
-var FieldDescriptorProto_Label_value = map[string]int32{
- "LABEL_OPTIONAL": 1,
- "LABEL_REQUIRED": 2,
- "LABEL_REPEATED": 3,
-}
-
-func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
- p := new(FieldDescriptorProto_Label)
- *p = x
- return p
-}
-func (x FieldDescriptorProto_Label) String() string {
- return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
-}
-func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
- if err != nil {
- return err
- }
- *x = FieldDescriptorProto_Label(value)
- return nil
-}
-func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 1}
-}
-
-// Generated classes can be optimized for speed or code size.
-type FileOptions_OptimizeMode int32
-
-const (
- FileOptions_SPEED FileOptions_OptimizeMode = 1
- // etc.
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
- FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
-)
-
-var FileOptions_OptimizeMode_name = map[int32]string{
- 1: "SPEED",
- 2: "CODE_SIZE",
- 3: "LITE_RUNTIME",
-}
-var FileOptions_OptimizeMode_value = map[string]int32{
- "SPEED": 1,
- "CODE_SIZE": 2,
- "LITE_RUNTIME": 3,
-}
-
-func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
- p := new(FileOptions_OptimizeMode)
- *p = x
- return p
-}
-func (x FileOptions_OptimizeMode) String() string {
- return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
-}
-func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
- if err != nil {
- return err
- }
- *x = FileOptions_OptimizeMode(value)
- return nil
-}
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10, 0}
-}
-
-type FieldOptions_CType int32
-
-const (
- // Default mode.
- FieldOptions_STRING FieldOptions_CType = 0
- FieldOptions_CORD FieldOptions_CType = 1
- FieldOptions_STRING_PIECE FieldOptions_CType = 2
-)
-
-var FieldOptions_CType_name = map[int32]string{
- 0: "STRING",
- 1: "CORD",
- 2: "STRING_PIECE",
-}
-var FieldOptions_CType_value = map[string]int32{
- "STRING": 0,
- "CORD": 1,
- "STRING_PIECE": 2,
-}
-
-func (x FieldOptions_CType) Enum() *FieldOptions_CType {
- p := new(FieldOptions_CType)
- *p = x
- return p
-}
-func (x FieldOptions_CType) String() string {
- return proto.EnumName(FieldOptions_CType_name, int32(x))
-}
-func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
- if err != nil {
- return err
- }
- *x = FieldOptions_CType(value)
- return nil
-}
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 0}
-}
-
-type FieldOptions_JSType int32
-
-const (
- // Use the default type.
- FieldOptions_JS_NORMAL FieldOptions_JSType = 0
- // Use JavaScript strings.
- FieldOptions_JS_STRING FieldOptions_JSType = 1
- // Use JavaScript numbers.
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2
-)
-
-var FieldOptions_JSType_name = map[int32]string{
- 0: "JS_NORMAL",
- 1: "JS_STRING",
- 2: "JS_NUMBER",
-}
-var FieldOptions_JSType_value = map[string]int32{
- "JS_NORMAL": 0,
- "JS_STRING": 1,
- "JS_NUMBER": 2,
-}
-
-func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
- p := new(FieldOptions_JSType)
- *p = x
- return p
-}
-func (x FieldOptions_JSType) String() string {
- return proto.EnumName(FieldOptions_JSType_name, int32(x))
-}
-func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
- if err != nil {
- return err
- }
- *x = FieldOptions_JSType(value)
- return nil
-}
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 1}
-}
-
-// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-// or neither? HTTP based RPC implementation may choose GET verb for safe
-// methods, and PUT verb for idempotent methods instead of the default POST.
-type MethodOptions_IdempotencyLevel int32
-
-const (
- MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
- MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1
- MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2
-)
-
-var MethodOptions_IdempotencyLevel_name = map[int32]string{
- 0: "IDEMPOTENCY_UNKNOWN",
- 1: "NO_SIDE_EFFECTS",
- 2: "IDEMPOTENT",
-}
-var MethodOptions_IdempotencyLevel_value = map[string]int32{
- "IDEMPOTENCY_UNKNOWN": 0,
- "NO_SIDE_EFFECTS": 1,
- "IDEMPOTENT": 2,
-}
-
-func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
- p := new(MethodOptions_IdempotencyLevel)
- *p = x
- return p
-}
-func (x MethodOptions_IdempotencyLevel) String() string {
- return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
-}
-func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
- if err != nil {
- return err
- }
- *x = MethodOptions_IdempotencyLevel(value)
- return nil
-}
-func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17, 0}
-}
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-type FileDescriptorSet struct {
- File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
-func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorSet) ProtoMessage() {}
-func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{0}
-}
-func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
-}
-func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
-}
-func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorSet.Merge(dst, src)
-}
-func (m *FileDescriptorSet) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorSet.Size(m)
-}
-func (m *FileDescriptorSet) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
-
-func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
- if m != nil {
- return m.File
- }
- return nil
-}
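
A FileDescriptorSet is also what protoc writes for --descriptor_set_out, so a set produced that way can be decoded with proto.Unmarshal and walked through the nil-safe getters. An illustrative sketch, not part of the removed file; the api.pb path is hypothetical:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// e.g. protoc --include_imports --descriptor_set_out=api.pb api.proto
	raw, err := ioutil.ReadFile("api.pb") // hypothetical path
	if err != nil {
		panic(err)
	}

	set := new(descriptor.FileDescriptorSet)
	if err := proto.Unmarshal(raw, set); err != nil {
		panic(err)
	}

	for _, fd := range set.GetFile() {
		fmt.Printf("%s (package %s): %d top-level messages\n",
			fd.GetName(), fd.GetPackage(), len(fd.GetMessageType()))
	}
}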
-
-// Describes a complete .proto file.
-type FileDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
- // Names of files imported by this file.
- Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
- // Indexes of the public imported files in the dependency list above.
- PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
- // Indexes of the weak imported files in the dependency list.
- // For Google-internal migration only. Do not use.
- WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
- // All top-level definitions in this file.
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
- EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
- Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
- Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
- Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
- // This field contains optional information about the original source code.
- // You may safely remove this entire field without harming runtime
- // functionality of the descriptors -- the information is needed only by
- // development tools.
- SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
- // The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
-func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorProto) ProtoMessage() {}
-func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{1}
-}
-func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
-}
-func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorProto.Merge(dst, src)
-}
-func (m *FileDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorProto.Size(m)
-}
-func (m *FileDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
-
-func (m *FileDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *FileDescriptorProto) GetPackage() string {
- if m != nil && m.Package != nil {
- return *m.Package
- }
- return ""
-}
-
-func (m *FileDescriptorProto) GetDependency() []string {
- if m != nil {
- return m.Dependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetPublicDependency() []int32 {
- if m != nil {
- return m.PublicDependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetWeakDependency() []int32 {
- if m != nil {
- return m.WeakDependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
- if m != nil {
- return m.MessageType
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
- if m != nil {
- return m.EnumType
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
- if m != nil {
- return m.Service
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
- if m != nil {
- return m.Extension
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetOptions() *FileOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
- if m != nil {
- return m.SourceCodeInfo
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetSyntax() string {
- if m != nil && m.Syntax != nil {
- return *m.Syntax
- }
- return ""
-}
-
-// Describes a message type.
-type DescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
- Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
- NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
- EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
- ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
- OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
- Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
- ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
- // Reserved field names, which may not be used by fields in the same message.
- // A given name may only be reserved once.
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
-func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto) ProtoMessage() {}
-func (*DescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2}
-}
-func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
-}
-func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *DescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto.Merge(dst, src)
-}
-func (m *DescriptorProto) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto.Size(m)
-}
-func (m *DescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
-
-func (m *DescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
- if m != nil {
- return m.Field
- }
- return nil
-}
-
-func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
- if m != nil {
- return m.Extension
- }
- return nil
-}
-
-func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
- if m != nil {
- return m.NestedType
- }
- return nil
-}
-
-func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
- if m != nil {
- return m.EnumType
- }
- return nil
-}
-
-func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
- if m != nil {
- return m.ExtensionRange
- }
- return nil
-}
-
-func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
- if m != nil {
- return m.OneofDecl
- }
- return nil
-}
-
-func (m *DescriptorProto) GetOptions() *MessageOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
- if m != nil {
- return m.ReservedRange
- }
- return nil
-}
-
-func (m *DescriptorProto) GetReservedName() []string {
- if m != nil {
- return m.ReservedName
- }
- return nil
-}
-
-type DescriptorProto_ExtensionRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
-func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
-func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 0}
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
-}
-func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Range of reserved tag numbers. Reserved tag numbers may not be used by
-// fields or extension ranges in the same message. Reserved ranges may
-// not overlap.
-type DescriptorProto_ReservedRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
-func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ReservedRange) ProtoMessage() {}
-func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 1}
-}
-func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
-}
-func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
-}
-func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ReservedRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-type ExtensionRangeOptions struct {
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
-func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRangeOptions) ProtoMessage() {}
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{3}
-}
-
-var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_ExtensionRangeOptions
-}
-func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
-}
-func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
-}
-func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src)
-}
-func (m *ExtensionRangeOptions) XXX_Size() int {
- return xxx_messageInfo_ExtensionRangeOptions.Size(m)
-}
-func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
-
-func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-// Describes a field within a message.
-type FieldDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
- Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
- // If type_name is set, this need not be set. If both this and type_name
- // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
- Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
- // For message and enum types, this is the name of the type. If the name
- // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
- // rules are used to find the type (i.e. first the nested types within this
- // message are searched, then within the parent, on up to the root
- // namespace).
- TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
- // For extensions, this is the name of the type being extended. It is
- // resolved in the same manner as type_name.
- Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
- // For numeric types, contains the original text representation of the value.
- // For booleans, "true" or "false".
- // For strings, contains the default text contents (not escaped in any way).
- // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
- DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
- // If set, gives the index of a oneof in the containing type's oneof_decl
- // list. This field is a member of that oneof.
- OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
-	// JSON name of this field. The value is set by the protocol compiler. If the
- // user has set a "json_name" option on this field, that option's value
- // will be used. Otherwise, it's deduced from the field's name by converting
- // it to camelCase.
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
- Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
-func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FieldDescriptorProto) ProtoMessage() {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4}
-}
-func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
-}
-func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldDescriptorProto.Merge(dst, src)
-}
-func (m *FieldDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_FieldDescriptorProto.Size(m)
-}
-func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
-
-func (m *FieldDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetNumber() int32 {
- if m != nil && m.Number != nil {
- return *m.Number
- }
- return 0
-}
-
-func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return FieldDescriptorProto_LABEL_OPTIONAL
-}
-
-func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return FieldDescriptorProto_TYPE_DOUBLE
-}
-
-func (m *FieldDescriptorProto) GetTypeName() string {
- if m != nil && m.TypeName != nil {
- return *m.TypeName
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetExtendee() string {
- if m != nil && m.Extendee != nil {
- return *m.Extendee
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetDefaultValue() string {
- if m != nil && m.DefaultValue != nil {
- return *m.DefaultValue
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetOneofIndex() int32 {
- if m != nil && m.OneofIndex != nil {
- return *m.OneofIndex
- }
- return 0
-}
-
-func (m *FieldDescriptorProto) GetJsonName() string {
- if m != nil && m.JsonName != nil {
- return *m.JsonName
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a oneof.
-type OneofDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
-func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*OneofDescriptorProto) ProtoMessage() {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{5}
-}
-func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
-}
-func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofDescriptorProto.Merge(dst, src)
-}
-func (m *OneofDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_OneofDescriptorProto.Size(m)
-}
-func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
-
-func (m *OneofDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes an enum type.
-type EnumDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- // Range of reserved numeric values. Reserved numeric values may not be used
- // by enum values in the same enum declaration. Reserved ranges may not
- // overlap.
- ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
- // Reserved enum value names, which may not be reused. A given name may only
- // be reserved once.
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
-func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto) ProtoMessage() {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6}
-}
-func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto.Merge(dst, src)
-}
-func (m *EnumDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_EnumDescriptorProto.Size(m)
-}
-func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
- if m != nil {
- return m.ReservedRange
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedName() []string {
- if m != nil {
- return m.ReservedName
- }
- return nil
-}
-
-// Range of reserved numeric values. Reserved values may not be used by
-// entries in the same enum. Reserved ranges may not overlap.
-//
-// Note that this is distinct from DescriptorProto.ReservedRange in that it
-// is inclusive such that it can appropriately represent the entire int32
-// domain.
-type EnumDescriptorProto_EnumReservedRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} }
-func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6, 0}
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
-}
-func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
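
The inclusive end noted above is the practical difference from DescriptorProto.ReservedRange, whose end is exclusive, so range checks differ by one. An illustrative sketch, not part of the removed file:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// enumNumberReserved reports whether n is reserved in the enum; End is inclusive.
func enumNumberReserved(e *descriptor.EnumDescriptorProto, n int32) bool {
	for _, r := range e.GetReservedRange() {
		if n >= r.GetStart() && n <= r.GetEnd() {
			return true
		}
	}
	return false
}

// fieldNumberReserved does the same for message field numbers; End is exclusive.
func fieldNumberReserved(m *descriptor.DescriptorProto, n int32) bool {
	for _, r := range m.GetReservedRange() {
		if n >= r.GetStart() && n < r.GetEnd() {
			return true
		}
	}
	return false
}

func main() {
	e := &descriptor.EnumDescriptorProto{
		ReservedRange: []*descriptor.EnumDescriptorProto_EnumReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(7)},
		},
	}
	m := &descriptor.DescriptorProto{
		ReservedRange: []*descriptor.DescriptorProto_ReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(7)},
		},
	}
	fmt.Println(enumNumberReserved(e, 7))  // true: enum end is inclusive
	fmt.Println(fieldNumberReserved(m, 7)) // false: message end is exclusive
}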
-
-// Describes a value within an enum.
-type EnumValueDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
- Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
-func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumValueDescriptorProto) ProtoMessage() {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{7}
-}
-func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src)
-}
-func (m *EnumValueDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
-}
-func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumValueDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *EnumValueDescriptorProto) GetNumber() int32 {
- if m != nil && m.Number != nil {
- return *m.Number
- }
- return 0
-}
-
-func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a service.
-type ServiceDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
- Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
-func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*ServiceDescriptorProto) ProtoMessage() {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{8}
-}
-func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
-}
-func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src)
-}
-func (m *ServiceDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_ServiceDescriptorProto.Size(m)
-}
-func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
-
-func (m *ServiceDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
- if m != nil {
- return m.Method
- }
- return nil
-}
-
-func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a method of a service.
-type MethodDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- // Input and output type names. These are resolved in the same way as
- // FieldDescriptorProto.type_name, but must refer to a message type.
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
- OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
- Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
- // Identifies if client streams multiple client messages
- ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
- // Identifies if server streams multiple server messages
- ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
-func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*MethodDescriptorProto) ProtoMessage() {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{9}
-}
-func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
-}
-func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
-}
-func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodDescriptorProto.Merge(dst, src)
-}
-func (m *MethodDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_MethodDescriptorProto.Size(m)
-}
-func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
-
-const Default_MethodDescriptorProto_ClientStreaming bool = false
-const Default_MethodDescriptorProto_ServerStreaming bool = false
-
-func (m *MethodDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetInputType() string {
- if m != nil && m.InputType != nil {
- return *m.InputType
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetOutputType() string {
- if m != nil && m.OutputType != nil {
- return *m.OutputType
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *MethodDescriptorProto) GetClientStreaming() bool {
- if m != nil && m.ClientStreaming != nil {
- return *m.ClientStreaming
- }
- return Default_MethodDescriptorProto_ClientStreaming
-}
-
-func (m *MethodDescriptorProto) GetServerStreaming() bool {
- if m != nil && m.ServerStreaming != nil {
- return *m.ServerStreaming
- }
- return Default_MethodDescriptorProto_ServerStreaming
-}
-
-type FileOptions struct {
- // Sets the Java package where classes generated from this .proto will be
- // placed. By default, the proto package is used, but this is often
- // inappropriate because proto packages do not normally start with backwards
- // domain names.
- JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
- JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
- // If set true, then the Java code generator will generate a separate .java
- // file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
- // generated to contain the file's getDescriptor() method as well as any
- // top-level extensions defined in the file.
- JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
- // This option does nothing.
- JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
-	// This option has no effect when used with the lite runtime.
- JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
- OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
- // Sets the Go package where structs generated from this .proto will be
- // placed. If omitted, the Go package will be derived from the following:
- // - The basename of the package import path, if provided.
- // - Otherwise, the package statement in the .proto file, if present.
- // - Otherwise, the basename of the .proto file, without extension.
- GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
- // Should generic services be generated in each language? "Generic" services
- // are not specific to any particular RPC system. They are generated by the
- // main code generators in each language (without additional plugins).
- // Generic services were the only kind of service generation supported by
- // early versions of google.protobuf.
- //
- // Generic services are now considered deprecated in favor of using plugins
- // that generate code specific to your particular RPC system. Therefore,
- // these default to false. Old code which depends on generic services should
- // explicitly set them to true.
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
- JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
- PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
- PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
- // Is this file deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for everything in the file, or it will be completely ignored; at the very
-	// least, this is a formalization for deprecating files.
- Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Enables the use of arenas for the proto messages in this file. This applies
- // only to generated classes for C++.
- CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
- // Sets the objective c class prefix which is prepended to all objective c
- // generated classes from this .proto. There is no default.
- ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
- // Namespace for generated classes; defaults to the package.
- CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
- // By default Swift generators will take the proto package and CamelCase it
- // replacing '.' with underscore and use that to prefix the types/symbols
-	// defined. When this option is provided, they will use this value instead
- // to prefix the types/symbols defined.
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
- // Sets the php class prefix which is prepended to all php generated classes
- // from this .proto. Default is empty.
- PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
- // Use this option to change the namespace of php generated classes. Default
- // is empty. When this option is empty, the package name will be used for
- // determining the namespace.
- PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
- // The parser stores options it doesn't recognize here.
- // See the documentation for the "Options" section above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileOptions) Reset() { *m = FileOptions{} }
-func (m *FileOptions) String() string { return proto.CompactTextString(m) }
-func (*FileOptions) ProtoMessage() {}
-func (*FileOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10}
-}
-
-var extRange_FileOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_FileOptions
-}
-func (m *FileOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileOptions.Unmarshal(m, b)
-}
-func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
-}
-func (dst *FileOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileOptions.Merge(dst, src)
-}
-func (m *FileOptions) XXX_Size() int {
- return xxx_messageInfo_FileOptions.Size(m)
-}
-func (m *FileOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_FileOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileOptions proto.InternalMessageInfo
-
-const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaStringCheckUtf8 bool = false
-const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
-const Default_FileOptions_CcGenericServices bool = false
-const Default_FileOptions_JavaGenericServices bool = false
-const Default_FileOptions_PyGenericServices bool = false
-const Default_FileOptions_PhpGenericServices bool = false
-const Default_FileOptions_Deprecated bool = false
-const Default_FileOptions_CcEnableArenas bool = false
-
-func (m *FileOptions) GetJavaPackage() string {
- if m != nil && m.JavaPackage != nil {
- return *m.JavaPackage
- }
- return ""
-}
-
-func (m *FileOptions) GetJavaOuterClassname() string {
- if m != nil && m.JavaOuterClassname != nil {
- return *m.JavaOuterClassname
- }
- return ""
-}
-
-func (m *FileOptions) GetJavaMultipleFiles() bool {
- if m != nil && m.JavaMultipleFiles != nil {
- return *m.JavaMultipleFiles
- }
- return Default_FileOptions_JavaMultipleFiles
-}
-
-// Deprecated: Do not use.
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
- if m != nil && m.JavaGenerateEqualsAndHash != nil {
- return *m.JavaGenerateEqualsAndHash
- }
- return false
-}
-
-func (m *FileOptions) GetJavaStringCheckUtf8() bool {
- if m != nil && m.JavaStringCheckUtf8 != nil {
- return *m.JavaStringCheckUtf8
- }
- return Default_FileOptions_JavaStringCheckUtf8
-}
-
-func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
- if m != nil && m.OptimizeFor != nil {
- return *m.OptimizeFor
- }
- return Default_FileOptions_OptimizeFor
-}
-
-func (m *FileOptions) GetGoPackage() string {
- if m != nil && m.GoPackage != nil {
- return *m.GoPackage
- }
- return ""
-}
-
-func (m *FileOptions) GetCcGenericServices() bool {
- if m != nil && m.CcGenericServices != nil {
- return *m.CcGenericServices
- }
- return Default_FileOptions_CcGenericServices
-}
-
-func (m *FileOptions) GetJavaGenericServices() bool {
- if m != nil && m.JavaGenericServices != nil {
- return *m.JavaGenericServices
- }
- return Default_FileOptions_JavaGenericServices
-}
-
-func (m *FileOptions) GetPyGenericServices() bool {
- if m != nil && m.PyGenericServices != nil {
- return *m.PyGenericServices
- }
- return Default_FileOptions_PyGenericServices
-}
-
-func (m *FileOptions) GetPhpGenericServices() bool {
- if m != nil && m.PhpGenericServices != nil {
- return *m.PhpGenericServices
- }
- return Default_FileOptions_PhpGenericServices
-}
-
-func (m *FileOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_FileOptions_Deprecated
-}
-
-func (m *FileOptions) GetCcEnableArenas() bool {
- if m != nil && m.CcEnableArenas != nil {
- return *m.CcEnableArenas
- }
- return Default_FileOptions_CcEnableArenas
-}
-
-func (m *FileOptions) GetObjcClassPrefix() string {
- if m != nil && m.ObjcClassPrefix != nil {
- return *m.ObjcClassPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetCsharpNamespace() string {
- if m != nil && m.CsharpNamespace != nil {
- return *m.CsharpNamespace
- }
- return ""
-}
-
-func (m *FileOptions) GetSwiftPrefix() string {
- if m != nil && m.SwiftPrefix != nil {
- return *m.SwiftPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetPhpClassPrefix() string {
- if m != nil && m.PhpClassPrefix != nil {
- return *m.PhpClassPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetPhpNamespace() string {
- if m != nil && m.PhpNamespace != nil {
- return *m.PhpNamespace
- }
- return ""
-}
-
-func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type MessageOptions struct {
- // Set true to use the old proto1 MessageSet wire format for extensions.
- // This is provided for backwards-compatibility with the MessageSet wire
- // format. You should not use this for any other reason: It's less
- // efficient, has fewer features, and is more complicated.
- //
- // The message must be defined exactly as follows:
- // message Foo {
- // option message_set_wire_format = true;
- // extensions 4 to max;
- // }
- // Note that the message cannot have any defined fields; MessageSets only
- // have extensions.
- //
- // All extensions of your type must be singular messages; e.g. they cannot
- // be int32s, enums, or repeated messages.
- //
- // Because this is an option, the above two restrictions are not enforced by
- // the protocol compiler.
- MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
- // Disables the generation of the standard "descriptor()" accessor, which can
- // conflict with a field of the same name. This is meant to make migration
- // from proto1 easier; new code should avoid fields named "descriptor".
- NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
- // Is this message deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for the message, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating messages.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Whether the message is an automatically generated map entry type for the
- // maps field.
- //
- // For maps fields:
- // map<KeyType, ValueType> map_field = 1;
- // The parsed descriptor looks like:
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
- //
- // Implementations may choose not to generate the map_entry=true message, but
- // use a native map in the target language to hold the keys and values.
-	// The reflection APIs in such implementations still need to work as
- // if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
- MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MessageOptions) Reset() { *m = MessageOptions{} }
-func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
-func (*MessageOptions) ProtoMessage() {}
-func (*MessageOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{11}
-}
-
-var extRange_MessageOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MessageOptions
-}
-func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
-}
-func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
-}
-func (dst *MessageOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MessageOptions.Merge(dst, src)
-}
-func (m *MessageOptions) XXX_Size() int {
- return xxx_messageInfo_MessageOptions.Size(m)
-}
-func (m *MessageOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_MessageOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
-
-const Default_MessageOptions_MessageSetWireFormat bool = false
-const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
-const Default_MessageOptions_Deprecated bool = false
-
-func (m *MessageOptions) GetMessageSetWireFormat() bool {
- if m != nil && m.MessageSetWireFormat != nil {
- return *m.MessageSetWireFormat
- }
- return Default_MessageOptions_MessageSetWireFormat
-}
-
-func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
- if m != nil && m.NoStandardDescriptorAccessor != nil {
- return *m.NoStandardDescriptorAccessor
- }
- return Default_MessageOptions_NoStandardDescriptorAccessor
-}
-
-func (m *MessageOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_MessageOptions_Deprecated
-}
-
-func (m *MessageOptions) GetMapEntry() bool {
- if m != nil && m.MapEntry != nil {
- return *m.MapEntry
- }
- return false
-}
-
-func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
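
The extension range {Start: 1000, End: 536870911} declared above is what makes custom options work: user .proto files extend google.protobuf.MessageOptions with fields in that number range, and callers read them back through the proto extension API. A hedged sketch using the gogo/protobuf v1 API; E_MyFlag, the name example.my_flag, field number 51234 and readMyFlag are hypothetical stand-ins for what protoc-gen-gogo would normally generate from an extend block:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// E_MyFlag is a hypothetical custom option on MessageOptions. Real code would
// use the ExtensionDesc emitted for an `extend google.protobuf.MessageOptions`
// declaration; the field number only has to fall inside 1000-536870911.
var E_MyFlag = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51234,
	Name:          "example.my_flag",
	Tag:           "varint,51234,opt,name=my_flag",
}

// readMyFlag returns the option value, defaulting to false when it is absent.
func readMyFlag(opts *descriptor.MessageOptions) bool {
	if opts == nil || !proto.HasExtension(opts, E_MyFlag) {
		return false
	}
	v, err := proto.GetExtension(opts, E_MyFlag)
	if err != nil {
		return false
	}
	return *(v.(*bool))
}

func main() {
	opts := &descriptor.MessageOptions{}
	if err := proto.SetExtension(opts, E_MyFlag, proto.Bool(true)); err != nil {
		panic(err)
	}
	fmt.Println(readMyFlag(opts)) // true
}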
-
-type FieldOptions struct {
- // The ctype option instructs the C++ code generator to use a different
- // representation of the field than it normally would. See the specific
- // options below. This option is not yet implemented in the open source
- // release -- sorry, we'll try to include it in a future version!
- Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
- // The packed option can be enabled for repeated primitive fields to enable
- // a more efficient representation on the wire. Rather than repeatedly
- // writing the tag and type for each element, the entire array is encoded as
-	// a single length-delimited blob. In proto3, only explicitly setting it to
- // false will avoid using packed encoding.
- Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
- // The jstype option determines the JavaScript type used for values of the
- // field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
-	// is represented as a JavaScript string, which avoids loss of precision that
-	// can happen when a large value is converted to a floating-point JavaScript number.
- // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
- // use the JavaScript "number" type. The behavior of the default option
- // JS_NORMAL is implementation dependent.
- //
- // This option is an enum to permit additional types to be added, e.g.
- // goog.math.Integer.
- Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
- // Should this field be parsed lazily? Lazy applies only to message-type
- // fields. It means that when the outer message is initially parsed, the
- // inner message's contents will not be parsed but instead stored in encoded
- // form. The inner message will actually be parsed when it is first accessed.
- //
- // This is only a hint. Implementations are free to choose whether to use
- // eager or lazy parsing regardless of the value of this option. However,
- // setting this option true suggests that the protocol author believes that
- // using lazy parsing on this field is worth the additional bookkeeping
- // overhead typically needed to implement it.
- //
- // This option does not affect the public interface of any generated code;
- // all method signatures remain the same. Furthermore, thread-safety of the
- // interface is not affected by this option; const methods remain safe to
- // call from multiple threads concurrently, while non-const methods continue
- // to require exclusive access.
- //
- //
- // Note that implementations may choose not to check required fields within
- // a lazy sub-message. That is, calling IsInitialized() on the outer message
- // may return true even if the inner message has missing required fields.
- // This is necessary because otherwise the inner message would have to be
- // parsed in order to perform the check, defeating the purpose of lazy
- // parsing. An implementation which chooses not to check required fields
- // must be consistent about it. That is, for any particular sub-message, the
- // implementation must either *always* check its required fields, or *never*
- // check its required fields, regardless of whether or not the message has
- // been parsed.
- Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
- // Is this field deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for accessors, or it will be completely ignored; at the very least, this
- // is a formalization for deprecating fields.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // For Google-internal migration only. Do not use.
- Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldOptions) Reset() { *m = FieldOptions{} }
-func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
-func (*FieldOptions) ProtoMessage() {}
-func (*FieldOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12}
-}
-
-var extRange_FieldOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_FieldOptions
-}
-func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
-}
-func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
-}
-func (dst *FieldOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldOptions.Merge(dst, src)
-}
-func (m *FieldOptions) XXX_Size() int {
- return xxx_messageInfo_FieldOptions.Size(m)
-}
-func (m *FieldOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
-
-const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
-const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
-const Default_FieldOptions_Lazy bool = false
-const Default_FieldOptions_Deprecated bool = false
-const Default_FieldOptions_Weak bool = false
-
-func (m *FieldOptions) GetCtype() FieldOptions_CType {
- if m != nil && m.Ctype != nil {
- return *m.Ctype
- }
- return Default_FieldOptions_Ctype
-}
-
-func (m *FieldOptions) GetPacked() bool {
- if m != nil && m.Packed != nil {
- return *m.Packed
- }
- return false
-}
-
-func (m *FieldOptions) GetJstype() FieldOptions_JSType {
- if m != nil && m.Jstype != nil {
- return *m.Jstype
- }
- return Default_FieldOptions_Jstype
-}
-
-func (m *FieldOptions) GetLazy() bool {
- if m != nil && m.Lazy != nil {
- return *m.Lazy
- }
- return Default_FieldOptions_Lazy
-}
-
-func (m *FieldOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_FieldOptions_Deprecated
-}
-
-func (m *FieldOptions) GetWeak() bool {
- if m != nil && m.Weak != nil {
- return *m.Weak
- }
- return Default_FieldOptions_Weak
-}
-
-func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type OneofOptions struct {
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *OneofOptions) Reset() { *m = OneofOptions{} }
-func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
-func (*OneofOptions) ProtoMessage() {}
-func (*OneofOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{13}
-}
-
-var extRange_OneofOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_OneofOptions
-}
-func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
-}
-func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
-}
-func (dst *OneofOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofOptions.Merge(dst, src)
-}
-func (m *OneofOptions) XXX_Size() int {
- return xxx_messageInfo_OneofOptions.Size(m)
-}
-func (m *OneofOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_OneofOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
-
-func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type EnumOptions struct {
- // Set this option to true to allow mapping different tag names to the same
- // value.
- AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
- // Is this enum deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for the enum, or it will be completely ignored; at the very least, this
- // is a formalization for deprecating enums.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumOptions) Reset() { *m = EnumOptions{} }
-func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumOptions) ProtoMessage() {}
-func (*EnumOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{14}
-}
-
-var extRange_EnumOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_EnumOptions
-}
-func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
-}
-func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
-}
-func (dst *EnumOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumOptions.Merge(dst, src)
-}
-func (m *EnumOptions) XXX_Size() int {
- return xxx_messageInfo_EnumOptions.Size(m)
-}
-func (m *EnumOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
-
-const Default_EnumOptions_Deprecated bool = false
-
-func (m *EnumOptions) GetAllowAlias() bool {
- if m != nil && m.AllowAlias != nil {
- return *m.AllowAlias
- }
- return false
-}
-
-func (m *EnumOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_EnumOptions_Deprecated
-}
-
-func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type EnumValueOptions struct {
- // Is this enum value deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for the enum value, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating enum values.
- Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
-func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumValueOptions) ProtoMessage() {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{15}
-}
-
-var extRange_EnumValueOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_EnumValueOptions
-}
-func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
-}
-func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
-}
-func (dst *EnumValueOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueOptions.Merge(dst, src)
-}
-func (m *EnumValueOptions) XXX_Size() int {
- return xxx_messageInfo_EnumValueOptions.Size(m)
-}
-func (m *EnumValueOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
-
-const Default_EnumValueOptions_Deprecated bool = false
-
-func (m *EnumValueOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_EnumValueOptions_Deprecated
-}
-
-func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type ServiceOptions struct {
- // Is this service deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for the service, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating services.
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
-func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
-func (*ServiceOptions) ProtoMessage() {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{16}
-}
-
-var extRange_ServiceOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_ServiceOptions
-}
-func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
-}
-func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
-}
-func (dst *ServiceOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceOptions.Merge(dst, src)
-}
-func (m *ServiceOptions) XXX_Size() int {
- return xxx_messageInfo_ServiceOptions.Size(m)
-}
-func (m *ServiceOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
-
-const Default_ServiceOptions_Deprecated bool = false
-
-func (m *ServiceOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_ServiceOptions_Deprecated
-}
-
-func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type MethodOptions struct {
- // Is this method deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
-	// for the method, or it will be completely ignored; at the very least,
- // this is a formalization for deprecating methods.
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MethodOptions) Reset() { *m = MethodOptions{} }
-func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
-func (*MethodOptions) ProtoMessage() {}
-func (*MethodOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17}
-}
-
-var extRange_MethodOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MethodOptions
-}
-func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
-}
-func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
-}
-func (dst *MethodOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodOptions.Merge(dst, src)
-}
-func (m *MethodOptions) XXX_Size() int {
- return xxx_messageInfo_MethodOptions.Size(m)
-}
-func (m *MethodOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_MethodOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
-
-const Default_MethodOptions_Deprecated bool = false
-const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
-
-func (m *MethodOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_MethodOptions_Deprecated
-}
-
-func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
- if m != nil && m.IdempotencyLevel != nil {
- return *m.IdempotencyLevel
- }
- return Default_MethodOptions_IdempotencyLevel
-}
-
-func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-// A message representing an option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-type UninterpretedOption struct {
- Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
- // The value of the uninterpreted option, in whatever type the tokenizer
- // identified it as during parsing. Exactly one of these should be set.
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
- PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
- NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
- StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
- AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
-func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption) ProtoMessage() {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18}
-}
-func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
-}
-func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
-}
-func (dst *UninterpretedOption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption.Merge(dst, src)
-}
-func (m *UninterpretedOption) XXX_Size() int {
- return xxx_messageInfo_UninterpretedOption.Size(m)
-}
-func (m *UninterpretedOption) XXX_DiscardUnknown() {
- xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
-
-func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *UninterpretedOption) GetIdentifierValue() string {
- if m != nil && m.IdentifierValue != nil {
- return *m.IdentifierValue
- }
- return ""
-}
-
-func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
- if m != nil && m.PositiveIntValue != nil {
- return *m.PositiveIntValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetNegativeIntValue() int64 {
- if m != nil && m.NegativeIntValue != nil {
- return *m.NegativeIntValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetStringValue() []byte {
- if m != nil {
- return m.StringValue
- }
- return nil
-}
-
-func (m *UninterpretedOption) GetAggregateValue() string {
- if m != nil && m.AggregateValue != nil {
- return *m.AggregateValue
- }
- return ""
-}
-
-// The name of the uninterpreted option. Each string represents a segment in
-// a dot-separated name. is_extension is true iff a segment represents an
-// extension (denoted with parentheses in options specs in .proto files).
-// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
-type UninterpretedOption_NamePart struct {
- NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
- IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
-func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption_NamePart) ProtoMessage() {}
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18, 0}
-}
-func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
-}
-func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
-}
-func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src)
-}
-func (m *UninterpretedOption_NamePart) XXX_Size() int {
- return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
-}
-func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
- xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
-
-func (m *UninterpretedOption_NamePart) GetNamePart() string {
- if m != nil && m.NamePart != nil {
- return *m.NamePart
- }
- return ""
-}
-
-func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
- if m != nil && m.IsExtension != nil {
- return *m.IsExtension
- }
- return false
-}
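
Putting NamePart and IsExtension together: the sketch below rebuilds the dotted option name from the documented example above ("foo.(bar.baz).qux"). The helper optionName is not part of the generated API; it only illustrates the encoding the comment describes.

package main

import (
	"fmt"
	"strings"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// optionName joins the name parts of an UninterpretedOption, wrapping
// extension segments in parentheses as described above.
func optionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	parts := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(optionName(parts)) // foo.(bar.baz).qux
}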
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-type SourceCodeInfo struct {
- // A Location identifies a piece of source code in a .proto file which
- // corresponds to a particular definition. This information is intended
- // to be useful to IDEs, code indexers, documentation generators, and similar
- // tools.
- //
- // For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
- // Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
- // We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
- //
- // Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
-	//   does not mean that it is a descendant. For example, a "group" defines
- // both a type and a field in a single declaration. Thus, the locations
- // corresponding to the type and field and their components will overlap.
- // - Code which tries to interpret locations should probably be designed to
- // ignore those that it doesn't understand, as more types of locations could
- // be recorded in the future.
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
-func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo) ProtoMessage() {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19}
-}
-func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
-}
-func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo.Merge(dst, src)
-}
-func (m *SourceCodeInfo) XXX_Size() int {
- return xxx_messageInfo_SourceCodeInfo.Size(m)
-}
-func (m *SourceCodeInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
-
-func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
- if m != nil {
- return m.Location
- }
- return nil
-}
-
-type SourceCodeInfo_Location struct {
- // Identifies which part of the FileDescriptorProto was defined at this
- // location.
- //
- // Each element is a field number or an index. They form a path from
-	// the root FileDescriptorProto to the place where the definition appears. For
- // example, this path:
- // [ 4, 3, 2, 7, 1 ]
- // refers to:
- // file.message_type(3) // 4, 3
- // .field(7) // 2, 7
- // .name() // 1
- // This is because FileDescriptorProto.message_type has field number 4:
- // repeated DescriptorProto message_type = 4;
- // and DescriptorProto.field has field number 2:
- // repeated FieldDescriptorProto field = 2;
- // and FieldDescriptorProto.name has field number 1:
- // optional string name = 1;
- //
- // Thus, the above path gives the location of a field name. If we removed
- // the last element:
- // [ 4, 3, 2, 7 ]
- // this path refers to the whole field declaration (from the beginning
- // of the label to the terminating semicolon).
- Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
- // Always has exactly three or four elements: start line, start column,
- // end line (optional, otherwise assumed same as start line), end column.
- // These are packed into a single field for efficiency. Note that line
- // and column numbers are zero-based -- typically you will want to add
- // 1 to each before displaying to a user.
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
- // If this SourceCodeInfo represents a complete declaration, these are any
- // comments appearing before and after the declaration which appear to be
- // attached to the declaration.
- //
- // A series of line comments appearing on consecutive lines, with no other
- // tokens appearing on those lines, will be treated as a single comment.
- //
- // leading_detached_comments will keep paragraphs of comments that appear
- // before (but not connected to) the current element. Each paragraph,
- // separated by empty lines, will be one comment element in the repeated
- // field.
- //
- // Only the comment content is provided; comment markers (e.g. //) are
- // stripped out. For block comments, leading whitespace and an asterisk
- // will be stripped from the beginning of each line other than the first.
- // Newlines are included in the output.
- //
- // Examples:
- //
- // optional int32 foo = 1; // Comment attached to foo.
- // // Comment attached to bar.
- // optional int32 bar = 2;
- //
- // optional string baz = 3;
- // // Comment attached to baz.
- // // Another line attached to baz.
- //
- // // Comment attached to qux.
- // //
- // // Another line attached to qux.
- // optional double qux = 4;
- //
- // // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
- // // both.
- //
- // // Detached comment for corge paragraph 2.
- //
- // optional string corge = 5;
- // /* Block comment attached
- // * to corge. Leading asterisks
- // * will be removed. */
- // /* Block comment attached to
- // * grault. */
- // optional int32 grault = 6;
- //
- // // ignored detached comments.
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
- TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
- LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
-func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo_Location) ProtoMessage() {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19, 0}
-}
-func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
-}
-func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src)
-}
-func (m *SourceCodeInfo_Location) XXX_Size() int {
- return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
-}
-func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
- xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
-
-func (m *SourceCodeInfo_Location) GetPath() []int32 {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-func (m *SourceCodeInfo_Location) GetSpan() []int32 {
- if m != nil {
- return m.Span
- }
- return nil
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingComments() string {
- if m != nil && m.LeadingComments != nil {
- return *m.LeadingComments
- }
- return ""
-}
-
-func (m *SourceCodeInfo_Location) GetTrailingComments() string {
- if m != nil && m.TrailingComments != nil {
- return *m.TrailingComments
- }
- return ""
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
- if m != nil {
- return m.LeadingDetachedComments
- }
- return nil
-}
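
As the Span comment notes, spans are zero-based and come in 3- or 4-element form. A small sketch of turning a Location into a 1-based "line:col-line:col" string; spanString is an illustrative helper, not part of the generated package:

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// spanString converts a zero-based span into a 1-based range string.
func spanString(loc *descriptor.SourceCodeInfo_Location) string {
	s := loc.GetSpan()
	switch len(s) {
	case 3: // start line, start column, end column (end line == start line)
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[0]+1, s[2]+1)
	case 4: // start line, start column, end line, end column
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[2]+1, s[3]+1)
	default:
		return ""
	}
}

func main() {
	loc := &descriptor.SourceCodeInfo_Location{
		Path: []int32{4, 0, 2, 0}, // message_type(0).field(0)
		Span: []int32{6, 2, 27},   // zero-based: line 6, columns 2-27
	}
	fmt.Println(spanString(loc)) // 7:3-7:28
}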
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-type GeneratedCodeInfo struct {
- // An Annotation connects some span of text in generated code to an element
- // of its generating .proto file.
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
-func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo) ProtoMessage() {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20}
-}
-func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
-}
-func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src)
-}
-func (m *GeneratedCodeInfo) XXX_Size() int {
- return xxx_messageInfo_GeneratedCodeInfo.Size(m)
-}
-func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
- if m != nil {
- return m.Annotation
- }
- return nil
-}
-
-type GeneratedCodeInfo_Annotation struct {
- // Identifies the element in the original source .proto file. This field
- // is formatted the same as SourceCodeInfo.Location.path.
- Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
- // Identifies the filesystem path to the original source .proto.
- SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
- // Identifies the starting offset in bytes in the generated code
- // that relates to the identified object.
- Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
- // Identifies the ending offset in bytes in the generated code that
-	// relates to the identified object. The end offset should be one past
- // the last relevant byte (so the length of the text = end - begin).
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} }
-func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20, 0}
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
-}
-func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
- xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
- if m != nil && m.SourceFile != nil {
- return *m.SourceFile
- }
- return ""
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
- if m != nil && m.Begin != nil {
- return *m.Begin
- }
- return 0
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
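
Begin and End are byte offsets into the generated file, with End one past the last relevant byte, so the annotated region is the half-open slice generated[Begin:End]. A minimal sketch; annotatedText and the literal inputs are illustrative only:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// annotatedText returns the region of generated source that an Annotation
// covers, or nil if the offsets do not fit inside the given buffer.
func annotatedText(generated []byte, a *descriptor.GeneratedCodeInfo_Annotation) []byte {
	begin, end := int(a.GetBegin()), int(a.GetEnd())
	if begin < 0 || end > len(generated) || begin > end {
		return nil
	}
	return generated[begin:end]
}

func main() {
	src := []byte("type Foo struct{}\n")
	a := &descriptor.GeneratedCodeInfo_Annotation{
		SourceFile: proto.String("foo.proto"),
		Begin:      proto.Int32(5),
		End:        proto.Int32(8),
	}
	fmt.Printf("%s\n", annotatedText(src, a)) // Foo
}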
-
-func init() {
- proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
- proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
- proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
- proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
- proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
- proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
- proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
- proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
- proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
- proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
- proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
- proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
- proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
- proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
- proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
- proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
- proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
- proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
- proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
- proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
- proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
- proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
- proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
- proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
- proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
- proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
- proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
- proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
- proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
-}
-
-func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_descriptor_9588782fb9cbecd6) }
-
-var fileDescriptor_descriptor_9588782fb9cbecd6 = []byte{
- // 2487 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
- 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f,
- 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92,
- 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89,
- 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05,
- 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90,
- 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f,
- 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf,
- 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33,
- 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9,
- 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff,
- 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8,
- 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e,
- 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10,
- 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82,
- 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62,
- 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6,
- 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79,
- 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a,
- 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae,
- 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69,
- 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62,
- 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d,
- 0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a,
- 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea,
- 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c,
- 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52,
- 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8,
- 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a,
- 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52,
- 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f,
- 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26,
- 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92,
- 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d,
- 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18,
- 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae,
- 0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff,
- 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b,
- 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f,
- 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3,
- 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff,
- 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1,
- 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a,
- 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b,
- 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05,
- 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31,
- 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b,
- 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0,
- 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a,
- 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0,
- 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34,
- 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02,
- 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b,
- 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c,
- 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2,
- 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23,
- 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15,
- 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13,
- 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b,
- 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3,
- 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda,
- 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae,
- 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27,
- 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 0xe1, 0x47,
- 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82,
- 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35,
- 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf,
- 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9,
- 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01,
- 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa,
- 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f,
- 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0,
- 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f,
- 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc,
- 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86,
- 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52,
- 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07,
- 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51,
- 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49,
- 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c,
- 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c,
- 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51,
- 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2,
- 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d,
- 0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d,
- 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53,
- 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf,
- 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7,
- 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec,
- 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43,
- 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1,
- 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce,
- 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf,
- 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e,
- 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4,
- 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88,
- 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f,
- 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7,
- 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2,
- 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42,
- 0x31, 0xf5, 0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27,
- 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c,
- 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97,
- 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94,
- 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c,
- 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0,
- 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a,
- 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47,
- 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b,
- 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e,
- 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc,
- 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07,
- 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39,
- 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf,
- 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17,
- 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a,
- 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04,
- 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e,
- 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7,
- 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41,
- 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4,
- 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b,
- 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0,
- 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56,
- 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed,
- 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26,
- 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa,
- 0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00,
- 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7,
- 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde,
- 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf,
- 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe,
- 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03,
- 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15,
- 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a,
- 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29,
- 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61,
- 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d,
- 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4,
- 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52,
- 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3,
- 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12,
- 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65,
- 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9,
- 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc,
- 0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88,
- 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99,
- 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d,
- 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5,
- 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f,
- 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d,
- 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c,
- 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16,
- 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5,
- 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3,
- 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
deleted file mode 100644
index ec6eb168d..000000000
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
+++ /dev/null
@@ -1,744 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: descriptor.proto
-
-package descriptor
-
-import fmt "fmt"
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
-import proto "github.com/gogo/protobuf/proto"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-func (this *FileDescriptorSet) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&descriptor.FileDescriptorSet{")
- if this.File != nil {
- s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *FileDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 16)
- s = append(s, "&descriptor.FileDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Package != nil {
- s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
- }
- if this.Dependency != nil {
- s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
- }
- if this.PublicDependency != nil {
- s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
- }
- if this.WeakDependency != nil {
- s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
- }
- if this.MessageType != nil {
- s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
- }
- if this.EnumType != nil {
- s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
- }
- if this.Service != nil {
- s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
- }
- if this.Extension != nil {
- s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.SourceCodeInfo != nil {
- s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
- }
- if this.Syntax != nil {
- s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *DescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 14)
- s = append(s, "&descriptor.DescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Field != nil {
- s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
- }
- if this.Extension != nil {
- s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
- }
- if this.NestedType != nil {
- s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
- }
- if this.EnumType != nil {
- s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
- }
- if this.ExtensionRange != nil {
- s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
- }
- if this.OneofDecl != nil {
- s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.ReservedRange != nil {
- s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
- }
- if this.ReservedName != nil {
- s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *DescriptorProto_ExtensionRange) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 7)
- s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
- if this.Start != nil {
- s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
- }
- if this.End != nil {
- s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *DescriptorProto_ReservedRange) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
- if this.Start != nil {
- s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
- }
- if this.End != nil {
- s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *ExtensionRangeOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&descriptor.ExtensionRangeOptions{")
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *FieldDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 14)
- s = append(s, "&descriptor.FieldDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Number != nil {
- s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
- }
- if this.Label != nil {
- s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
- }
- if this.Type != nil {
- s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
- }
- if this.TypeName != nil {
- s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
- }
- if this.Extendee != nil {
- s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
- }
- if this.DefaultValue != nil {
- s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
- }
- if this.OneofIndex != nil {
- s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
- }
- if this.JsonName != nil {
- s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *OneofDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.OneofDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *EnumDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 9)
- s = append(s, "&descriptor.EnumDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Value != nil {
- s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.ReservedRange != nil {
- s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
- }
- if this.ReservedName != nil {
- s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
- if this.Start != nil {
- s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
- }
- if this.End != nil {
- s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *EnumValueDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 7)
- s = append(s, "&descriptor.EnumValueDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Number != nil {
- s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *ServiceDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 7)
- s = append(s, "&descriptor.ServiceDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.Method != nil {
- s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *MethodDescriptorProto) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 10)
- s = append(s, "&descriptor.MethodDescriptorProto{")
- if this.Name != nil {
- s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
- }
- if this.InputType != nil {
- s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
- }
- if this.OutputType != nil {
- s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
- }
- if this.Options != nil {
- s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
- }
- if this.ClientStreaming != nil {
- s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
- }
- if this.ServerStreaming != nil {
- s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *FileOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 23)
- s = append(s, "&descriptor.FileOptions{")
- if this.JavaPackage != nil {
- s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
- }
- if this.JavaOuterClassname != nil {
- s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
- }
- if this.JavaMultipleFiles != nil {
- s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
- }
- if this.JavaGenerateEqualsAndHash != nil {
- s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
- }
- if this.JavaStringCheckUtf8 != nil {
- s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
- }
- if this.OptimizeFor != nil {
- s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
- }
- if this.GoPackage != nil {
- s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
- }
- if this.CcGenericServices != nil {
- s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
- }
- if this.JavaGenericServices != nil {
- s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
- }
- if this.PyGenericServices != nil {
- s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
- }
- if this.PhpGenericServices != nil {
- s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
- }
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.CcEnableArenas != nil {
- s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
- }
- if this.ObjcClassPrefix != nil {
- s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
- }
- if this.CsharpNamespace != nil {
- s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
- }
- if this.SwiftPrefix != nil {
- s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
- }
- if this.PhpClassPrefix != nil {
- s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
- }
- if this.PhpNamespace != nil {
- s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *MessageOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 9)
- s = append(s, "&descriptor.MessageOptions{")
- if this.MessageSetWireFormat != nil {
- s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
- }
- if this.NoStandardDescriptorAccessor != nil {
- s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
- }
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.MapEntry != nil {
- s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *FieldOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 11)
- s = append(s, "&descriptor.FieldOptions{")
- if this.Ctype != nil {
- s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
- }
- if this.Packed != nil {
- s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
- }
- if this.Jstype != nil {
- s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
- }
- if this.Lazy != nil {
- s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
- }
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.Weak != nil {
- s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *OneofOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&descriptor.OneofOptions{")
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *EnumOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 7)
- s = append(s, "&descriptor.EnumOptions{")
- if this.AllowAlias != nil {
- s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
- }
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *EnumValueOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.EnumValueOptions{")
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *ServiceOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.ServiceOptions{")
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *MethodOptions) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 7)
- s = append(s, "&descriptor.MethodOptions{")
- if this.Deprecated != nil {
- s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
- }
- if this.IdempotencyLevel != nil {
- s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
- }
- if this.UninterpretedOption != nil {
- s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
- }
- s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *UninterpretedOption) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 11)
- s = append(s, "&descriptor.UninterpretedOption{")
- if this.Name != nil {
- s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
- }
- if this.IdentifierValue != nil {
- s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
- }
- if this.PositiveIntValue != nil {
- s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
- }
- if this.NegativeIntValue != nil {
- s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
- }
- if this.DoubleValue != nil {
- s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
- }
- if this.StringValue != nil {
- s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
- }
- if this.AggregateValue != nil {
- s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *UninterpretedOption_NamePart) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 6)
- s = append(s, "&descriptor.UninterpretedOption_NamePart{")
- if this.NamePart != nil {
- s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
- }
- if this.IsExtension != nil {
- s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *SourceCodeInfo) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&descriptor.SourceCodeInfo{")
- if this.Location != nil {
- s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *SourceCodeInfo_Location) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 9)
- s = append(s, "&descriptor.SourceCodeInfo_Location{")
- if this.Path != nil {
- s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
- }
- if this.Span != nil {
- s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
- }
- if this.LeadingComments != nil {
- s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
- }
- if this.TrailingComments != nil {
- s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
- }
- if this.LeadingDetachedComments != nil {
- s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *GeneratedCodeInfo) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 5)
- s = append(s, "&descriptor.GeneratedCodeInfo{")
- if this.Annotation != nil {
- s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *GeneratedCodeInfo_Annotation) GoString() string {
- if this == nil {
- return "nil"
- }
- s := make([]string, 0, 8)
- s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
- if this.Path != nil {
- s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
- }
- if this.SourceFile != nil {
- s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
- }
- if this.Begin != nil {
- s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
- }
- if this.End != nil {
- s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
- }
- if this.XXX_unrecognized != nil {
- s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
- }
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func valueToGoStringDescriptor(v interface{}, typ string) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
- e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
- if e == nil {
- return "nil"
- }
- s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
- keys := make([]int, 0, len(e))
- for k := range e {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
- ss := []string{}
- for _, k := range keys {
- ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
- }
- s += strings.Join(ss, ",") + "})"
- return s
-}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
deleted file mode 100644
index e0846a357..000000000
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package descriptor
-
-import (
- "strings"
-)
-
-func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
- if !msg.GetOptions().GetMapEntry() {
- return nil, nil
- }
- return msg.GetField()[0], msg.GetField()[1]
-}
-
-func dotToUnderscore(r rune) rune {
- if r == '.' {
- return '_'
- }
- return r
-}
-
-func (field *FieldDescriptorProto) WireType() (wire int) {
- switch *field.Type {
- case FieldDescriptorProto_TYPE_DOUBLE:
- return 1
- case FieldDescriptorProto_TYPE_FLOAT:
- return 5
- case FieldDescriptorProto_TYPE_INT64:
- return 0
- case FieldDescriptorProto_TYPE_UINT64:
- return 0
- case FieldDescriptorProto_TYPE_INT32:
- return 0
- case FieldDescriptorProto_TYPE_UINT32:
- return 0
- case FieldDescriptorProto_TYPE_FIXED64:
- return 1
- case FieldDescriptorProto_TYPE_FIXED32:
- return 5
- case FieldDescriptorProto_TYPE_BOOL:
- return 0
- case FieldDescriptorProto_TYPE_STRING:
- return 2
- case FieldDescriptorProto_TYPE_GROUP:
- return 2
- case FieldDescriptorProto_TYPE_MESSAGE:
- return 2
- case FieldDescriptorProto_TYPE_BYTES:
- return 2
- case FieldDescriptorProto_TYPE_ENUM:
- return 0
- case FieldDescriptorProto_TYPE_SFIXED32:
- return 5
- case FieldDescriptorProto_TYPE_SFIXED64:
- return 1
- case FieldDescriptorProto_TYPE_SINT32:
- return 0
- case FieldDescriptorProto_TYPE_SINT64:
- return 0
- }
- panic("unreachable")
-}
-
-func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
- packed := field.IsPacked()
- wireType := field.WireType()
- fieldNumber := field.GetNumber()
- if packed {
- wireType = 2
- }
- x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
- return x
-}
-
-func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
- packed := field.IsPacked3()
- wireType := field.WireType()
- fieldNumber := field.GetNumber()
- if packed {
- wireType = 2
- }
- x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
- return x
-}
-
-func (field *FieldDescriptorProto) GetKey() []byte {
- x := field.GetKeyUint64()
- i := 0
- keybuf := make([]byte, 0)
- for i = 0; x > 127; i++ {
- keybuf = append(keybuf, 0x80|uint8(x&0x7F))
- x >>= 7
- }
- keybuf = append(keybuf, uint8(x))
- return keybuf
-}
-
-func (field *FieldDescriptorProto) GetKey3() []byte {
- x := field.GetKey3Uint64()
- i := 0
- keybuf := make([]byte, 0)
- for i = 0; x > 127; i++ {
- keybuf = append(keybuf, 0x80|uint8(x&0x7F))
- x >>= 7
- }
- keybuf = append(keybuf, uint8(x))
- return keybuf
-}
-
-func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
- msg := desc.GetMessage(packageName, messageName)
- if msg == nil {
- return nil
- }
- for _, field := range msg.GetField() {
- if field.GetName() == fieldName {
- return field
- }
- }
- return nil
-}
-
-func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
- for _, msg := range file.GetMessageType() {
- if msg.GetName() == typeName {
- return msg
- }
- nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
- if nes != nil {
- return nes
- }
- }
- return nil
-}
-
-func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
- for _, nes := range msg.GetNestedType() {
- if nes.GetName() == typeName {
- return nes
- }
- res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
- if res != nil {
- return res
- }
- }
- return nil
-}
-
-func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
- for _, file := range desc.GetFile() {
- if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
- continue
- }
- for _, msg := range file.GetMessageType() {
- if msg.GetName() == typeName {
- return msg
- }
- }
- for _, msg := range file.GetMessageType() {
- for _, nes := range msg.GetNestedType() {
- if nes.GetName() == typeName {
- return nes
- }
- if msg.GetName()+"."+nes.GetName() == typeName {
- return nes
- }
- }
- }
- }
- return nil
-}
-
-func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
- for _, file := range desc.GetFile() {
- if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
- continue
- }
- for _, msg := range file.GetMessageType() {
- if msg.GetName() == typeName {
- return file.GetSyntax() == "proto3"
- }
- }
- for _, msg := range file.GetMessageType() {
- for _, nes := range msg.GetNestedType() {
- if nes.GetName() == typeName {
- return file.GetSyntax() == "proto3"
- }
- if msg.GetName()+"."+nes.GetName() == typeName {
- return file.GetSyntax() == "proto3"
- }
- }
- }
- }
- return false
-}
-
-func (msg *DescriptorProto) IsExtendable() bool {
- return len(msg.GetExtensionRange()) > 0
-}
-
-func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
- parent := desc.GetMessage(packageName, typeName)
- if parent == nil {
- return "", nil
- }
- if !parent.IsExtendable() {
- return "", nil
- }
- extendee := "." + packageName + "." + typeName
- for _, file := range desc.GetFile() {
- for _, ext := range file.GetExtension() {
- if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
- if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
- continue
- }
- } else {
- if ext.GetExtendee() != extendee {
- continue
- }
- }
- if ext.GetName() == fieldName {
- return file.GetPackage(), ext
- }
- }
- }
- return "", nil
-}
-
-func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
- parent := desc.GetMessage(packageName, typeName)
- if parent == nil {
- return "", nil
- }
- if !parent.IsExtendable() {
- return "", nil
- }
- extendee := "." + packageName + "." + typeName
- for _, file := range desc.GetFile() {
- for _, ext := range file.GetExtension() {
- if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
- if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
- continue
- }
- } else {
- if ext.GetExtendee() != extendee {
- continue
- }
- }
- if ext.GetNumber() == fieldNum {
- return file.GetPackage(), ext
- }
- }
- }
- return "", nil
-}
-
-func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
- parent := desc.GetMessage(packageName, typeName)
- if parent == nil {
- return "", ""
- }
- field := parent.GetFieldDescriptor(fieldName)
- if field == nil {
- var extPackageName string
- extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
- if field == nil {
- return "", ""
- }
- packageName = extPackageName
- }
- typeNames := strings.Split(field.GetTypeName(), ".")
- if len(typeNames) == 1 {
- msg := desc.GetMessage(packageName, typeName)
- if msg == nil {
- return "", ""
- }
- return packageName, msg.GetName()
- }
- if len(typeNames) > 2 {
- for i := 1; i < len(typeNames)-1; i++ {
- packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
- typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
- msg := desc.GetMessage(packageName, typeName)
- if msg != nil {
- typeNames := strings.Split(msg.GetName(), ".")
- if len(typeNames) == 1 {
- return packageName, msg.GetName()
- }
- return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
- }
- }
- }
- return "", ""
-}
-
-func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
- for _, field := range msg.GetField() {
- if field.GetName() == fieldName {
- return field
- }
- }
- return nil
-}
-
-func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
- for _, file := range desc.GetFile() {
- if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
- continue
- }
- for _, enum := range file.GetEnumType() {
- if enum.GetName() == typeName {
- return enum
- }
- }
- }
- return nil
-}
-
-func (f *FieldDescriptorProto) IsEnum() bool {
- return *f.Type == FieldDescriptorProto_TYPE_ENUM
-}
-
-func (f *FieldDescriptorProto) IsMessage() bool {
- return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
-}
-
-func (f *FieldDescriptorProto) IsBytes() bool {
- return *f.Type == FieldDescriptorProto_TYPE_BYTES
-}
-
-func (f *FieldDescriptorProto) IsRepeated() bool {
- return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
-}
-
-func (f *FieldDescriptorProto) IsString() bool {
- return *f.Type == FieldDescriptorProto_TYPE_STRING
-}
-
-func (f *FieldDescriptorProto) IsBool() bool {
- return *f.Type == FieldDescriptorProto_TYPE_BOOL
-}
-
-func (f *FieldDescriptorProto) IsRequired() bool {
- return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
-}
-
-func (f *FieldDescriptorProto) IsPacked() bool {
- return f.Options != nil && f.GetOptions().GetPacked()
-}
-
-func (f *FieldDescriptorProto) IsPacked3() bool {
- if f.IsRepeated() && f.IsScalar() {
- if f.Options == nil || f.GetOptions().Packed == nil {
- return true
- }
- return f.Options != nil && f.GetOptions().GetPacked()
- }
- return false
-}
-
-func (m *DescriptorProto) HasExtension() bool {
- return len(m.ExtensionRange) > 0
-}
diff --git a/vendor/github.com/gogo/protobuf/test/issue270/a/a1.proto b/vendor/github.com/gogo/protobuf/test/issue270/a/a1.proto
deleted file mode 100644
index 59dff1394..000000000
--- a/vendor/github.com/gogo/protobuf/test/issue270/a/a1.proto
+++ /dev/null
@@ -1,12 +0,0 @@
-syntax = "proto2";
-
-package issue270.a;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "github.com/gogo/protobuf/test/issue270/a/a2.proto";
-
-option (gogoproto.populate_all) = true;
-
-message A1 {
- optional A2 a2 = 1;
-}
diff --git a/vendor/github.com/gogo/protobuf/test/issue270/a/a2.proto b/vendor/github.com/gogo/protobuf/test/issue270/a/a2.proto
deleted file mode 100644
index 1d16ff79a..000000000
--- a/vendor/github.com/gogo/protobuf/test/issue270/a/a2.proto
+++ /dev/null
@@ -1,12 +0,0 @@
-syntax = "proto2";
-
-package issue270.a;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "github.com/gogo/protobuf/test/issue270/b/b.proto";
-
-option (gogoproto.populate_all) = true;
-
-message A2 {
- optional issue270.b.B b = 1;
-}
diff --git a/vendor/github.com/gogo/protobuf/test/issue270/b/b.proto b/vendor/github.com/gogo/protobuf/test/issue270/b/b.proto
deleted file mode 100644
index cb71c2480..000000000
--- a/vendor/github.com/gogo/protobuf/test/issue270/b/b.proto
+++ /dev/null
@@ -1,6 +0,0 @@
-syntax = "proto2";
-
-package issue270.b;
-
-message B {
-}
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
deleted file mode 100644
index 387b4eb68..000000000
--- a/vendor/github.com/golang/glog/README
+++ /dev/null
@@ -1,44 +0,0 @@
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
- https://github.com/google/glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
- Package glog implements logging analogous to the Google-internal
- C++ INFO/ERROR/V setup. It provides functions Info, Warning,
- Error, Fatal, plus formatting variants such as Infof. It
- also provides V-style logging controlled by the -v and
- -vmodule=file=2 flags.
-
- Basic examples:
-
- glog.Info("Prepare to repel boarders")
-
- glog.Fatalf("Initialization failed: %s", err)
-
- See the documentation for the V function for an explanation
- of these examples:
-
- if glog.V(2) {
- glog.Info("Starting transaction...")
- }
-
- glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
deleted file mode 100644
index 61820bed6..000000000
--- a/vendor/github.com/golang/protobuf/README.md
+++ /dev/null
@@ -1,281 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-
-[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
-[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
-
-Google's data interchange format.
-Copyright 2010 The Go Authors.
-https://github.com/golang/protobuf
-
-This package and the code it generates requires at least Go 1.6.
-
-This software implements Go bindings for protocol buffers. For
-information about protocol buffers themselves, see
- https://developers.google.com/protocol-buffers/
-
-## Installation ##
-
-To use this software, you must:
-- Install the standard C++ implementation of protocol buffers from
- https://developers.google.com/protocol-buffers/
-- Of course, install the Go compiler and tools from
- https://golang.org/
- See
- https://golang.org/doc/install
- for details or, if you are using gccgo, follow the instructions at
- https://golang.org/doc/install/gccgo
-- Grab the code from the repository and install the proto package.
- The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
- The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
- defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
- compiler, protoc, to find it.
-
-This software has two parts: a 'protocol compiler plugin' that
-generates Go source files that, once compiled, can access and manage
-protocol buffers; and a library that implements run-time support for
-encoding (marshaling), decoding (unmarshaling), and accessing protocol
-buffers.
-
-There is support for gRPC in Go using protocol buffers.
-See the note at the bottom of this file for details.
-
-There are no insertion points in the plugin.
-
-
-## Using protocol buffers with Go ##
-
-Once the software is installed, there are two steps to using it.
-First you must compile the protocol buffer definitions and then import
-them, with the support library, into your program.
-
-To compile the protocol buffer definition, run protoc with the --go_out
-parameter set to the directory you want to output the Go code to.
-
- protoc --go_out=. *.proto
-
-The generated files will be suffixed .pb.go. See the Test code below
-for an example using such a file.
-
-## Packages and input paths ##
-
-The protocol buffer language has a concept of "packages" which does not
-correspond well to the Go notion of packages. In generated Go code,
-each source `.proto` file is associated with a single Go package. The
-name and import path for this package is specified with the `go_package`
-proto option:
-
- option go_package = "github.com/golang/protobuf/ptypes/any";
-
-The protocol buffer compiler will attempt to derive a package name and
-import path if a `go_package` option is not present, but it is
-best to always specify one explicitly.
-
-There is a one-to-one relationship between source `.proto` files and
-generated `.pb.go` files, but any number of `.pb.go` files may be
-contained in the same Go package.
-
-The output name of a generated file is produced by replacing the
-`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
-However, the output directory is selected in one of two ways. Let
-us say we have `inputs/x.proto` with a `go_package` option of
-`github.com/golang/protobuf/p`. The corresponding output file may
-be:
-
-- Relative to the import path:
-
-```shell
- protoc --go_out=. inputs/x.proto
- # writes ./github.com/golang/protobuf/p/x.pb.go
-```
-
- (This can work well with `--go_out=$GOPATH`.)
-
-- Relative to the input file:
-
-```shell
-protoc --go_out=paths=source_relative:. inputs/x.proto
-# generate ./inputs/x.pb.go
-```
-
-## Generated code ##
-
-The package comment for the proto library contains text describing
-the interface provided in Go for protocol buffers. Here is an edited
-version.
-
-The proto package converts data structures to and from the
-wire format of protocol buffers. It works in concert with the
-Go source code generated for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- Helpers for getting values are superseded by the
- GetFoo methods and their use is deprecated.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed with the enum's type name. Enum types have
- a String method, and a Enum method to assist in message construction.
- - Nested groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-Consider file test.proto, containing
-
-```proto
- syntax = "proto2";
- package example;
-
- enum FOO { X = 17; };
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- }
-```
-
-To create and play with a Test object from the example package,
-
-```go
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- "path/to/example"
- )
-
- func main() {
- test := &example.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &example.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // etc.
- }
-```
-
-## Parameters ##
-
-To pass extra parameters to the plugin, use a comma-separated
-parameter list separated from the output directory by a colon:
-
- protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
-
-- `paths=(import | source_relative)` - specifies how the paths of
- generated files are structured. See the "Packages and imports paths"
- section above. The default is `import`.
-- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
- load. The only plugin in this repo is `grpc`.
-- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
- associated with Go package quux/shme. This is subject to the
- import_prefix parameter.
-
-The following parameters are deprecated and should not be used:
-
-- `import_prefix=xxx` - a prefix that is added onto the beginning of
- all imports.
-- `import_path=foo/bar` - used as the package if no input files
- declare `go_package`. If it contains slashes, everything up to the
- rightmost slash is ignored.
-
-## gRPC Support ##
-
-If a proto file specifies RPC services, protoc-gen-go can be instructed to
-generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
-the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
-the --go_out argument to protoc:
-
- protoc --go_out=plugins=grpc:. *.proto
-
-## Compatibility ##
-
-The library and the generated code are expected to be stable over time.
-However, we reserve the right to make breaking changes without notice for the
-following reasons:
-
-- Security. A security issue in the specification or implementation may come to
- light whose resolution requires breaking compatibility. We reserve the right
- to address such security issues.
-- Unspecified behavior. There are some aspects of the Protocol Buffers
- specification that are undefined. Programs that depend on such unspecified
- behavior may break in future releases.
-- Specification errors or changes. If it becomes necessary to address an
- inconsistency, incompleteness, or change in the Protocol Buffers
- specification, resolving the issue could affect the meaning or legality of
- existing programs. We reserve the right to address such issues, including
- updating the implementations.
-- Bugs. If the library has a bug that violates the specification, a program
- that depends on the buggy behavior may break if the bug is fixed. We reserve
- the right to fix such bugs.
-- Adding methods or fields to generated structs. These may conflict with field
- names that already exist in a schema, causing applications to break. When the
- code generator encounters a field in the schema that would collide with a
- generated field or method name, the code generator will append an underscore
- to the generated field or method name.
-- Adding, removing, or changing methods or fields in generated structs that
- start with `XXX`. These parts of the generated code are exported out of
- necessity, but should not be considered part of the public API.
-- Adding, removing, or changing unexported symbols in generated code.
-
-Any breaking changes outside of these will be announced 6 months in advance to
-protobuf@googlegroups.com.
-
-You should, whenever possible, use generated code created by the `protoc-gen-go`
-tool built at the same commit as the `proto` package. The `proto` package
-declares package-level constants in the form `ProtoPackageIsVersionX`.
-Application code and generated code may depend on one of these constants to
-ensure that compilation will fail if the available version of the proto library
-is too old. Whenever we make a change to the generated code that requires newer
-library support, in the same commit we will increment the version number of the
-generated code and declare a new package-level constant whose name incorporates
-the latest version number. Removing a compatibility constant is considered a
-breaking change and would be subject to the announcement policy stated above.
-
-The `protoc-gen-go/generator` package exposes a plugin interface,
-which is used by the gRPC code generation. This interface is not
-supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index d9aa3c42d..63b0f08be 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
- // x -= 0x80 << 63 // Always zero.
return 0, errOverflow
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto b/vendor/github.com/golang/protobuf/proto/deprecated.go
index 8654578c7..35b882c09 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -1,6 +1,7 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -28,21 +29,35 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-syntax = "proto3";
+package proto
+
+import "errors"
-package google.protobuf;
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "SourceContextProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option go_package = "types";
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
-// `SourceContext` represents information about the source of a
-// protobuf element, like the file in which it is defined.
-message SourceContext {
- // The path-qualified name of the .proto file that contained the associated
- // protobuf element. For example: `"google/protobuf/source_context.proto"`.
- string file_name = 1;
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
index d4db5a1c1..f9b6e41b3 100644
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
- m1, m2 := e1.value, e2.value
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index 816a3b9d6..fa88add30 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -185,9 +185,25 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
- desc *ExtensionDesc
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+	// The reason for the divergence is so that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
value interface{}
- enc []byte
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
}
// SetRawExtension is for testing only.
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
@@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
- e.value = v
+ e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
@@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
@@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
+
+// extensionAsLegacyType converts a value in the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ // Represent primitive types as a pointer to the value.
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ case reflect.Ptr:
+		// Represent pointer-to-slice types as the slice value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ }
+ return v
+}
+
+// extensionAsStorageType converts a value in the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Ptr:
+		// Represent pointer-to-scalar types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ case reflect.Slice:
+ // Represent slice types as a pointer to the value.
+ if rv.Type().Elem().Kind() != reflect.Uint8 {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ }
+ return v
+}
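The conversion helpers added above are easiest to see on the scalar case; the following standalone sketch (names invented, independent of the package) mirrors what extensionAsLegacyType does when handing a stored int32 back to GetExtension callers.

```go
package main

import (
	"fmt"
	"reflect"
)

// storageToAPI mirrors the scalar branch of extensionAsLegacyType: a value
// stored as T is returned to callers as *T, the "API type" described in the
// Extension.value comment. Sketch only; the real helpers are unexported.
func storageToAPI(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	p := reflect.New(rv.Type()) // allocate a *T
	p.Elem().Set(rv)            // copy the stored T into it
	return p.Interface()
}

func main() {
	v := storageToAPI(int32(42))
	fmt.Printf("%T %v\n", v, *v.(*int32)) // prints: *int32 42
}
```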
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 75565cc6d..fdd328bb7 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -341,26 +341,6 @@ type Message interface {
ProtoMessage()
}
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index 3b6ca41d5..f48a75676 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@ package proto
*/
import (
- "bytes"
- "encoding/json"
"errors"
- "fmt"
- "reflect"
- "sort"
- "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- return marshalMessageSet(exts, false)
-}
-
-// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var u marshalInfo
- siz := u.sizeMessageSet(exts)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, exts, deterministic)
-
- case map[int32]Extension:
- // This is an old-style extension map.
- // Wrap it in a new-style XXX_InternalExtensions.
- ie := XXX_InternalExtensions{
- p: &struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }{
- extensionMap: exts,
- },
- }
-
- var u marshalInfo
- siz := u.sizeMessageSet(&ie)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, &ie, deterministic)
-
- default:
- return nil, errors.New("proto: not an extension map")
- }
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var mu sync.Locker
- m, mu = exts.extensionsRead()
- if m != nil {
- // Keep the extensions map locked until we're done marshaling to prevent
- // races between marshaling and unmarshaling the lazily-{en,de}coded
- // values.
- mu.Lock()
- defer mu.Unlock()
- }
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
-
- if i > 0 && b.Len() > 1 {
- b.WriteByte(',')
- }
-
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index b6cad9083..94fa9194a 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
+ }
return pointer{v: u}
}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index d55a335d9..dbfffe071 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
- return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
}
// valToPointer converts v to a pointer. v must be of pointer type.
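The unsafe branch above leans on the gc runtime representing an interface value as two words, a type pointer and a data pointer; a minimal sketch of that assumption (valid on the standard toolchain only, names invented):

```go
package main

import (
	"fmt"
	"unsafe"
)

// eface mimics the two-word interface layout that toAddrPointer relies on.
// This is a gc implementation detail, not a portable guarantee.
type eface struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

func main() {
	x := 7
	var i interface{} = &x // pointer-typed value: the data word is the *int itself
	w := (*eface)(unsafe.Pointer(&i))
	fmt.Println(*(*int)(w.data)) // prints 7
}
```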
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index 50b99b83a..79668ff5c 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
- if collectStats {
- stats.Chit++
- }
return sprop
}
@@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
return prop
}
- if collectStats {
- stats.Cmiss++
- }
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
@@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
}
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- _, _, _, oots = om.XXX_OneofFuncs()
-
+ if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
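The new oneofWrappersIface case corresponds to the slimmer hook emitted by newer generators. A hedged sketch of what such generated code looks like (message, field, and wrapper names invented):

```go
package example

// MyMessage stands in for a generated message with a oneof named "contact".
type MyMessage struct {
	Contact isMyMessage_Contact `protobuf_oneof:"contact"`
}

type isMyMessage_Contact interface{ isMyMessage_Contact() }

type MyMessage_Email struct {
	Email string `protobuf:"bytes,1,opt,name=email,proto3,oneof"`
}
type MyMessage_Phone struct {
	Phone string `protobuf:"bytes,2,opt,name=phone,proto3,oneof"`
}

func (*MyMessage_Email) isMyMessage_Contact() {}
func (*MyMessage_Phone) isMyMessage_Contact() {}

// XXX_OneofWrappers lists one nil pointer per oneof case; the switch above
// pulls the field number and name out of each wrapper's struct tag.
func (*MyMessage) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*MyMessage_Email)(nil),
		(*MyMessage_Phone)(nil),
	}
}
```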
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
index b16794496..5cb11fa95 100644
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -87,6 +87,7 @@ type marshalElemInfo struct {
sizer sizer
marshaler marshaler
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
}
var (
@@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
}
n := t.NumField()
@@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
panic("tag is not an integer")
}
wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
e = &marshalElemInfo{
wiretag: uint64(tag)<<3 | wt,
tagsize: SizeVarint(uint64(tag) << 3),
sizer: sizer,
marshaler: marshaler,
isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
}
// update cache
@@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
- fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
-type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range m.MapKeys() {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
@@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
@@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
// the last time this function was called.
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
mu.Unlock()
@@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
}
mu.Unlock()
@@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
if !nerr.Merge(err) {
return b, err
@@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
if !nerr.Merge(err) {
@@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
return n
@@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
index ebf1caa56..acee2fc52 100644
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
- return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
@@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Find any types associated with oneof fields.
- // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
- if fn.IsValid() {
- res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
- for i := res.Len() - 1; i >= 0; i-- {
- v := res.Index(i) // interface{}
- tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
}
+ }
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
+
}
// Get extension ranges, if any.
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
@@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
- if len(b) <= 0 {
+ if len(b) == 0 {
goto bad
}
x = uint64(b[0])
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
new file mode 100644
index 000000000..f8684d99f
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.4
+ - 1.3
+ - 1.2
+ - tip
+
+install:
+ - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+
+script:
+ - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 000000000..51cf5cd1a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Go makes it very simple to ensure properly formatted code, so always run
+ `go fmt` on your code before committing it. You should also run
+ [golint][] over your code. As noted in the [golint readme][], it's not
+ strictly necessary that your code be completely "lint-free", but this will
+ help you find common style issues.
+
+ 1. Any significant changes should almost always be accompanied by tests. The
+ project already has good test coverage, so look at some of the existing
+ tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+ are invaluable tools for seeing which parts of your code aren't being
+ exercised by your tests.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod
new file mode 100644
index 000000000..8ec4fe9e9
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/go.mod
@@ -0,0 +1,3 @@
+module github.com/google/gofuzz
+
+go 1.12
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
deleted file mode 100644
index 08f86693b..000000000
--- a/vendor/github.com/gorilla/context/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-context
-=======
-[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
-
-gorilla/context is a general purpose registry for global request variables.
-
-> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
-> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
-
-Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
deleted file mode 100644
index 81cb128b1..000000000
--- a/vendor/github.com/gorilla/context/context.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context
-
-import (
- "net/http"
- "sync"
- "time"
-)
-
-var (
- mutex sync.RWMutex
- data = make(map[*http.Request]map[interface{}]interface{})
- datat = make(map[*http.Request]int64)
-)
-
-// Set stores a value for a given key in a given request.
-func Set(r *http.Request, key, val interface{}) {
- mutex.Lock()
- if data[r] == nil {
- data[r] = make(map[interface{}]interface{})
- datat[r] = time.Now().Unix()
- }
- data[r][key] = val
- mutex.Unlock()
-}
-
-// Get returns a value stored for a given key in a given request.
-func Get(r *http.Request, key interface{}) interface{} {
- mutex.RLock()
- if ctx := data[r]; ctx != nil {
- value := ctx[key]
- mutex.RUnlock()
- return value
- }
- mutex.RUnlock()
- return nil
-}
-
-// GetOk returns stored value and presence state like multi-value return of map access.
-func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
- mutex.RLock()
- if _, ok := data[r]; ok {
- value, ok := data[r][key]
- mutex.RUnlock()
- return value, ok
- }
- mutex.RUnlock()
- return nil, false
-}
-
-// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
-func GetAll(r *http.Request) map[interface{}]interface{} {
- mutex.RLock()
- if context, ok := data[r]; ok {
- result := make(map[interface{}]interface{}, len(context))
- for k, v := range context {
- result[k] = v
- }
- mutex.RUnlock()
- return result
- }
- mutex.RUnlock()
- return nil
-}
-
-// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
-// the request was registered.
-func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
- mutex.RLock()
- context, ok := data[r]
- result := make(map[interface{}]interface{}, len(context))
- for k, v := range context {
- result[k] = v
- }
- mutex.RUnlock()
- return result, ok
-}
-
-// Delete removes a value stored for a given key in a given request.
-func Delete(r *http.Request, key interface{}) {
- mutex.Lock()
- if data[r] != nil {
- delete(data[r], key)
- }
- mutex.Unlock()
-}
-
-// Clear removes all values stored for a given request.
-//
-// This is usually called by a handler wrapper to clean up request
-// variables at the end of a request lifetime. See ClearHandler().
-func Clear(r *http.Request) {
- mutex.Lock()
- clear(r)
- mutex.Unlock()
-}
-
-// clear is Clear without the lock.
-func clear(r *http.Request) {
- delete(data, r)
- delete(datat, r)
-}
-
-// Purge removes request data stored for longer than maxAge, in seconds.
-// It returns the number of requests removed.
-//
-// If maxAge <= 0, all request data is removed.
-//
-// This is only used for sanity check: in case context cleaning was not
-// properly set some request data can be kept forever, consuming an increasing
-// amount of memory. In case this is detected, Purge() must be called
-// periodically until the problem is fixed.
-func Purge(maxAge int) int {
- mutex.Lock()
- count := 0
- if maxAge <= 0 {
- count = len(data)
- data = make(map[*http.Request]map[interface{}]interface{})
- datat = make(map[*http.Request]int64)
- } else {
- min := time.Now().Unix() - int64(maxAge)
- for r := range data {
- if datat[r] < min {
- clear(r)
- count++
- }
- }
- }
- mutex.Unlock()
- return count
-}
-
-// ClearHandler wraps an http.Handler and clears request values at the end
-// of a request lifetime.
-func ClearHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- defer Clear(r)
- h.ServeHTTP(w, r)
- })
-}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
deleted file mode 100644
index 448d1bfca..000000000
--- a/vendor/github.com/gorilla/context/doc.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package context stores values shared during a request lifetime.
-
-Note: gorilla/context, having been born well before `context.Context` existed,
-does not play well with the shallow copying of the request that
-[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
-(added to net/http Go 1.7 onwards) performs. You should either use *just*
-gorilla/context, or moving forward, the new `http.Request.Context()`.
-
-For example, a router can set variables extracted from the URL and later
-application handlers can access those values, or it can be used to store
-sessions values to be saved at the end of a request. There are several
-others common uses.
-
-The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
-
- http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
-
-Here's the basic usage: first define the keys that you will need. The key
-type is interface{} so a key can be of any type that supports equality.
-Here we define a key using a custom int type to avoid name collisions:
-
- package foo
-
- import (
- "github.com/gorilla/context"
- )
-
- type key int
-
- const MyKey key = 0
-
-Then set a variable. Variables are bound to an http.Request object, so you
-need a request instance to set a value:
-
- context.Set(r, MyKey, "bar")
-
-The application can later access the variable using the same key you provided:
-
- func MyHandler(w http.ResponseWriter, r *http.Request) {
- // val is "bar".
- val := context.Get(r, foo.MyKey)
-
- // returns ("bar", true)
- val, ok := context.GetOk(r, foo.MyKey)
- // ...
- }
-
-And that's all about the basic usage. We discuss some other ideas below.
-
-Any type can be stored in the context. To enforce a given type, make the key
-private and wrap Get() and Set() to accept and return values of a specific
-type:
-
- type key int
-
- const mykey key = 0
-
- // GetMyKey returns a value for this package from the request values.
- func GetMyKey(r *http.Request) SomeType {
- if rv := context.Get(r, mykey); rv != nil {
- return rv.(SomeType)
- }
- return nil
- }
-
- // SetMyKey sets a value for this package in the request values.
- func SetMyKey(r *http.Request, val SomeType) {
- context.Set(r, mykey, val)
- }
-
-Variables must be cleared at the end of a request, to remove all values
-that were stored. This can be done in an http.Handler, after a request was
-served. Just call Clear() passing the request:
-
- context.Clear(r)
-
-...or use ClearHandler(), which conveniently wraps an http.Handler to clear
-variables at the end of a request lifetime.
-
-The Routers from the packages gorilla/mux and gorilla/pat call Clear()
-so if you are using either of them you don't need to clear the context manually.
-*/
-package context
diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml
new file mode 100644
index 000000000..d003ad922
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+
+matrix:
+ include:
+ - go: 1.7.x
+ - go: 1.8.x
+ - go: 1.9.x
+ - go: 1.10.x
+ - go: 1.11.x
+ - go: 1.x
+ env: LATEST=true
+ - go: tip
+ allow_failures:
+ - go: tip
+
+install:
+ - # Skip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - if [[ "$LATEST" = true ]]; then go vet .; fi
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS
new file mode 100644
index 000000000..b722392ee
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/AUTHORS
@@ -0,0 +1,8 @@
+# This is the official list of gorilla/mux authors for copyright purposes.
+#
+# Please keep the list sorted.
+
+Google LLC (https://opensource.google.com/)
+Kamil Kisielk <kamil@kamilkisiel.net>
+Matt Silverlock <matt@eatsleeprepeat.net>
+Rodrigo Moraes (https://github.com/moraes)
diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..232be82e4
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+**What version of Go are you running?** (Paste the output of `go version`)
+
+
+**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
+
+
+**Describe your problem** (and what you have tried so far)
+
+
+**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
+
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
index 0e5fb8728..6903df638 100644
--- a/vendor/github.com/gorilla/mux/LICENSE
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index e424397ac..c661599ab 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -6,7 +6,7 @@
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
-http://www.gorillatoolkit.org/pkg/mux
+https://www.gorillatoolkit.org/pkg/mux
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
@@ -88,7 +88,7 @@ r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
-r.Host("{subdomain:[a-z]+}.domain.com")
+r.Host("{subdomain:[a-z]+}.example.com")
```
There are several other matchers that can be added. To match path prefixes:
@@ -238,13 +238,13 @@ This also works for host and query value variables:
```go
r := mux.NewRouter()
-r.Host("{subdomain}.domain.com").
+r.Host("{subdomain}.example.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
-// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
@@ -264,7 +264,7 @@ r.HeadersRegexp("Content-Type", "application/(text|json)")
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go
-// "http://news.domain.com/"
+// "http://news.example.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
@@ -275,12 +275,12 @@ And if you use subrouters, host and path defined separately can be built as well
```go
r := mux.NewRouter()
-s := r.Host("{subdomain}.domain.com").Subrouter()
+s := r.Host("{subdomain}.example.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
-// "http://news.domain.com/articles/technology/42"
+// "http://news.example.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
@@ -503,8 +503,8 @@ package main
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
// A very simple health check.
- w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
// In the future we could report back on the status of our DB, or our cache
// (e.g. Redis) by performing a simple PING, and include them in the response.
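The swap above matters because net/http flushes the response headers the moment WriteHeader runs; anything set afterwards is silently dropped. A minimal standalone version of the corrected handler (a sketch with the same shape as the README example):

```go
package main

import (
	"io"
	"net/http"
)

// HealthCheckHandler sets the Content-Type before writing the status code,
// since headers added after WriteHeader never reach the client.
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, `{"alive": true}`)
}

func main() {
	http.HandleFunc("/health", HealthCheckHandler)
	http.ListenAndServe(":8080", nil)
}
```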
diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context.go
index 209cbea7d..665940a26 100644
--- a/vendor/github.com/gorilla/mux/context_native.go
+++ b/vendor/github.com/gorilla/mux/context.go
@@ -1,5 +1,3 @@
-// +build go1.7
-
package mux
import (
@@ -18,7 +16,3 @@ func contextSet(r *http.Request, key, val interface{}) *http.Request {
return r.WithContext(context.WithValue(r.Context(), key, val))
}
-
-func contextClear(r *http.Request) {
- return
-}
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go
deleted file mode 100644
index d7adaa8fa..000000000
--- a/vendor/github.com/gorilla/mux/context_gorilla.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !go1.7
-
-package mux
-
-import (
- "net/http"
-
- "github.com/gorilla/context"
-)
-
-func contextGet(r *http.Request, key interface{}) interface{} {
- return context.Get(r, key)
-}
-
-func contextSet(r *http.Request, key, val interface{}) *http.Request {
- if val == nil {
- return r
- }
-
- context.Set(r, key, val)
- return r
-}
-
-func contextClear(r *http.Request) {
- context.Clear(r)
-}
diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod
new file mode 100644
index 000000000..cfc8ede58
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/go.mod
@@ -0,0 +1 @@
+module github.com/gorilla/mux
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index 4bbafa51d..a2cd193e4 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -22,7 +22,7 @@ var (
// NewRouter returns a new router instance.
func NewRouter() *Router {
- return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+ return &Router{namedRoutes: make(map[string]*Route)}
}
// Router registers routes to be matched and dispatches a handler.
@@ -50,24 +50,78 @@ type Router struct {
// Configurable Handler to be used when the request method does not match the route.
MethodNotAllowedHandler http.Handler
- // Parent route, if this is a subrouter.
- parent parentRoute
// Routes to be matched, in order.
routes []*Route
+
// Routes by name for URL building.
namedRoutes map[string]*Route
- // See Router.StrictSlash(). This defines the flag for new routes.
- strictSlash bool
- // See Router.SkipClean(). This defines the flag for new routes.
- skipClean bool
+
// If true, do not clear the request context after handling the request.
- // This has no effect when go1.7+ is used, since the context is stored
+ //
+ // Deprecated: No effect when go1.7+ is used, since the context is stored
// on the request itself.
KeepContext bool
- // see Router.UseEncodedPath(). This defines a flag for all routes.
- useEncodedPath bool
+
// Slice of middlewares to be called after a match is found
middlewares []middleware
+
+ // configuration shared with `Route`
+ routeConf
+}
+
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+ // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+ useEncodedPath bool
+
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+
+ // If true, when the path pattern is "/path//to", accessing "/path//to"
+ // will not redirect
+ skipClean bool
+
+ // Manager for the variables from host and path.
+ regexp routeRegexpGroup
+
+ // List of matchers.
+ matchers []matcher
+
+ // The scheme used when building URLs.
+ buildScheme string
+
+ buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+ c := r
+
+ if r.regexp.path != nil {
+ c.regexp.path = copyRouteRegexp(r.regexp.path)
+ }
+
+ if r.regexp.host != nil {
+ c.regexp.host = copyRouteRegexp(r.regexp.host)
+ }
+
+ c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+ for _, q := range r.regexp.queries {
+ c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+ }
+
+ c.matchers = make([]matcher, 0, len(r.matchers))
+ for _, m := range r.matchers {
+ c.matchers = append(c.matchers, m)
+ }
+
+ return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+ c := *r
+ return &c
}
// Match attempts to match the given request against the router's registered routes.
@@ -155,22 +209,18 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
handler = http.NotFoundHandler()
}
- if !r.KeepContext {
- defer contextClear(req)
- }
-
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
- return r.getNamedRoutes()[name]
+ return r.namedRoutes[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
- return r.getNamedRoutes()[name]
+ return r.namedRoutes[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
@@ -222,54 +272,23 @@ func (r *Router) UseEncodedPath() *Router {
}
// ----------------------------------------------------------------------------
-// parentRoute
-// ----------------------------------------------------------------------------
-
-func (r *Router) getBuildScheme() string {
- if r.parent != nil {
- return r.parent.getBuildScheme()
- }
- return ""
-}
-
-// getNamedRoutes returns the map where named routes are registered.
-func (r *Router) getNamedRoutes() map[string]*Route {
- if r.namedRoutes == nil {
- if r.parent != nil {
- r.namedRoutes = r.parent.getNamedRoutes()
- } else {
- r.namedRoutes = make(map[string]*Route)
- }
- }
- return r.namedRoutes
-}
-
-// getRegexpGroup returns regexp definitions from the parent route, if any.
-func (r *Router) getRegexpGroup() *routeRegexpGroup {
- if r.parent != nil {
- return r.parent.getRegexpGroup()
- }
- return nil
-}
-
-func (r *Router) buildVars(m map[string]string) map[string]string {
- if r.parent != nil {
- m = r.parent.buildVars(m)
- }
- return m
-}
-
-// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
- route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+ // initialize a route with a copy of the parent router's configuration
+ route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.routes = append(r.routes, route)
return route
}
+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+ return r.NewRoute().Name(name)
+}
+
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
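The embedded routeConf and copyRouteConf above are what let a subrouter start from a snapshot of its parent's settings rather than walking a parent chain; a hedged usage sketch (paths and handler invented):

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.UseEncodedPath() // parent-level flag, now stored in the router's routeConf

	// Subrouter() copies the parent's routeConf, so settings tweaked here
	// apply to the subrouter's routes without leaking back into the parent.
	api := r.PathPrefix("/api").Subrouter()
	api.StrictSlash(true)
	api.HandleFunc("/items", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})

	http.ListenAndServe(":8080", r)
}
```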
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index 2b57e5627..ac1abcd47 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -113,6 +113,13 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro
if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
+
+ var wildcardHostPort bool
+ if typ == regexpTypeHost {
+ if !strings.Contains(pattern.String(), ":") {
+ wildcardHostPort = true
+ }
+ }
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
@@ -131,13 +138,14 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro
// Done!
return &routeRegexp{
- template: template,
- regexpType: typ,
- options: options,
- regexp: reg,
- reverse: reverse.String(),
- varsN: varsN,
- varsR: varsR,
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ wildcardHostPort: wildcardHostPort,
}, nil
}
@@ -158,11 +166,22 @@ type routeRegexp struct {
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
+ // Wildcard host-port (no strict port match in hostname)
+ wildcardHostPort bool
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
- if r.regexpType != regexpTypeHost {
+ if r.regexpType == regexpTypeHost {
+ host := getHost(req)
+ if r.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
+ return r.regexp.MatchString(host)
+ } else {
if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
@@ -172,8 +191,6 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
}
return r.regexp.MatchString(path)
}
-
- return r.regexp.MatchString(getHost(req))
}
// url builds a URL part using the given values.
@@ -267,7 +284,7 @@ type routeRegexpGroup struct {
}
// setMatch extracts the variables from the URL once a route matches.
-func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
@@ -296,7 +313,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
} else {
u.Path += "/"
}
- m.Handler = http.RedirectHandler(u.String(), 301)
+ m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
}
}
}
@@ -312,17 +329,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
}
// getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
- host := r.Host
- // Slice off any port information.
- if i := strings.Index(host, ":"); i != -1 {
- host = host[:i]
- }
- return host
-
+ return r.Host
}
func extractVars(input string, matches []int, names []string, output map[string]string) {
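The wildcardHostPort flag and the getHost change above relax port handling: a host template without an explicit port now matches regardless of the port in the Host header, while a template naming a port stays strict. A hedged sketch of the resulting routes:

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	noop := func(w http.ResponseWriter, r *http.Request) {}

	// No port in the template: matches "www.example.com" and
	// "www.example.com:8080" alike once the port match is non-strict.
	r.Host("www.example.com").HandlerFunc(noop)

	// Explicit port: only requests carrying this port match.
	r.Host("www.example.com:8080").HandlerFunc(noop)

	http.ListenAndServe(":8080", r)
}
```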
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index a591d7354..8479c68c1 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -15,24 +15,8 @@ import (
// Route stores information to match a request and build URLs.
type Route struct {
- // Parent where the route was registered (a Router).
- parent parentRoute
// Request handler for the route.
handler http.Handler
- // List of matchers.
- matchers []matcher
- // Manager for the variables from host and path.
- regexp *routeRegexpGroup
- // If true, when the path pattern is "/path/", accessing "/path" will
- // redirect to the former and vice versa.
- strictSlash bool
- // If true, when the path pattern is "/path//to", accessing "/path//to"
- // will not redirect
- skipClean bool
- // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
- useEncodedPath bool
- // The scheme used when building URLs.
- buildScheme string
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
@@ -40,7 +24,11 @@ type Route struct {
// Error resulted from building a route.
err error
- buildVarsFunc BuildVarsFunc
+ // "global" reference to all named routes
+ namedRoutes map[string]*Route
+
+ // config possibly passed in from `Router`
+ routeConf
}
// SkipClean reports whether path cleaning is enabled for this route via
@@ -64,6 +52,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
matchErr = ErrMethodMismatch
continue
}
+
+		// Ignore ErrNotFound errors. These errors arise from the match
+		// call to subrouters.
+ //
+ // This prevents subsequent matching subrouters from failing to
+ // run middleware. If not ignored, the middleware would see a
+ // non-nil MatchErr and be skipped, even when there was a
+ // matching route.
+ if match.MatchErr == ErrNotFound {
+ match.MatchErr = nil
+ }
+
matchErr = nil
return false
}
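The comment above concerns the interaction between subrouters and middleware; the following is only an illustrative sketch of that scenario, with made-up paths, port and handlers. With the ErrNotFound reset in place, the middleware is expected to run for a request that an earlier subrouter rejected but a later one matches.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Middleware registered on the top-level router.
	r.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			log.Println("middleware ran for", req.URL.Path)
			next.ServeHTTP(w, req)
		})
	})

	api := r.PathPrefix("/api").Subrouter()
	api.HandleFunc("/items", func(w http.ResponseWriter, req *http.Request) {})

	admin := r.PathPrefix("/admin").Subrouter()
	admin.HandleFunc("/users", func(w http.ResponseWriter, req *http.Request) {})

	// A request to /admin/users fails the /api subrouter first; without the
	// ErrNotFound reset its error would leak into MatchErr and skip the
	// middleware even though /admin/users matches.
	log.Fatal(http.ListenAndServe(":8080", r))
}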
@@ -93,9 +93,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
}
// Set variables.
- if r.regexp != nil {
- r.regexp.setMatch(req, match, r)
- }
+ r.regexp.setMatch(req, match, r)
return true
}
@@ -137,7 +135,7 @@ func (r *Route) GetHandler() http.Handler {
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
-// If the name was registered already it will be overwritten.
+// It is an error to call Name more than once on a route.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
@@ -145,7 +143,7 @@ func (r *Route) Name(name string) *Route {
}
if r.err == nil {
r.name = name
- r.getNamedRoutes()[name] = r
+ r.namedRoutes[name] = r
}
return r
}
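A brief usage note for the documentation change above (route and names are hypothetical): naming a route twice now surfaces as an error on the route instead of silently replacing the registration.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	route := r.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {}).
		Name("home")

	// The second Name call records an error rather than renaming the route.
	if err := route.Name("index").GetError(); err != nil {
		fmt.Println(err) // mux: route already has name "home", can't set "index"
	}
}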
@@ -177,7 +175,6 @@ func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
- r.regexp = r.getRegexpGroup()
if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
@@ -386,7 +383,7 @@ func (r *Route) PathPrefix(tpl string) *Route {
// The above route will only match if the URL contains the defined queries
// values, e.g.: ?foo=bar&id=42.
//
-// It the value is an empty string, it will match any value if the key is set.
+// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
@@ -424,7 +421,7 @@ func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
- if r.buildScheme == "" && len(schemes) > 0 {
+ if len(schemes) > 0 {
r.buildScheme = schemes[0]
}
return r.addMatcher(schemeMatcher(schemes))
@@ -439,7 +436,15 @@ type BuildVarsFunc func(map[string]string) map[string]string
// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
- r.buildVarsFunc = f
+ if r.buildVarsFunc != nil {
+ // compose the old and new functions
+ old := r.buildVarsFunc
+ r.buildVarsFunc = func(m map[string]string) map[string]string {
+ return f(old(m))
+ }
+ } else {
+ r.buildVarsFunc = f
+ }
return r
}
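A sketch of the composition behaviour introduced above, with a made-up route and transforms: both registered functions now run, the earlier one first, because each new function wraps the previous one as f(old(m)).

package main

import (
	"fmt"
	"strings"

	"github.com/gorilla/mux"
)

func main() {
	route := mux.NewRouter().Path("/users/{name}")

	// First registration lowercases the variable.
	route.BuildVarsFunc(func(m map[string]string) map[string]string {
		m["name"] = strings.ToLower(m["name"])
		return m
	})
	// Second registration wraps the first one.
	route.BuildVarsFunc(func(m map[string]string) map[string]string {
		m["name"] = "u-" + m["name"]
		return m
	})

	u, err := route.URL("name", "Ada")
	fmt.Println(u, err) // expected: /users/u-ada <nil>
}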
@@ -458,7 +463,8 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
- router := &Router{parent: r, strictSlash: r.strictSlash}
+ // initialize a subrouter with a copy of the parent route's configuration
+ router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.addMatcher(router)
return router
}
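One reading of the comment above, as a hedged example with made-up paths: options set on the parent router, such as StrictSlash, are copied into the subrouter's configuration and therefore apply to routes registered on it.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter().StrictSlash(true)

	// The subrouter starts from a copy of the parent's routeConf, so the
	// StrictSlash setting above should also govern /api/items vs /api/items/.
	api := r.PathPrefix("/api").Subrouter()
	api.HandleFunc("/items", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("items"))
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}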
@@ -502,9 +508,6 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
- if r.regexp == nil {
- return nil, errors.New("mux: route doesn't have a host or path")
- }
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
@@ -516,8 +519,8 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
return nil, err
}
scheme = "http"
- if s := r.getBuildScheme(); s != "" {
- scheme = s
+ if r.buildScheme != "" {
+ scheme = r.buildScheme
}
}
if r.regexp.path != nil {
@@ -547,7 +550,7 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
- if r.regexp == nil || r.regexp.host == nil {
+ if r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
values, err := r.prepareVars(pairs...)
@@ -562,8 +565,8 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
Scheme: "http",
Host: host,
}
- if s := r.getBuildScheme(); s != "" {
- u.Scheme = s
+ if r.buildScheme != "" {
+ u.Scheme = r.buildScheme
}
return u, nil
}
@@ -575,7 +578,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
- if r.regexp == nil || r.regexp.path == nil {
+ if r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
values, err := r.prepareVars(pairs...)
@@ -600,7 +603,7 @@ func (r *Route) GetPathTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
- if r.regexp == nil || r.regexp.path == nil {
+ if r.regexp.path == nil {
return "", errors.New("mux: route doesn't have a path")
}
return r.regexp.path.template, nil
@@ -614,7 +617,7 @@ func (r *Route) GetPathRegexp() (string, error) {
if r.err != nil {
return "", r.err
}
- if r.regexp == nil || r.regexp.path == nil {
+ if r.regexp.path == nil {
return "", errors.New("mux: route does not have a path")
}
return r.regexp.path.regexp.String(), nil
@@ -629,7 +632,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) {
if r.err != nil {
return nil, r.err
}
- if r.regexp == nil || r.regexp.queries == nil {
+ if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
var queries []string
@@ -648,7 +651,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) {
if r.err != nil {
return nil, r.err
}
- if r.regexp == nil || r.regexp.queries == nil {
+ if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
var queries []string
@@ -683,7 +686,7 @@ func (r *Route) GetHostTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
- if r.regexp == nil || r.regexp.host == nil {
+ if r.regexp.host == nil {
return "", errors.New("mux: route doesn't have a host")
}
return r.regexp.host.template, nil
@@ -700,64 +703,8 @@ func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
}
func (r *Route) buildVars(m map[string]string) map[string]string {
- if r.parent != nil {
- m = r.parent.buildVars(m)
- }
if r.buildVarsFunc != nil {
m = r.buildVarsFunc(m)
}
return m
}
-
-// ----------------------------------------------------------------------------
-// parentRoute
-// ----------------------------------------------------------------------------
-
-// parentRoute allows routes to know about parent host and path definitions.
-type parentRoute interface {
- getBuildScheme() string
- getNamedRoutes() map[string]*Route
- getRegexpGroup() *routeRegexpGroup
- buildVars(map[string]string) map[string]string
-}
-
-func (r *Route) getBuildScheme() string {
- if r.buildScheme != "" {
- return r.buildScheme
- }
- if r.parent != nil {
- return r.parent.getBuildScheme()
- }
- return ""
-}
-
-// getNamedRoutes returns the map where named routes are registered.
-func (r *Route) getNamedRoutes() map[string]*Route {
- if r.parent == nil {
- // During tests router is not always set.
- r.parent = NewRouter()
- }
- return r.parent.getNamedRoutes()
-}
-
-// getRegexpGroup returns regexp definitions from this route.
-func (r *Route) getRegexpGroup() *routeRegexpGroup {
- if r.regexp == nil {
- if r.parent == nil {
- // During tests router is not always set.
- r.parent = NewRouter()
- }
- regexp := r.parent.getRegexpGroup()
- if regexp == nil {
- r.regexp = new(routeRegexpGroup)
- } else {
- // Copy.
- r.regexp = &routeRegexpGroup{
- host: regexp.host,
- path: regexp.path,
- queries: regexp.queries,
- }
- }
- }
- return r.regexp
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml
new file mode 100644
index 000000000..304a83595
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.x
+
+branches:
+ only:
+ - master
+
+script: make test testrace
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 000000000..b97cd6ed0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
+TEST?=./...
+
+default: test
+
+# test runs the test suite and vets the code.
+test: generate
+ @echo "==> Running tests..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
+
+# testrace runs the race checker
+testrace: generate
+ @echo "==> Running tests (race)..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -race ${TESTARGS}
+
+# updatedeps installs all the dependencies needed to run and build.
+updatedeps:
+ @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
+
+# generate runs `go generate` to build the dynamically generated source files.
+generate:
+ @echo "==> Generating..."
+ @find . -type f -name '.DS_Store' -delete
+ @go list ./... \
+ | grep -v "/vendor/" \
+ | xargs -n1 go generate
+
+.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum
new file mode 100644
index 000000000..85b1f8ff3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore
new file mode 100644
index 000000000..6d9953c3c
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/.gitignore
@@ -0,0 +1,3 @@
+.test
+.go
+
diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml
new file mode 100644
index 000000000..9cf8bb7fc
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+script:
+ - go test -race -v ./...
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get gopkg.in/fsnotify.v1
+ - go get gopkg.in/tomb.v1
diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md
new file mode 100644
index 000000000..422790c07
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/CHANGES.md
@@ -0,0 +1,63 @@
+# API v1 (gopkg.in/hpcloud/tail.v1)
+
+## April, 2016
+
+* Migrated to godep, as depman is no longer supported
+* Introduced golang vendoring feature
+* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopening a deleted file
+
+## July, 2015
+
+* Fix inotify watcher leak; remove `Cleanup` (#51)
+
+# API v0 (gopkg.in/hpcloud/tail.v0)
+
+## June, 2015
+
+* Don't return partial lines (PR #40)
+* Use stable version of fsnotify (#46)
+
+## July, 2014
+
+* Fix tail for Windows (PR #36)
+
+## May, 2014
+
+* Improved rate limiting using leaky bucket (PR #29)
+* Fix odd line splitting (PR #30)
+
+## Apr, 2014
+
+* LimitRate now discards read buffer (PR #28)
+* allow reading of longer lines if MaxLineSize is unset (PR #24)
+* updated deps.json to latest fsnotify (441bbc86b1)
+
+## Feb, 2014
+
+* added `Config.Logger` to suppress library logging
+
+## Nov, 2013
+
+* add Cleanup to remove leaky inotify watches (PR #20)
+
+## Aug, 2013
+
+* redesigned Location field (PR #12)
+* add tail.Tell (PR #14)
+
+## July, 2013
+
+* Rate limiting (PR #10)
+
+## May, 2013
+
+* Detect file deletions/renames in polling file watcher (PR #1)
+* Detect file truncation
+* Fix potential race condition when reopening the file (issue 5)
+* Fix potential blocking of `tail.Stop` (issue 4)
+* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
+* Support Follow=false
+
+## Feb, 2013
+
+* Initial open source release
diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile
new file mode 100644
index 000000000..cd297b940
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang
+
+RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
+ADD . $GOPATH/src/github.com/hpcloud/tail/
+
+# expecting to fetch dependencies successfully.
+RUN go get -v github.com/hpcloud/tail
+
+# expecting to run the test successfully.
+RUN go test -v github.com/hpcloud/tail
+
+# expecting to install successfully
+RUN go install -v github.com/hpcloud/tail
+RUN go install -v github.com/hpcloud/tail/cmd/gotail
+
+RUN $GOPATH/bin/gotail -h || true
+
+ENV PATH $GOPATH/bin:$PATH
+CMD ["gotail"]
diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile
new file mode 100644
index 000000000..6591b24fc
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/Makefile
@@ -0,0 +1,11 @@
+default: test
+
+test: *.go
+ go test -v -race ./...
+
+fmt:
+ gofmt -w .
+
+# Run the test in an isolated environment.
+fulltest:
+ docker build -t hpcloud/tail .
diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml
new file mode 100644
index 000000000..d370055b6
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/appveyor.yml
@@ -0,0 +1,11 @@
+version: 0.{build}
+skip_tags: true
+cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
+build_script:
+- SET GOPATH=c:\workspace
+- go test -v -race ./...
+test: off
+clone_folder: c:\workspace\src\github.com\hpcloud\tail
+branches:
+ only:
+ - master
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence
new file mode 100644
index 000000000..434aab19f
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence
@@ -0,0 +1,7 @@
+Copyright (C) 2013 99designs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/ijc/Gotty/LICENSE
index 0b71c9736..0b71c9736 100644
--- a/vendor/github.com/Nvveen/Gotty/LICENSE
+++ b/vendor/github.com/ijc/Gotty/LICENSE
diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/ijc/Gotty/README
index a6b0d9a8f..a6b0d9a8f 100644
--- a/vendor/github.com/Nvveen/Gotty/README
+++ b/vendor/github.com/ijc/Gotty/README
diff --git a/vendor/github.com/ijc/Gotty/TODO b/vendor/github.com/ijc/Gotty/TODO
new file mode 100644
index 000000000..470460531
--- /dev/null
+++ b/vendor/github.com/ijc/Gotty/TODO
@@ -0,0 +1,3 @@
+gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
+all:// TODO add more documentation, with function usage in a doc.go file.
+all:// TODO add more testing/benchmarking with go test.
diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/ijc/Gotty/attributes.go
index a4c005fae..a4c005fae 100644
--- a/vendor/github.com/Nvveen/Gotty/attributes.go
+++ b/vendor/github.com/ijc/Gotty/attributes.go
diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/ijc/Gotty/gotty.go
index 093cbf37e..c329778a1 100644
--- a/vendor/github.com/Nvveen/Gotty/gotty.go
+++ b/vendor/github.com/ijc/Gotty/gotty.go
@@ -8,10 +8,12 @@ package gotty
// TODO add more concurrency to name lookup, look for more opportunities.
import (
+ "bytes"
"encoding/binary"
"errors"
"fmt"
"os"
+ "path"
"reflect"
"strings"
"sync"
@@ -21,33 +23,30 @@ import (
// If something went wrong reading the terminfo database file, an error is
// returned.
func OpenTermInfo(termName string) (*TermInfo, error) {
- var term *TermInfo
- var err error
+ if len(termName) == 0 {
+ return nil, errors.New("No termname given")
+ }
// Find the environment variables
- termloc := os.Getenv("TERMINFO")
- if len(termloc) == 0 {
+ if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 {
+ return readTermInfo(path.Join(termloc, string(termName[0]), termName))
+ } else {
// Search like ncurses
- locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
- "/lib/terminfo/", "/usr/share/terminfo/"}
- var path string
+ locations := []string{}
+ if h := os.Getenv("HOME"); len(h) > 0 {
+ locations = append(locations, path.Join(h, ".terminfo"))
+ }
+ locations = append(locations,
+ "/etc/terminfo/",
+ "/lib/terminfo/",
+ "/usr/share/terminfo/")
for _, str := range locations {
- // Construct path
- path = str + string(termName[0]) + "/" + termName
- // Check if path can be opened
- file, _ := os.Open(path)
- if file != nil {
- // Path can open, fall out and use current path
- file.Close()
- break
+ term, err := readTermInfo(path.Join(str, string(termName[0]), termName))
+ if err == nil {
+ return term, nil
}
}
- if len(path) > 0 {
- term, err = readTermInfo(path)
- } else {
- err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
- }
+ return nil, errors.New("No terminfo file(-location) found")
}
- return term, err
}
// Open a terminfo file from the environment variable containing the current
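The rewritten lookup above builds each candidate path from a terminfo directory, the first letter of the terminal name, and the name itself. A minimal illustration of that construction, with an arbitrary directory and terminal name:

package main

import (
	"fmt"
	"path"
)

func main() {
	termName := "xterm-256color"
	dir := "/usr/share/terminfo" // one of the ncurses-style locations above

	// Prints /usr/share/terminfo/x/xterm-256color
	fmt.Println(path.Join(dir, string(termName[0]), termName))
}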
@@ -110,7 +109,7 @@ func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
return term.GetAttribute(tc)
}
-// A utility function that finds and returns the termcap equivalent of a
+// A utility function that finds and returns the termcap equivalent of a
// variable name.
func GetTermcapName(name string) string {
// Termcap name
@@ -192,7 +191,9 @@ func readTermInfo(path string) (*TermInfo, error) {
}
}
// If the number of bytes read is not even, a byte for alignment is added
- if len(byteArray)%2 != 0 {
+ // We know the header is an even number of bytes so only need to check the
+ // total of the names and booleans.
+ if (header[1]+header[2])%2 != 0 {
err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
if err != nil {
return nil, err
@@ -228,9 +229,14 @@ func readTermInfo(path string) (*TermInfo, error) {
// We get an offset, and then iterate until the string is null-terminated
for i, offset := range shArray {
if offset > -1 {
- r := offset
- for ; byteArray[r] != 0; r++ {
+ if int(offset) >= len(byteArray) {
+ return nil, errors.New("array out of bounds reading string section")
+ }
+ r := bytes.IndexByte(byteArray[offset:], 0)
+ if r == -1 {
+ return nil, errors.New("missing nul byte reading string section")
}
+ r += int(offset)
term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
}
}
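The bounds-checked lookup above replaces a raw byte scan. A standalone sketch of the same pattern on a made-up buffer, with a hypothetical helper name:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// stringAt reads a NUL-terminated string starting at offset, failing
// instead of running past the end of the buffer.
func stringAt(buf []byte, offset int) (string, error) {
	if offset < 0 || offset >= len(buf) {
		return "", errors.New("array out of bounds reading string section")
	}
	r := bytes.IndexByte(buf[offset:], 0)
	if r == -1 {
		return "", errors.New("missing nul byte reading string section")
	}
	return string(buf[offset : offset+r]), nil
}

func main() {
	buf := []byte("clear\x00smcup\x00")
	fmt.Println(stringAt(buf, 6)) // smcup <nil>
}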
diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/ijc/Gotty/parser.go
index a9d5d23c5..a9d5d23c5 100644
--- a/vendor/github.com/Nvveen/Gotty/parser.go
+++ b/vendor/github.com/ijc/Gotty/parser.go
diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/ijc/Gotty/types.go
index 9bcc65e9b..9bcc65e9b 100644
--- a/vendor/github.com/Nvveen/Gotty/types.go
+++ b/vendor/github.com/ijc/Gotty/types.go
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 000000000..529c3412b
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 000000000..b13a50ed1
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..469b44907
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index 8b76f1fbf..02fc81e06 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -13,6 +13,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@@ -27,7 +28,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl
### Latest release
-[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6).
+[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
### Important note
@@ -217,6 +218,21 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
+## Top Contributors
+
+[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
+[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
+[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
+[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
+[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
+[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
+[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
+[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
+
+
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
index 6ea38e636..3f5afa83a 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
case reflect.Struct:
srcMap := src.Interface().(map[string]interface{})
for key := range srcMap {
+ config.overwriteWithEmptyValue = true
srcValue := srcMap[key]
fieldName := changeInitialCase(key, unicode.ToUpper)
dstElement := dst.FieldByName(fieldName)
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 44f70a89d..f8de6c543 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -26,9 +26,10 @@ func hasExportedField(dst reflect.Value) (exported bool) {
}
type Config struct {
- Overwrite bool
- AppendSlice bool
- Transformers Transformers
+ Overwrite bool
+ AppendSlice bool
+ Transformers Transformers
+ overwriteWithEmptyValue bool
}
type Transformers interface {
@@ -40,6 +41,8 @@ type Transformers interface {
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ config.overwriteWithEmptyValue = false
if !src.IsValid() {
return
@@ -74,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
} else {
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
}
@@ -125,7 +128,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
@@ -136,7 +139,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.SetMapIndex(key, dstSlice)
}
}
- if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
+ if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue
}
@@ -151,7 +154,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if !dst.CanSet() {
break
}
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
@@ -191,7 +194,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
return
}
default:
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
}
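The overwriteWithEmptyValue flag touched above is unexported; as a hedged sketch of the public behaviour it feeds into, here is mergo's exported Merge with and without WithOverride (struct and values are invented):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type conf struct {
	Host string
	Port int
}

func main() {
	dst := conf{Host: "localhost"}
	src := conf{Host: "example.com", Port: 8080}

	// Default: only empty destination fields are filled from src.
	_ = mergo.Merge(&dst, src)
	fmt.Println(dst) // {localhost 8080}

	// WithOverride sets Config.Overwrite, so non-empty src values win.
	_ = mergo.Merge(&dst, src, mergo.WithOverride)
	fmt.Println(dst) // {example.com 8080}
}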
diff --git a/vendor/github.com/ishidawataru/sctp/.gitignore b/vendor/github.com/ishidawataru/sctp/.gitignore
new file mode 100644
index 000000000..a1338d685
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml
new file mode 100644
index 000000000..64f85ef28
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+
+script:
+ - go test -v -race ./...
+ - GOOS=linux GOARCH=amd64 go build .
+ - GOOS=linux GOARCH=arm go build .
+ - GOOS=linux GOARCH=arm64 go build .
+ - GOOS=linux GOARCH=ppc64le go build .
+ - (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build .
+# can be compiled but not functional:
+ - GOOS=linux GOARCH=386 go build .
+ - GOOS=windows GOARCH=amd64 go build .
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 000000000..955dc0be5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 000000000..15556530a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 000000000..449e67cd0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 000000000..c8a9fbb38
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[[projects]]
+ name = "github.com/modern-go/reflect2"
+ packages = ["."]
+ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+ version = "1.0.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 000000000..313a0f887
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+ name = "github.com/modern-go/reflect2"
+ version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
index 54d5afe95..50d56ffbf 100644
--- a/vendor/github.com/json-iterator/go/README.md
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -10,10 +10,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json"
You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-```
-Go developers, please join us: DiDi Chuxing Platform Technology Department, taowen@didichuxing.com
-```
-
# Benchmark
![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
index daecfed61..f6b8aeab0 100644
--- a/vendor/github.com/json-iterator/go/any.go
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -312,6 +312,10 @@ func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
any := *(*Any)(ptr)
+ if any == nil {
+ stream.WriteNil()
+ return
+ }
any.WriteTo(stream)
}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 000000000..b45ef6883
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+ mkdir -p /tmp/build-golang/src/github.com/json-iterator
+ ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 000000000..3095662b0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3<br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/>false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0<br/>[1,2] => 1|original json| \ No newline at end of file
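The table above documents json-iterator's fuzzy conversions, which are opt-in. A hedged example, assuming the companion extra package (not part of this vendored diff) is available:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
	"github.com/json-iterator/go/extra"
)

func main() {
	extra.RegisterFuzzyDecoders()

	var n int
	_ = jsoniter.Unmarshal([]byte(`"123.32"`), &n)
	fmt.Println(n) // 123, per the string -> int row above

	var b bool
	_ = jsoniter.Unmarshal([]byte(`"0"`), &b)
	fmt.Println(b) // false, per the string -> bool row above
}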
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
index 4f883c095..b9754638e 100644
--- a/vendor/github.com/json-iterator/go/iter_float.go
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -77,14 +77,12 @@ func (iter *Iterator) ReadFloat32() (ret float32) {
}
func (iter *Iterator) readPositiveFloat32() (ret float32) {
- value := uint64(0)
- c := byte(' ')
i := iter.head
// first char
if i == iter.tail {
return iter.readFloat32SlowPath()
}
- c = iter.buf[i]
+ c := iter.buf[i]
i++
ind := floatDigits[c]
switch ind {
@@ -107,7 +105,7 @@ func (iter *Iterator) readPositiveFloat32() (ret float32) {
return
}
}
- value = uint64(ind)
+ value := uint64(ind)
// chars before dot
non_decimal_loop:
for ; i < iter.tail; i++ {
@@ -145,9 +143,7 @@ non_decimal_loop:
}
// too many decimal places
return iter.readFloat32SlowPath()
- case invalidCharForNumber:
- fallthrough
- case dotInNumber:
+ case invalidCharForNumber, dotInNumber:
return iter.readFloat32SlowPath()
}
decimalPlaces++
@@ -218,14 +214,12 @@ func (iter *Iterator) ReadFloat64() (ret float64) {
}
func (iter *Iterator) readPositiveFloat64() (ret float64) {
- value := uint64(0)
- c := byte(' ')
i := iter.head
// first char
if i == iter.tail {
return iter.readFloat64SlowPath()
}
- c = iter.buf[i]
+ c := iter.buf[i]
i++
ind := floatDigits[c]
switch ind {
@@ -248,7 +242,7 @@ func (iter *Iterator) readPositiveFloat64() (ret float64) {
return
}
}
- value = uint64(ind)
+ value := uint64(ind)
// chars before dot
non_decimal_loop:
for ; i < iter.tail; i++ {
@@ -286,9 +280,7 @@ non_decimal_loop:
}
// too many decimal places
return iter.readFloat64SlowPath()
- case invalidCharForNumber:
- fallthrough
- case dotInNumber:
+ case invalidCharForNumber, dotInNumber:
return iter.readFloat64SlowPath()
}
decimalPlaces++
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
index f67bc2e83..6cf66d043 100644
--- a/vendor/github.com/json-iterator/go/iter_skip_strict.go
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -2,12 +2,22 @@
package jsoniter
-import "fmt"
+import (
+ "fmt"
+ "io"
+)
func (iter *Iterator) skipNumber() {
if !iter.trySkipNumber() {
iter.unreadByte()
- iter.ReadFloat32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = nil
+ iter.ReadBigFloat()
+ }
}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index 04f68756b..05e8fbf1f 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -338,7 +338,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
for i := 0; i < structType.NumField(); i++ {
field := structType.Field(i)
tag, hastag := field.Tag().Lookup(ctx.getTagKey())
- if ctx.onlyTaggedField && !hastag {
+ if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
tagParts := strings.Split(tag, ",")
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 7f66a88b0..547b4421e 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -64,14 +64,26 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
default:
ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(textMarshalerType) {
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
return &referenceDecoder{
&textUnmarshalerDecoder{
valType: ptrType,
},
}
}
- if typ.Implements(textMarshalerType) {
+ if typ.Implements(textUnmarshalerType) {
return &textUnmarshalerDecoder{
valType: typ,
}
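The change above makes map-key decoding prefer json.Unmarshaler over encoding.TextUnmarshaler. A sketch of a key type that would now be decoded through UnmarshalJSON; the type, its behaviour and the input are hypothetical:

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// key implements json.Unmarshaler; with the change above jsoniter should
// hand it the raw, quoted key bytes when decoding it as a map key.
type key string

func (k *key) UnmarshalJSON(b []byte) error {
	*k = key(strings.ToLower(strings.Trim(string(b), `"`)))
	return nil
}

func main() {
	var m map[key]int
	err := jsoniter.Unmarshal([]byte(`{"Foo": 1}`), &m)
	fmt.Println(m, err) // expected: map[foo:1] <nil>
}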
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
index 58ac959ad..fea50719d 100644
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -93,8 +93,7 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteNil()
return
}
- marshaler := obj.(json.Marshaler)
- bytes, err := marshaler.MarshalJSON()
+ bytes, err := json.Marshal(obj)
if err != nil {
stream.Error = err
} else {
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
new file mode 100644
index 000000000..f4e7c0b2c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
deleted file mode 100644
index 280d54890..000000000
--- a/vendor/github.com/klauspost/compress/README.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# compress
-
-This package is based on an optimized Deflate function, which is used by gzip/zip/zlib packages.
-
-It offers slightly better compression at lower compression settings, and up to 3x faster encoding at highest compression level.
-
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
-[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
-[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
-
-# changelog
-
-* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
-* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
-* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
-* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
-* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
-* May 28, 2017: Reduce allocations when resetting decoder.
-* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
-* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
-* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before.
-* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
-* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
-* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
-* Mar 24, 2016: Small speedup for level 1-3.
-* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
-* Feb 19, 2016: Handle small payloads faster in level 1-3.
-* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
-* Feb 14, 2016: Snappy: Merge upstream changes.
-* Feb 14, 2016: Snappy: Fix aggressive skipping.
-* Feb 14, 2016: Snappy: Update benchmark.
-* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
-* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%.
-* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
-* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
-* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
-* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
-* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
-* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
-* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
-* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
-* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
-* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
-* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
-* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
-
-# usage
-
-The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-
-| old import | new import |
-|--------------------|-----------------------------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` |
-| `compress/zlib` | `github.com/klauspost/compress/zlib` |
-| `archive/zip` | `github.com/klauspost/compress/zip` |
-| `compress/flate` | `github.com/klauspost/compress/flate` |
-
-You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and in the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
-
-The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
-
-Currently there is only minor speedup on decompression (mostly CRC32 calculation).
-
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give a reasonable middle ground. The standard library has big differences in levels 1-4, but levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-# linear time compression (huffman only)
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
-
-This means that frequently used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
-
-
-# snappy package
-
-The standard snappy package has now been improved. This repo contains a copy of the snappy repo.
-
-I would advise using the standard package: https://github.com/golang/snappy
-
-
-# license
-
-This code is licensed under the same conditions as the original Go code. See LICENSE file.
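The removed README above described the packages as drop-in replacements where only the import path changes. A hedged illustration of that usage with the gzip sub-package (data and level chosen arbitrarily; the API mirrors compress/gzip):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Same API as the standard library's compress/gzip.
	zw, _ := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	zw.Write([]byte("hello, hello, hello, hello"))
	zw.Close()

	zr, _ := gzip.NewReader(&buf)
	out, _ := ioutil.ReadAll(zr)
	fmt.Println(string(out))
}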
diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go
deleted file mode 100644
index a3200a8f4..000000000
--- a/vendor/github.com/klauspost/compress/flate/copy.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// forwardCopy is like the built-in copy function except that it always goes
-// forward from the start, even if the dst and src overlap.
-// It is equivalent to:
-// for i := 0; i < n; i++ {
-// mem[dst+i] = mem[src+i]
-// }
-func forwardCopy(mem []byte, dst, src, n int) {
- if dst <= src {
- copy(mem[dst:dst+n], mem[src:src+n])
- return
- }
- for {
- if dst >= src+n {
- copy(mem[dst:dst+n], mem[src:src+n])
- return
- }
- // There is some forward overlap. The destination
- // will be filled with a repeated pattern of mem[src:src+k].
- // We copy one instance of the pattern here, then repeat.
- // Each time around this loop k will double.
- k := dst - src
- copy(mem[dst:dst+k], mem[src:src+k])
- n -= k
- dst += k
- }
-}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 9e6e7ff0c..628795120 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -77,16 +77,14 @@ var levels = []compressionLevel{
{32, 258, 258, 4096, skipNever, 9},
}
-type compressor struct {
- compressionLevel
-
- w *huffmanBitWriter
- bulkHasher func([]byte, []uint32)
-
- // compression algorithm
- fill func(*compressor, []byte) int // copy data to window
- step func(*compressor) // process window
- sync bool // requesting flush
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+ // deflate state
+ length int
+ offset int
+ hash uint32
+ maxInsertIndex int
+ ii uint16 // position of last match, intended to overflow to reset.
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
@@ -99,57 +97,64 @@ type compressor struct {
hashOffset int
// input window: unprocessed data is window[index:windowEnd]
- index int
+ index int
+ bulkHasher func([]byte, []uint32)
+ hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+type compressor struct {
+ compressionLevel
+
+ w *huffmanBitWriter
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+
window []byte
windowEnd int
blockStart int // window index where current tokens start
byteAvailable bool // if true, still need to process window[index-1].
+ err error
// queued output tokens
tokens tokens
-
- // deflate state
- length int
- offset int
- hash uint32
- maxInsertIndex int
- err error
- ii uint16 // position of last match, intended to overflow to reset.
-
- snap snappyEnc
- hashMatch [maxMatchLength + minMatchLength]uint32
+ snap fastEnc
+ state *advancedState
}
func (d *compressor) fillDeflate(b []byte) int {
- if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ s := d.state
+ if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
// shift the window by windowSize
copy(d.window[:], d.window[windowSize:2*windowSize])
- d.index -= windowSize
+ s.index -= windowSize
d.windowEnd -= windowSize
if d.blockStart >= windowSize {
d.blockStart -= windowSize
} else {
d.blockStart = math.MaxInt32
}
- d.hashOffset += windowSize
- if d.hashOffset > maxHashOffset {
- delta := d.hashOffset - 1
- d.hashOffset -= delta
- d.chainHead -= delta
+ s.hashOffset += windowSize
+ if s.hashOffset > maxHashOffset {
+ delta := s.hashOffset - 1
+ s.hashOffset -= delta
+ s.chainHead -= delta
// Iterate over slices instead of arrays to avoid copying
// the entire table onto the stack (Issue #18625).
- for i, v := range d.hashPrev[:] {
+ for i, v := range s.hashPrev[:] {
if int(v) > delta {
- d.hashPrev[i] = uint32(int(v) - delta)
+ s.hashPrev[i] = uint32(int(v) - delta)
} else {
- d.hashPrev[i] = 0
+ s.hashPrev[i] = 0
}
}
- for i, v := range d.hashHead[:] {
+ for i, v := range s.hashHead[:] {
if int(v) > delta {
- d.hashHead[i] = uint32(int(v) - delta)
+ s.hashHead[i] = uint32(int(v) - delta)
} else {
- d.hashHead[i] = 0
+ s.hashHead[i] = 0
}
}
}
@@ -207,6 +212,7 @@ func (d *compressor) fillWindow(b []byte) {
case 0, 1, 2:
return
}
+ s := d.state
// If we are given too much, cut it.
if len(b) > windowSize {
b = b[len(b)-windowSize:]
@@ -229,28 +235,28 @@ func (d *compressor) fillWindow(b []byte) {
continue
}
- dst := d.hashMatch[:dstSize]
- d.bulkHasher(tocheck, dst)
+ dst := s.hashMatch[:dstSize]
+ s.bulkHasher(tocheck, dst)
var newH uint32
for i, val := range dst {
di := i + startindex
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = d.hashHead[newH]
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
- d.hashHead[newH] = uint32(di + d.hashOffset)
+ s.hashHead[newH] = uint32(di + s.hashOffset)
}
- d.hash = newH
+ s.hash = newH
}
// Update window information.
d.windowEnd += n
- d.index = n
+ s.index = n
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
-// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
@@ -295,7 +301,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
- i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
if i < minIndex || i < 0 {
break
}
@@ -305,7 +311,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
-// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
@@ -350,7 +356,7 @@ func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahe
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
- i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
if i < minIndex || i < 0 {
break
}
@@ -406,52 +412,57 @@ func matchLen(a, b []byte, max int) int {
func (d *compressor) initDeflate() {
d.window = make([]byte, 2*windowSize)
- d.hashOffset = 1
- d.length = minMatchLength - 1
- d.offset = 0
d.byteAvailable = false
- d.index = 0
- d.hash = 0
- d.chainHead = -1
- d.bulkHasher = bulkHash4
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.chainHead = -1
+ s.bulkHasher = bulkHash4
if useSSE42 {
- d.bulkHasher = crc32sseAll
+ s.bulkHasher = crc32sseAll
}
}
// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazy
func (d *compressor) deflate() {
-
+ s := d.state
// Sanity enables additional runtime tests.
// It's intended to be used during development
// to supplement the currently ad-hoc unit tests.
const sanity = false
- if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
- d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if d.index < d.maxInsertIndex {
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
}
for {
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
- lookahead := d.windowEnd - d.index
+ lookahead := d.windowEnd - s.index
if lookahead < minMatchLength+maxMatchLength {
if !d.sync {
return
}
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
if lookahead == 0 {
if d.tokens.n > 0 {
- if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
@@ -459,55 +470,55 @@ func (d *compressor) deflate() {
return
}
}
- if d.index < d.maxInsertIndex {
+ if s.index < s.maxInsertIndex {
// Update the hash
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
- ch := d.hashHead[d.hash&hashMask]
- d.chainHead = int(ch)
- d.hashPrev[d.index&windowMask] = ch
- d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset)
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ ch := s.hashHead[s.hash&hashMask]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
}
- d.length = minMatchLength - 1
- d.offset = 0
- minIndex := d.index - windowSize
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
if minIndex < 0 {
minIndex = 0
}
- if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
- if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
- d.length = newLength
- d.offset = newOffset
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
}
}
- if d.length >= minMatchLength {
- d.ii = 0
+ if s.length >= minMatchLength {
+ s.ii = 0
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
- // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+ // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
d.tokens.n++
// Insert in the hash table all strings up to the end of the match.
// index and index-1 are already inserted. If there is not enough
// lookahead, the last two strings are not inserted into the hash
// table.
- if d.length <= d.fastSkipHashing {
+ if s.length <= d.fastSkipHashing {
var newIndex int
- newIndex = d.index + d.length
+ newIndex = s.index + s.length
// Calculate missing hashes
end := newIndex
- if end > d.maxInsertIndex {
- end = d.maxInsertIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
}
end += minMatchLength - 1
- startindex := d.index + 1
- if startindex > d.maxInsertIndex {
- startindex = d.maxInsertIndex
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize > 0 {
- dst := d.hashMatch[:dstSize]
+ dst := s.hashMatch[:dstSize]
bulkHash4(tocheck, dst)
var newH uint32
for i, val := range dst {
@@ -515,35 +526,35 @@ func (d *compressor) deflate() {
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = d.hashHead[newH]
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
- d.hashHead[newH] = uint32(di + d.hashOffset)
+ s.hashHead[newH] = uint32(di + s.hashOffset)
}
- d.hash = newH
+ s.hash = newH
}
- d.index = newIndex
+ s.index = newIndex
} else {
// For matches this long, we don't bother inserting each individual
// item into the table.
- d.index += d.length
- if d.index < d.maxInsertIndex {
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ s.index += s.length
+ if s.index < s.maxInsertIndex {
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
}
}
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
- if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
} else {
- d.ii++
- end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
+ s.ii++
+ end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1
if end > d.windowEnd {
end = d.windowEnd
}
- for i := d.index; i < end; i++ {
+ for i := s.index; i < end; i++ {
d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
@@ -553,7 +564,7 @@ func (d *compressor) deflate() {
d.tokens.n = 0
}
}
- d.index = end
+ s.index = end
}
}
}
@@ -561,42 +572,43 @@ func (d *compressor) deflate() {
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazy() {
+ s := d.state
// Sanity enables additional runtime tests.
// It's intended to be used during development
// to supplement the currently ad-hoc unit tests.
const sanity = false
- if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
- d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if d.index < d.maxInsertIndex {
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
}
for {
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
- lookahead := d.windowEnd - d.index
+ lookahead := d.windowEnd - s.index
if lookahead < minMatchLength+maxMatchLength {
if !d.sync {
return
}
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
if lookahead == 0 {
// Flush current output block if any.
if d.byteAvailable {
// There is still one pending token that needs to be flushed
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.byteAvailable = false
}
if d.tokens.n > 0 {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
@@ -604,30 +616,30 @@ func (d *compressor) deflateLazy() {
return
}
}
- if d.index < d.maxInsertIndex {
+ if s.index < s.maxInsertIndex {
// Update the hash
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
- ch := d.hashHead[d.hash&hashMask]
- d.chainHead = int(ch)
- d.hashPrev[d.index&windowMask] = ch
- d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset)
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ ch := s.hashHead[s.hash&hashMask]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
}
- prevLength := d.length
- prevOffset := d.offset
- d.length = minMatchLength - 1
- d.offset = 0
- minIndex := d.index - windowSize
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
if minIndex < 0 {
minIndex = 0
}
- if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
- d.length = newLength
- d.offset = newOffset
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
}
}
- if prevLength >= minMatchLength && d.length <= prevLength {
+ if prevLength >= minMatchLength && s.length <= prevLength {
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
@@ -638,21 +650,21 @@ func (d *compressor) deflateLazy() {
// lookahead, the last two strings are not inserted into the hash
// table.
var newIndex int
- newIndex = d.index + prevLength - 1
+ newIndex = s.index + prevLength - 1
// Calculate missing hashes
end := newIndex
- if end > d.maxInsertIndex {
- end = d.maxInsertIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
}
end += minMatchLength - 1
- startindex := d.index + 1
- if startindex > d.maxInsertIndex {
- startindex = d.maxInsertIndex
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize > 0 {
- dst := d.hashMatch[:dstSize]
+ dst := s.hashMatch[:dstSize]
bulkHash4(tocheck, dst)
var newH uint32
for i, val := range dst {
@@ -660,74 +672,74 @@ func (d *compressor) deflateLazy() {
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = d.hashHead[newH]
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
- d.hashHead[newH] = uint32(di + d.hashOffset)
+ s.hashHead[newH] = uint32(di + s.hashOffset)
}
- d.hash = newH
+ s.hash = newH
}
- d.index = newIndex
+ s.index = newIndex
d.byteAvailable = false
- d.length = minMatchLength - 1
+ s.length = minMatchLength - 1
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
} else {
// Reset, if we got a match this run.
- if d.length >= minMatchLength {
- d.ii = 0
+ if s.length >= minMatchLength {
+ s.ii = 0
}
// We have a byte waiting. Emit it.
if d.byteAvailable {
- d.ii++
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ s.ii++
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
- d.index++
+ s.index++
// If we have a long run of no matches, skip additional bytes
- // Resets when d.ii overflows after 64KB.
- if d.ii > 31 {
- n := int(d.ii >> 5)
+ // Resets when s.ii overflows after 64KB.
+ if s.ii > 31 {
+ n := int(s.ii >> 5)
for j := 0; j < n; j++ {
- if d.index >= d.windowEnd-1 {
+ if s.index >= d.windowEnd-1 {
break
}
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
- d.index++
+ s.index++
}
// Flush last byte
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.byteAvailable = false
- // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
}
} else {
- d.index++
+ s.index++
d.byteAvailable = true
}
}
@@ -737,36 +749,36 @@ func (d *compressor) deflateLazy() {
// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazySSE
func (d *compressor) deflateSSE() {
-
+ s := d.state
// Sanity enables additional runtime tests.
// It's intended to be used during development
// to supplement the currently ad-hoc unit tests.
const sanity = false
- if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
- d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if d.index < d.maxInsertIndex {
- d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
}
for {
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
- lookahead := d.windowEnd - d.index
+ lookahead := d.windowEnd - s.index
if lookahead < minMatchLength+maxMatchLength {
if !d.sync {
return
}
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
if lookahead == 0 {
if d.tokens.n > 0 {
- if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
@@ -774,55 +786,55 @@ func (d *compressor) deflateSSE() {
return
}
}
- if d.index < d.maxInsertIndex {
+ if s.index < s.maxInsertIndex {
// Update the hash
- d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
- ch := d.hashHead[d.hash]
- d.chainHead = int(ch)
- d.hashPrev[d.index&windowMask] = ch
- d.hashHead[d.hash] = uint32(d.index + d.hashOffset)
+ s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
+ ch := s.hashHead[s.hash]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
}
- d.length = minMatchLength - 1
- d.offset = 0
- minIndex := d.index - windowSize
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
if minIndex < 0 {
minIndex = 0
}
- if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
- if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
- d.length = newLength
- d.offset = newOffset
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+ if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
}
}
- if d.length >= minMatchLength {
- d.ii = 0
+ if s.length >= minMatchLength {
+ s.ii = 0
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
- // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
- d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+ // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
d.tokens.n++
// Insert in the hash table all strings up to the end of the match.
// index and index-1 are already inserted. If there is not enough
// lookahead, the last two strings are not inserted into the hash
// table.
- if d.length <= d.fastSkipHashing {
+ if s.length <= d.fastSkipHashing {
var newIndex int
- newIndex = d.index + d.length
+ newIndex = s.index + s.length
// Calculate missing hashes
end := newIndex
- if end > d.maxInsertIndex {
- end = d.maxInsertIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
}
end += minMatchLength - 1
- startindex := d.index + 1
- if startindex > d.maxInsertIndex {
- startindex = d.maxInsertIndex
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize > 0 {
- dst := d.hashMatch[:dstSize]
+ dst := s.hashMatch[:dstSize]
crc32sseAll(tocheck, dst)
var newH uint32
@@ -831,35 +843,35 @@ func (d *compressor) deflateSSE() {
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = d.hashHead[newH]
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
- d.hashHead[newH] = uint32(di + d.hashOffset)
+ s.hashHead[newH] = uint32(di + s.hashOffset)
}
- d.hash = newH
+ s.hash = newH
}
- d.index = newIndex
+ s.index = newIndex
} else {
// For matches this long, we don't bother inserting each individual
// item into the table.
- d.index += d.length
- if d.index < d.maxInsertIndex {
- d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ s.index += s.length
+ if s.index < s.maxInsertIndex {
+ s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
}
}
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
- if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
} else {
- d.ii++
- end := d.index + int(d.ii>>5) + 1
+ s.ii++
+ end := s.index + int(s.ii>>5) + 1
if end > d.windowEnd {
end = d.windowEnd
}
- for i := d.index; i < end; i++ {
+ for i := s.index; i < end; i++ {
d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
@@ -869,7 +881,7 @@ func (d *compressor) deflateSSE() {
d.tokens.n = 0
}
}
- d.index = end
+ s.index = end
}
}
}
@@ -877,42 +889,43 @@ func (d *compressor) deflateSSE() {
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazySSE() {
+ s := d.state
// Sanity enables additional runtime tests.
// It's intended to be used during development
// to supplement the currently ad-hoc unit tests.
const sanity = false
- if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
- d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if d.index < d.maxInsertIndex {
- d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
}
for {
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
- lookahead := d.windowEnd - d.index
+ lookahead := d.windowEnd - s.index
if lookahead < minMatchLength+maxMatchLength {
if !d.sync {
return
}
- if sanity && d.index > d.windowEnd {
+ if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
if lookahead == 0 {
// Flush current output block if any.
if d.byteAvailable {
// There is still one pending token that needs to be flushed
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.byteAvailable = false
}
if d.tokens.n > 0 {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
@@ -920,30 +933,30 @@ func (d *compressor) deflateLazySSE() {
return
}
}
- if d.index < d.maxInsertIndex {
+ if s.index < s.maxInsertIndex {
// Update the hash
- d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
- ch := d.hashHead[d.hash]
- d.chainHead = int(ch)
- d.hashPrev[d.index&windowMask] = ch
- d.hashHead[d.hash] = uint32(d.index + d.hashOffset)
+ s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
+ ch := s.hashHead[s.hash]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
}
- prevLength := d.length
- prevOffset := d.offset
- d.length = minMatchLength - 1
- d.offset = 0
- minIndex := d.index - windowSize
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
if minIndex < 0 {
minIndex = 0
}
- if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
- d.length = newLength
- d.offset = newOffset
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
}
}
- if prevLength >= minMatchLength && d.length <= prevLength {
+ if prevLength >= minMatchLength && s.length <= prevLength {
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
@@ -954,21 +967,21 @@ func (d *compressor) deflateLazySSE() {
// lookahead, the last two strings are not inserted into the hash
// table.
var newIndex int
- newIndex = d.index + prevLength - 1
+ newIndex = s.index + prevLength - 1
// Calculate missing hashes
end := newIndex
- if end > d.maxInsertIndex {
- end = d.maxInsertIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
}
end += minMatchLength - 1
- startindex := d.index + 1
- if startindex > d.maxInsertIndex {
- startindex = d.maxInsertIndex
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize > 0 {
- dst := d.hashMatch[:dstSize]
+ dst := s.hashMatch[:dstSize]
crc32sseAll(tocheck, dst)
var newH uint32
for i, val := range dst {
@@ -976,74 +989,74 @@ func (d *compressor) deflateLazySSE() {
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = d.hashHead[newH]
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
- d.hashHead[newH] = uint32(di + d.hashOffset)
+ s.hashHead[newH] = uint32(di + s.hashOffset)
}
- d.hash = newH
+ s.hash = newH
}
- d.index = newIndex
+ s.index = newIndex
d.byteAvailable = false
- d.length = minMatchLength - 1
+ s.length = minMatchLength - 1
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
} else {
// Reset, if we got a match this run.
- if d.length >= minMatchLength {
- d.ii = 0
+ if s.length >= minMatchLength {
+ s.ii = 0
}
// We have a byte waiting. Emit it.
if d.byteAvailable {
- d.ii++
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ s.ii++
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
- d.index++
+ s.index++
// If we have a long run of no matches, skip additional bytes
- // Resets when d.ii overflows after 64KB.
- if d.ii > 31 {
- n := int(d.ii >> 6)
+ // Resets when s.ii overflows after 64KB.
+ if s.ii > 31 {
+ n := int(s.ii >> 6)
for j := 0; j < n; j++ {
- if d.index >= d.windowEnd-1 {
+ if s.index >= d.windowEnd-1 {
break
}
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
- d.index++
+ s.index++
}
// Flush last byte
- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.byteAvailable = false
- // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.n = 0
}
}
} else {
- d.index++
+ s.index++
d.byteAvailable = true
}
}
@@ -1167,7 +1180,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeHuff
case level >= 1 && level <= 4:
- d.snap = newSnappy(level)
+ d.snap = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeSnappy
@@ -1175,6 +1188,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
level = 5
fallthrough
case 5 <= level && level <= 9:
+ d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
d.fill = (*compressor).fillDeflate
@@ -1215,22 +1229,23 @@ func (d *compressor) reset(w io.Writer) {
// level was NoCompression or ConstantCompresssion.
d.windowEnd = 0
default:
- d.chainHead = -1
- for i := range d.hashHead {
- d.hashHead[i] = 0
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
}
- for i := range d.hashPrev {
- d.hashPrev[i] = 0
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
}
- d.hashOffset = 1
- d.index, d.windowEnd = 0, 0
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
d.blockStart, d.byteAvailable = 0, false
d.tokens.n = 0
- d.length = minMatchLength - 1
- d.offset = 0
- d.hash = 0
- d.ii = 0
- d.maxInsertIndex = 0
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
}
}
diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go
new file mode 100644
index 000000000..154c89a48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/gen.go
@@ -0,0 +1,265 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates fixedhuff.go
+// Invoke as
+//
+// go run gen.go -output fixedhuff.go
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+var filename = flag.String("output", "fixedhuff.go", "output file name")
+
+const maxCodeLen = 16
+
+// Note: the definition of the huffmanDecoder struct is copied from
+// inflate.go, as it is private to the implementation.
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+ min int // the minimum code length
+ chunks [huffmanNumChunks]uint32 // chunks as described above
+ links [][]uint32 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(bits []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.min != 0 {
+ *h = huffmanDecoder{}
+ }
+
+ // Count number of codes of each length,
+ // compute min and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range bits {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i] = code
+ code += count[i]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.min = min
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ h.links = make([][]uint32, huffmanNumChunks-link)
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.links[off] = make([]uint32, numLinks)
+ }
+ }
+
+ for i, n := range bits {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint32(i<<huffmanValueShift | n)
+ reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+func main() {
+ flag.Parse()
+
+ var h huffmanDecoder
+ var bits [288]int
+ initReverseByte()
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ h.init(bits[:])
+ if h.links != nil {
+ log.Fatal("Unexpected links table in fixed Huffman decoder")
+ }
+
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.`+"\n\n")
+
+ fmt.Fprintln(&buf, "package flate")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
+ fmt.Fprintf(&buf, "\t%d,\n", h.min)
+ fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
+ for i := 0; i < huffmanNumChunks; i++ {
+ if i&7 == 0 {
+ fmt.Fprintf(&buf, "\t\t")
+ } else {
+ fmt.Fprintf(&buf, " ")
+ }
+ fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
+ if i&7 == 7 {
+ fmt.Fprintln(&buf)
+ }
+ }
+ fmt.Fprintln(&buf, "\t},")
+ fmt.Fprintln(&buf, "\tnil, 0,")
+ fmt.Fprintln(&buf, "}")
+
+ data, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = ioutil.WriteFile(*filename, data, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+var reverseByte [256]byte
+
+func initReverseByte() {
+ for x := 0; x < 256; x++ {
+ var result byte
+ for i := uint(0); i < 8; i++ {
+ result |= byte(((x >> i) & 1) << (7 - i))
+ }
+ reverseByte[x] = result
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index f9b2a699a..f46c65418 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -35,7 +35,7 @@ const (
)
// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = []int8{
+var lengthExtraBits = [32]int8{
/* 257 */ 0, 0, 0,
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -43,14 +43,14 @@ var lengthExtraBits = []int8{
}
// The length indicated by length code X - LENGTH_CODES_START.
-var lengthBase = []uint32{
+var lengthBase = [32]uint8{
0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
64, 80, 96, 112, 128, 160, 192, 224, 255,
}
// offset code word extra bits.
-var offsetExtraBits = []int8{
+var offsetExtraBits = [64]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
@@ -58,7 +58,7 @@ var offsetExtraBits = []int8{
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}
-var offsetBase = []uint32{
+var offsetBase = [64]uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
@@ -86,9 +86,9 @@ type huffmanBitWriter struct {
// and then the low nbits of bits.
bits uint64
nbits uint
- bytes [bufferSize]byte
+ bytes [256]byte
codegenFreq [codegenCodeCount]int32
- nbytes int
+ nbytes uint8
literalFreq []int32
offsetFreq []int32
codegen []uint8
@@ -101,8 +101,8 @@ type huffmanBitWriter struct {
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
return &huffmanBitWriter{
writer: w,
- literalFreq: make([]int32, maxNumLit),
- offsetFreq: make([]int32, offsetCodeCount),
+ literalFreq: make([]int32, lengthCodesStart+32),
+ offsetFreq: make([]int32, 32),
codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
literalEncoding: newHuffmanEncoder(maxNumLit),
codegenEncoding: newHuffmanEncoder(codegenCodeCount),
@@ -113,7 +113,7 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
func (w *huffmanBitWriter) reset(writer io.Writer) {
w.writer = writer
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
- w.bytes = [bufferSize]byte{}
+ w.bytes = [256]byte{}
}
func (w *huffmanBitWriter) flush() {
@@ -145,9 +145,6 @@ func (w *huffmanBitWriter) write(b []byte) {
}
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
- if w.err != nil {
- return
- }
w.bits |= uint64(b) << w.nbits
w.nbits += nb
if w.nbits >= 48 {
@@ -155,15 +152,18 @@ func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
n += 6
if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
w.write(w.bytes[:n])
n = 0
}
@@ -333,9 +333,6 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
}
func (w *huffmanBitWriter) writeCode(c hcode) {
- if w.err != nil {
- return
- }
w.bits |= uint64(c.code) << w.nbits
w.nbits += uint(c.len)
if w.nbits >= 48 {
@@ -343,15 +340,18 @@ func (w *huffmanBitWriter) writeCode(c hcode) {
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
n += 6
if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
w.write(w.bytes[:n])
n = 0
}
@@ -460,7 +460,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
}
for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
// First four offset codes have extra size = 0.
- extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
+ extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63])
}
}
@@ -548,15 +548,30 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets
w.offsetFreq[i] = 0
}
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ if tokens[len(tokens)-1] == endBlockMarker {
+ w.literalFreq[endBlockMarker]++
+ tokens = tokens[:len(tokens)-1]
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := w.literalFreq[:256]
+ offs := w.offsetFreq[:32]
+ lengths := w.literalFreq[lengthCodesStart:]
+ lengths = lengths[:32]
for _, t := range tokens {
- if t < matchType {
- w.literalFreq[t.literal()]++
+ if t < endBlockMarker {
+ lits[t.literal()]++
continue
}
length := t.length()
offset := t.offset()
- w.literalFreq[lengthCodesStart+lengthCode(length)]++
- w.offsetFreq[offsetCode(offset)]++
+ lengths[lengthCode(length)&31]++
+ offs[offsetCode(offset)&31]++
}
// get the number of literals
@@ -575,8 +590,8 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets
w.offsetFreq[0] = 1
numOffsets = 1
}
- w.literalEncoding.generate(w.literalFreq, 15)
- w.offsetEncoding.generate(w.offsetFreq, 15)
+ w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
return
}
@@ -586,30 +601,50 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if w.err != nil {
return
}
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
for _, t := range tokens {
if t < matchType {
- w.writeCode(leCodes[t.literal()])
+ w.writeCode(lits[t.literal()])
continue
}
+
// Write the length
length := t.length()
lengthCode := lengthCode(length)
- w.writeCode(leCodes[lengthCode+lengthCodesStart])
- extraLengthBits := uint(lengthExtraBits[lengthCode])
+ w.writeCode(lengths[lengthCode&31])
+ extraLengthBits := uint(lengthExtraBits[lengthCode&31])
if extraLengthBits > 0 {
- extraLength := int32(length - lengthBase[lengthCode])
+ extraLength := int32(length - lengthBase[lengthCode&31])
w.writeBits(extraLength, extraLengthBits)
}
// Write the offset
offset := t.offset()
offsetCode := offsetCode(offset)
- w.writeCode(oeCodes[offsetCode])
- extraOffsetBits := uint(offsetExtraBits[offsetCode])
+ w.writeCode(offs[offsetCode&31])
+ extraOffsetBits := uint(offsetExtraBits[offsetCode&63])
if extraOffsetBits > 0 {
- extraOffset := int32(offset - offsetBase[offsetCode])
+ extraOffset := int32(offset - offsetBase[offsetCode&63])
w.writeBits(extraOffset, extraOffsetBits)
}
}
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
}
// huffOffset is a static offset encoder used for huffman only encoding.
@@ -620,7 +655,7 @@ func init() {
w := newHuffmanBitWriter(nil)
w.offsetFreq[0] = 1
huffOffset = newHuffmanEncoder(offsetCodeCount)
- huffOffset.generate(w.offsetFreq, 15)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
}
// writeBlockHuff encodes a block of bytes as either
@@ -644,7 +679,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
const numLiterals = endBlockMarker + 1
const numOffsets = 1
- w.literalEncoding.generate(w.literalFreq, 15)
+ w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
// Figure out smallest code.
// Always use dynamic Huffman or Store
@@ -679,13 +714,12 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
bits := w.bits
w.bits >>= 48
w.nbits -= 48
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
n += 6
if n < bufferFlushSize {
continue
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index bdcbd823b..f65f79336 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -6,6 +6,7 @@ package flate
import (
"math"
+ "math/bits"
"sort"
)
@@ -56,7 +57,9 @@ func (h *hcode) set(code uint16, length uint16) {
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
func newHuffmanEncoder(size int) *huffmanEncoder {
- return &huffmanEncoder{codes: make([]hcode, size)}
+ // Make capacity to next power of two.
+ c := uint(bits.Len32(uint32(size - 1)))
+ return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
}
// Generates a HuffmanCode corresponding to the fixed literal table
diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go
index d853320a7..aebebd524 100644
--- a/vendor/github.com/klauspost/compress/flate/snappy.go
+++ b/vendor/github.com/klauspost/compress/flate/snappy.go
@@ -20,12 +20,12 @@ func emitCopy(dst *tokens, offset, length int) {
dst.n++
}
-type snappyEnc interface {
+type fastEnc interface {
Encode(dst *tokens, src []byte)
Reset()
}
-func newSnappy(level int) snappyEnc {
+func newFastEnc(level int) fastEnc {
switch level {
case 1:
return &snappyL1{}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index 4f275ea61..141299b97 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -4,8 +4,6 @@
package flate
-import "fmt"
-
const (
// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
// 8 bits: xlength = length - MIN_MATCH_LENGTH
@@ -19,7 +17,7 @@ const (
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
-var lengthCodes = [...]uint32{
+var lengthCodes = [256]uint8{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
@@ -48,7 +46,7 @@ var lengthCodes = [...]uint32{
27, 27, 27, 27, 27, 28,
}
-var offsetCodes = [...]uint32{
+var offsetCodes = [256]uint32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
@@ -82,34 +80,27 @@ func matchToken(xlength uint32, xoffset uint32) token {
return token(matchType + xlength<<lengthShift + xoffset)
}
-func matchTokend(xlength uint32, xoffset uint32) token {
- if xlength > maxMatchLength || xoffset > maxMatchOffset {
- panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
- return token(matchType)
- }
- return token(matchType + xlength<<lengthShift + xoffset)
-}
-
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }
// Returns the literal of a literal token
-func (t token) literal() uint32 { return uint32(t - literalType) }
+func (t token) literal() uint8 { return uint8(t) }
// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }
-func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
-func lengthCode(len uint32) uint32 { return lengthCodes[len] }
+// The code is never more than 8 bits, but is returned as uint32 for convenience.
+func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
if off < uint32(len(offsetCodes)) {
- return offsetCodes[off]
+ return offsetCodes[off&255]
} else if off>>7 < uint32(len(offsetCodes)) {
- return offsetCodes[off>>7] + 14
+ return offsetCodes[(off>>7)&255] + 14
} else {
- return offsetCodes[off>>14] + 28
+ return offsetCodes[(off>>14)&255] + 28
}
}
diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml
new file mode 100644
index 000000000..630192d59
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+
+sudo: false
+
+os:
+ - linux
+ - osx
+go:
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - master
+
+script:
+ - go vet ./...
+ - go test -v ./...
+ - go test -race ./...
+ - diff <(gofmt -d .) <("")
+
+matrix:
+ allow_failures:
+ - go: 'master'
+ fast_finish: true
diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
new file mode 100644
index 000000000..2ef4714f7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
@@ -0,0 +1,35 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2015- Klaus Post & Contributors.
+Email: klauspost@gmail.com
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md
index b2b6bee87..a7fb41fbe 100644
--- a/vendor/github.com/klauspost/cpuid/README.md
+++ b/vendor/github.com/klauspost/cpuid/README.md
@@ -83,6 +83,8 @@ Package home: https://github.com/klauspost/cpuid
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
* **VMware**
* **XenHVM**
+* **Bhyve**
+* **Hygon**
# installing
diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go
index 60c681bed..db9591321 100644
--- a/vendor/github.com/klauspost/cpuid/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/cpuid.go
@@ -26,6 +26,8 @@ const (
MSVM // Microsoft Hyper-V or Windows Virtual PC
VMware
XenHVM
+ Bhyve
+ Hygon
)
const (
@@ -472,6 +474,11 @@ func (c CPUInfo) AMD() bool {
return c.VendorID == AMD
}
+// Hygon returns true if vendor is recognized as Hygon
+func (c CPUInfo) Hygon() bool {
+ return c.VendorID == Hygon
+}
+
// Transmeta returns true if vendor is recognized as Transmeta
func (c CPUInfo) Transmeta() bool {
return c.VendorID == Transmeta
@@ -527,7 +534,7 @@ func (c CPUInfo) LogicalCPU() int {
// have many false negatives.
func (c CPUInfo) VM() bool {
switch c.VendorID {
- case MSVM, KVM, VMware, XenHVM:
+ case MSVM, KVM, VMware, XenHVM, Bhyve:
return true
}
return false
@@ -625,7 +632,7 @@ func logicalCores() int {
}
_, b, _, _ := cpuidex(0xb, 1)
return int(b & 0xffff)
- case AMD:
+ case AMD, Hygon:
_, b, _, _ := cpuid(1)
return int((b >> 16) & 0xff)
default:
@@ -647,7 +654,7 @@ func physicalCores() int {
switch vendorID() {
case Intel:
return logicalCores() / threadsPerCore()
- case AMD:
+ case AMD, Hygon:
if maxExtendedFunction() >= 0x80000008 {
_, _, c, _ := cpuid(0x80000008)
return int(c&0xff) + 1
@@ -670,6 +677,8 @@ var vendorMapping = map[string]Vendor{
"Microsoft Hv": MSVM,
"VMwareVMware": VMware,
"XenVMMXenVMM": XenHVM,
+ "bhyve bhyve ": Bhyve,
+ "HygonGenuine": Hygon,
}
func vendorID() Vendor {
@@ -742,7 +751,7 @@ func (c *CPUInfo) cacheSize() {
c.Cache.L3 = size
}
}
- case AMD:
+ case AMD, Hygon:
// Untested.
if maxExtendedFunction() < 0x80000005 {
return
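Editorial note: the cpuid change above adds Hygon and Bhyve as recognized vendors and folds Hygon into the AMD code paths for core counts and cache sizes. A short sketch of how a consumer might exercise the new checks, assuming this vendored version's automatic detection at package init (output is machine-dependent):

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid"
)

func main() {
	// Detect() is run from the package's init in this version, so CPU is
	// already populated at import time.
	fmt.Println("brand:         ", cpuid.CPU.BrandName)
	fmt.Println("physical cores:", cpuid.CPU.PhysicalCores)
	fmt.Println("logical cores: ", cpuid.CPU.LogicalCores)

	if cpuid.CPU.Hygon() {
		fmt.Println("Hygon CPU detected; AMD-style CPUID leaves apply")
	}
	if cpuid.CPU.VM() {
		// With this change the check also covers bhyve guests.
		fmt.Println("running inside a hypervisor")
	}
}
```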
diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go
new file mode 100644
index 000000000..437333d29
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/private-gen.go
@@ -0,0 +1,476 @@
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var inFiles = []string{"cpuid.go", "cpuid_test.go"}
+var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
+var fileSet = token.NewFileSet()
+var reWrites = []rewrite{
+ initRewrite("CPUInfo -> cpuInfo"),
+ initRewrite("Vendor -> vendor"),
+ initRewrite("Flags -> flags"),
+ initRewrite("Detect -> detect"),
+ initRewrite("CPU -> cpu"),
+}
+var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
+ // cpuid_test.go
+ "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
+}
+
+var excludePrefixes = []string{"test", "benchmark"}
+
+func main() {
+ Package := "private"
+ parserMode := parser.ParseComments
+ exported := make(map[string]rewrite)
+ for _, file := range inFiles {
+ in, err := os.Open(file)
+ if err != nil {
+ log.Fatalf("opening input", err)
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ log.Fatalf("reading input", err)
+ }
+
+ astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
+ if err != nil {
+ log.Fatalf("parsing input", err)
+ }
+
+ for _, rw := range reWrites {
+ astfile = rw(astfile)
+ }
+
+ // Inspect the AST and print all identifiers and literals.
+ var startDecl token.Pos
+ var endDecl token.Pos
+ ast.Inspect(astfile, func(n ast.Node) bool {
+ var s string
+ switch x := n.(type) {
+ case *ast.Ident:
+ if x.IsExported() {
+ t := strings.ToLower(x.Name)
+ for _, pre := range excludePrefixes {
+ if strings.HasPrefix(t, pre) {
+ return true
+ }
+ }
+ if excludeNames[t] != true {
+ //if x.Pos() > startDecl && x.Pos() < endDecl {
+ exported[x.Name] = initRewrite(x.Name + " -> " + t)
+ }
+ }
+
+ case *ast.GenDecl:
+ if x.Tok == token.CONST && x.Lparen > 0 {
+ startDecl = x.Lparen
+ endDecl = x.Rparen
+ // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
+ }
+ }
+ if s != "" {
+ fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
+ }
+ return true
+ })
+
+ for _, rw := range exported {
+ astfile = rw(astfile)
+ }
+
+ var buf bytes.Buffer
+
+ printer.Fprint(&buf, fileSet, astfile)
+
+ // Remove package documentation and insert information
+ s := buf.String()
+ ind := strings.Index(buf.String(), "\npackage cpuid")
+ s = s[ind:]
+ s = "// Generated, DO NOT EDIT,\n" +
+ "// but copy it to your own project and rename the package.\n" +
+ "// See more at http://github.com/klauspost/cpuid\n" +
+ s
+
+ outputName := Package + string(os.PathSeparator) + file
+
+ err = ioutil.WriteFile(outputName, []byte(s), 0644)
+ if err != nil {
+ log.Fatalf("writing output: %s", err)
+ }
+ log.Println("Generated", outputName)
+ }
+
+ for _, file := range copyFiles {
+ dst := ""
+ if strings.HasPrefix(file, "cpuid") {
+ dst = Package + string(os.PathSeparator) + file
+ } else {
+ dst = Package + string(os.PathSeparator) + "cpuid_" + file
+ }
+ err := copyFile(file, dst)
+ if err != nil {
+ log.Fatalf("copying file: %s", err)
+ }
+ log.Println("Copied", dst)
+ }
+}
+
+// CopyFile copies a file from src to dst. If src and dst files exist, and are
+// the same, then return success. Copy the file contents from src to dst.
+func copyFile(src, dst string) (err error) {
+ sfi, err := os.Stat(src)
+ if err != nil {
+ return
+ }
+ if !sfi.Mode().IsRegular() {
+ // cannot copy non-regular files (e.g., directories,
+ // symlinks, devices, etc.)
+ return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
+ }
+ dfi, err := os.Stat(dst)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return
+ }
+ } else {
+ if !(dfi.Mode().IsRegular()) {
+ return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
+ }
+ if os.SameFile(sfi, dfi) {
+ return
+ }
+ }
+ err = copyFileContents(src, dst)
+ return
+}
+
+// copyFileContents copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all it's contents will be replaced by the contents
+// of the source file.
+func copyFileContents(src, dst string) (err error) {
+ in, err := os.Open(src)
+ if err != nil {
+ return
+ }
+ defer in.Close()
+ out, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+ defer func() {
+ cerr := out.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+ if _, err = io.Copy(out, in); err != nil {
+ return
+ }
+ err = out.Sync()
+ return
+}
+
+type rewrite func(*ast.File) *ast.File
+
+// Mostly copied from gofmt
+func initRewrite(rewriteRule string) rewrite {
+ f := strings.Split(rewriteRule, "->")
+ if len(f) != 2 {
+ fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
+ os.Exit(2)
+ }
+ pattern := parseExpr(f[0], "pattern")
+ replace := parseExpr(f[1], "replacement")
+ return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
+}
+
+// parseExpr parses s as an expression.
+// It might make sense to expand this to allow statement patterns,
+// but there are problems with preserving formatting and also
+// with what a wildcard for a statement looks like.
+func parseExpr(s, what string) ast.Expr {
+ x, err := parser.ParseExpr(s)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
+ os.Exit(2)
+ }
+ return x
+}
+
+// Keep this function for debugging.
+/*
+func dump(msg string, val reflect.Value) {
+ fmt.Printf("%s:\n", msg)
+ ast.Print(fileSet, val.Interface())
+ fmt.Println()
+}
+*/
+
+// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
+func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
+ cmap := ast.NewCommentMap(fileSet, p, p.Comments)
+ m := make(map[string]reflect.Value)
+ pat := reflect.ValueOf(pattern)
+ repl := reflect.ValueOf(replace)
+
+ var rewriteVal func(val reflect.Value) reflect.Value
+ rewriteVal = func(val reflect.Value) reflect.Value {
+ // don't bother if val is invalid to start with
+ if !val.IsValid() {
+ return reflect.Value{}
+ }
+ for k := range m {
+ delete(m, k)
+ }
+ val = apply(rewriteVal, val)
+ if match(m, pat, val) {
+ val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
+ }
+ return val
+ }
+
+ r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
+ r.Comments = cmap.Filter(r).Comments() // recreate comments list
+ return r
+}
+
+// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
+func set(x, y reflect.Value) {
+ // don't bother if x cannot be set or y is invalid
+ if !x.CanSet() || !y.IsValid() {
+ return
+ }
+ defer func() {
+ if x := recover(); x != nil {
+ if s, ok := x.(string); ok &&
+ (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
+ // x cannot be set to y - ignore this rewrite
+ return
+ }
+ panic(x)
+ }
+ }()
+ x.Set(y)
+}
+
+// Values/types for special cases.
+var (
+ objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
+ scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
+
+ identType = reflect.TypeOf((*ast.Ident)(nil))
+ objectPtrType = reflect.TypeOf((*ast.Object)(nil))
+ positionType = reflect.TypeOf(token.NoPos)
+ callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
+ scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
+)
+
+// apply replaces each AST field x in val with f(x), returning val.
+// To avoid extra conversions, f operates on the reflect.Value form.
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
+ if !val.IsValid() {
+ return reflect.Value{}
+ }
+
+ // *ast.Objects introduce cycles and are likely incorrect after
+ // rewrite; don't follow them but replace with nil instead
+ if val.Type() == objectPtrType {
+ return objectPtrNil
+ }
+
+ // similarly for scopes: they are likely incorrect after a rewrite;
+ // replace them with nil
+ if val.Type() == scopePtrType {
+ return scopePtrNil
+ }
+
+ switch v := reflect.Indirect(val); v.Kind() {
+ case reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ e := v.Index(i)
+ set(e, f(e))
+ }
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ e := v.Field(i)
+ set(e, f(e))
+ }
+ case reflect.Interface:
+ e := v.Elem()
+ set(v, f(e))
+ }
+ return val
+}
+
+func isWildcard(s string) bool {
+ rune, size := utf8.DecodeRuneInString(s)
+ return size == len(s) && unicode.IsLower(rune)
+}
+
+// match returns true if pattern matches val,
+// recording wildcard submatches in m.
+// If m == nil, match checks whether pattern == val.
+func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
+ // Wildcard matches any expression. If it appears multiple
+ // times in the pattern, it must match the same expression
+ // each time.
+ if m != nil && pattern.IsValid() && pattern.Type() == identType {
+ name := pattern.Interface().(*ast.Ident).Name
+ if isWildcard(name) && val.IsValid() {
+ // wildcards only match valid (non-nil) expressions.
+ if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
+ if old, ok := m[name]; ok {
+ return match(nil, old, val)
+ }
+ m[name] = val
+ return true
+ }
+ }
+ }
+
+ // Otherwise, pattern and val must match recursively.
+ if !pattern.IsValid() || !val.IsValid() {
+ return !pattern.IsValid() && !val.IsValid()
+ }
+ if pattern.Type() != val.Type() {
+ return false
+ }
+
+ // Special cases.
+ switch pattern.Type() {
+ case identType:
+ // For identifiers, only the names need to match
+ // (and none of the other *ast.Object information).
+ // This is a common case, handle it all here instead
+ // of recursing down any further via reflection.
+ p := pattern.Interface().(*ast.Ident)
+ v := val.Interface().(*ast.Ident)
+ return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
+ case objectPtrType, positionType:
+ // object pointers and token positions always match
+ return true
+ case callExprType:
+ // For calls, the Ellipsis fields (token.Position) must
+ // match since that is how f(x) and f(x...) are different.
+ // Check them here but fall through for the remaining fields.
+ p := pattern.Interface().(*ast.CallExpr)
+ v := val.Interface().(*ast.CallExpr)
+ if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
+ return false
+ }
+ }
+
+ p := reflect.Indirect(pattern)
+ v := reflect.Indirect(val)
+ if !p.IsValid() || !v.IsValid() {
+ return !p.IsValid() && !v.IsValid()
+ }
+
+ switch p.Kind() {
+ case reflect.Slice:
+ if p.Len() != v.Len() {
+ return false
+ }
+ for i := 0; i < p.Len(); i++ {
+ if !match(m, p.Index(i), v.Index(i)) {
+ return false
+ }
+ }
+ return true
+
+ case reflect.Struct:
+ for i := 0; i < p.NumField(); i++ {
+ if !match(m, p.Field(i), v.Field(i)) {
+ return false
+ }
+ }
+ return true
+
+ case reflect.Interface:
+ return match(m, p.Elem(), v.Elem())
+ }
+
+ // Handle token integers, etc.
+ return p.Interface() == v.Interface()
+}
+
+// subst returns a copy of pattern with values from m substituted in place
+// of wildcards and pos used as the position of tokens from the pattern.
+// if m == nil, subst returns a copy of pattern and doesn't change the line
+// number information.
+func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
+ if !pattern.IsValid() {
+ return reflect.Value{}
+ }
+
+ // Wildcard gets replaced with map value.
+ if m != nil && pattern.Type() == identType {
+ name := pattern.Interface().(*ast.Ident).Name
+ if isWildcard(name) {
+ if old, ok := m[name]; ok {
+ return subst(nil, old, reflect.Value{})
+ }
+ }
+ }
+
+ if pos.IsValid() && pattern.Type() == positionType {
+ // use new position only if old position was valid in the first place
+ if old := pattern.Interface().(token.Pos); !old.IsValid() {
+ return pattern
+ }
+ return pos
+ }
+
+ // Otherwise copy.
+ switch p := pattern; p.Kind() {
+ case reflect.Slice:
+ v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
+ for i := 0; i < p.Len(); i++ {
+ v.Index(i).Set(subst(m, p.Index(i), pos))
+ }
+ return v
+
+ case reflect.Struct:
+ v := reflect.New(p.Type()).Elem()
+ for i := 0; i < p.NumField(); i++ {
+ v.Field(i).Set(subst(m, p.Field(i), pos))
+ }
+ return v
+
+ case reflect.Ptr:
+ v := reflect.New(p.Type()).Elem()
+ if elem := p.Elem(); elem.IsValid() {
+ v.Set(subst(m, elem, pos).Addr())
+ }
+ return v
+
+ case reflect.Interface:
+ v := reflect.New(p.Type()).Elem()
+ if elem := p.Elem(); elem.IsValid() {
+ v.Set(subst(m, elem, pos))
+ }
+ return v
+ }
+
+ return pattern
+}
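Editorial note: private-gen.go is a `// +build ignore` helper that copies the package into a `private` variant by rewriting exported identifiers to unexported ones, reusing gofmt's pattern/replacement machinery. As a rough illustration of the effect only (not the actual match/subst implementation above), an identifier rename of this kind can be sketched with a plain ast.Inspect pass:

```go
package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"os"
)

func main() {
	src := `package cpuid

type CPUInfo struct{}

var CPU CPUInfo

func Detect() { CPU = CPUInfo{} }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "cpuid.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// Unexport a few identifiers, roughly what the "CPUInfo -> cpuInfo" style
	// rewrite rules in private-gen.go accomplish via gofmt's match/subst.
	rename := map[string]string{"CPUInfo": "cpuInfo", "CPU": "cpu", "Detect": "detect"}
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			if to, ok := rename[id.Name]; ok {
				id.Name = to
			}
		}
		return true
	})

	printer.Fprint(os.Stdout, fset, f)
}
```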
diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml
new file mode 100644
index 000000000..6e9fca0ba
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+
+sudo: false
+
+os:
+ - linux
+ - osx
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - master
+
+script:
+ - go test -v -cpu=1,2,4 .
+ - go test -v -cpu=2 -race -short .
+
+matrix:
+ allow_failures:
+ - go: 'master'
+ fast_finish: true
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 000000000..14127cd83
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 000000000..195333e51
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,41 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go.
REPLACED_BY_GR_REPLACE
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+ sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+* Nicolas Perraut
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 000000000..716c61312
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 000000000..ef18d8f97
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
+ setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+ const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Stdout, &mode)
+ if err != nil {
+ return err
+ }
+
+ if enable {
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ } else {
+ mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ }
+
+ ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
new file mode 100644
index 000000000..df61a6f2f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
@@ -0,0 +1,11 @@
+// +build linux darwin
+
+package sequences
+
+import (
+ "fmt"
+)
+
+func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
+ return fmt.Errorf("windows only package")
+}
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 000000000..5597e026d
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+go:
+ - tip
+
+os:
+ - linux
+ - osx
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod
new file mode 100644
index 000000000..f310320c3
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.mod
@@ -0,0 +1,3 @@
+module github.com/mattn/go-isatty
+
+require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223
diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum
new file mode 100644
index 000000000..426c8973c
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_android.go
index 7384cf991..d3567cb5b 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_linux.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_android.go
@@ -1,5 +1,4 @@
-// +build linux
-// +build !appengine,!ppc64,!ppc64le
+// +build android
package isatty
@@ -16,3 +15,9 @@ func IsTerminal(fd uintptr) bool {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
deleted file mode 100644
index 9584a9884..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on on appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
index 42f2514d1..07e93039d 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -16,3 +16,9 @@ func IsTerminal(fd uintptr) bool {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
deleted file mode 100644
index 44e5d2130..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build linux
-// +build ppc64 ppc64le
-
-package isatty
-
-import (
- "unsafe"
-
- syscall "golang.org/x/sys/unix"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
index 9d8b4a599..ff714a376 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -1,9 +1,14 @@
-// +build !windows
-// +build !appengine
+// +build appengine js nacl
package isatty
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// IsTerminal returns true if the file descriptor is terminal which
+// is always false on js and appengine classic which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
index 1f0c6bf53..bdd5c79a0 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -14,3 +14,9 @@ func IsTerminal(fd uintptr) bool {
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
return err == nil
}
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
new file mode 100644
index 000000000..453b025d0
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -0,0 +1,19 @@
+// +build linux aix
+// +build !appengine
+// +build !android
+
+package isatty
+
+import "golang.org/x/sys/unix"
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
+ return err == nil
+}
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
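Editorial note: the rearranged isatty build tags above route Linux and AIX through unix.IoctlGetTermios, Android through the raw ioctl syscall, and appengine/js/nacl through the stub, while every platform now also exposes IsCygwinTerminal. Typical caller code is unchanged; a minimal sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	switch {
	case isatty.IsTerminal(os.Stdout.Fd()):
		fmt.Println("stdout is a terminal")
	case isatty.IsCygwinTerminal(os.Stdout.Fd()):
		// Only ever true on Windows under Cygwin/MSYS2; the stubs added in
		// this diff return false everywhere else.
		fmt.Println("stdout is a Cygwin/MSYS2 pipe terminal")
	default:
		fmt.Println("stdout is not a terminal")
	}
}
```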
diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml
new file mode 100644
index 000000000..16d1430aa
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - tip
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL
diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE
new file mode 100644
index 000000000..740fa9313
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md
new file mode 100644
index 000000000..b1d235c78
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/README.md
@@ -0,0 +1,47 @@
+# go-shellwords
+
+[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master)
+[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords)
+
+Parse a line as shell words.
+
+## Usage
+
+```go
+args, err := shellwords.Parse("./foo --bar=baz")
+// args should be ["./foo", "--bar=baz"]
+```
+
+```go
+os.Setenv("FOO", "bar")
+p := shellwords.NewParser()
+p.ParseEnv = true
+args, err := p.Parse("./foo $FOO")
+// args should be ["./foo", "bar"]
+```
+
+```go
+p := shellwords.NewParser()
+p.ParseBacktick = true
+args, err := p.Parse("./foo `echo $SHELL`")
+// args should be ["./foo", "/bin/bash"]
+```
+
+```go
+shellwords.ParseBacktick = true
+p := shellwords.NewParser()
+args, err := p.Parse("./foo `echo $SHELL`")
+// args should be ["./foo", "/bin/bash"]
+```
+
+# Thanks
+
+This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine).
+
+# License
+
+under the MIT License: http://mattn.mit-license.org/2017
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-shellwords/go.mod b/vendor/github.com/mattn/go-shellwords/go.mod
new file mode 100644
index 000000000..8d96dbd5f
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/go.mod
@@ -0,0 +1 @@
+module github.com/mattn/go-shellwords
diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go
new file mode 100644
index 000000000..41429d8f2
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/shellwords.go
@@ -0,0 +1,195 @@
+package shellwords
+
+import (
+ "errors"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ ParseEnv bool = false
+ ParseBacktick bool = false
+)
+
+var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`)
+
+func isSpace(r rune) bool {
+ switch r {
+ case ' ', '\t', '\r', '\n':
+ return true
+ }
+ return false
+}
+
+func replaceEnv(getenv func(string) string, s string) string {
+ if getenv == nil {
+ getenv = os.Getenv
+ }
+
+ return envRe.ReplaceAllStringFunc(s, func(s string) string {
+ s = s[1:]
+ if s[0] == '{' {
+ s = s[1 : len(s)-1]
+ }
+ return getenv(s)
+ })
+}
+
+type Parser struct {
+ ParseEnv bool
+ ParseBacktick bool
+ Position int
+
+ // If ParseEnv is true, use this for getenv.
+ // If nil, use os.Getenv.
+ Getenv func(string) string
+}
+
+func NewParser() *Parser {
+ return &Parser{
+ ParseEnv: ParseEnv,
+ ParseBacktick: ParseBacktick,
+ Position: 0,
+ }
+}
+
+func (p *Parser) Parse(line string) ([]string, error) {
+ args := []string{}
+ buf := ""
+ var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool
+ backtick := ""
+
+ pos := -1
+ got := false
+
+loop:
+ for i, r := range line {
+ if escaped {
+ buf += string(r)
+ escaped = false
+ continue
+ }
+
+ if r == '\\' {
+ if singleQuoted {
+ buf += string(r)
+ } else {
+ escaped = true
+ }
+ continue
+ }
+
+ if isSpace(r) {
+ if singleQuoted || doubleQuoted || backQuote || dollarQuote {
+ buf += string(r)
+ backtick += string(r)
+ } else if got {
+ if p.ParseEnv {
+ buf = replaceEnv(p.Getenv, buf)
+ }
+ args = append(args, buf)
+ buf = ""
+ got = false
+ }
+ continue
+ }
+
+ switch r {
+ case '`':
+ if !singleQuoted && !doubleQuoted && !dollarQuote {
+ if p.ParseBacktick {
+ if backQuote {
+ out, err := shellRun(backtick)
+ if err != nil {
+ return nil, err
+ }
+ buf = out
+ }
+ backtick = ""
+ backQuote = !backQuote
+ continue
+ }
+ backtick = ""
+ backQuote = !backQuote
+ }
+ case ')':
+ if !singleQuoted && !doubleQuoted && !backQuote {
+ if p.ParseBacktick {
+ if dollarQuote {
+ out, err := shellRun(backtick)
+ if err != nil {
+ return nil, err
+ }
+ if r == ')' {
+ buf = buf[:len(buf)-len(backtick)-2] + out
+ } else {
+ buf = buf[:len(buf)-len(backtick)-1] + out
+ }
+ }
+ backtick = ""
+ dollarQuote = !dollarQuote
+ continue
+ }
+ backtick = ""
+ dollarQuote = !dollarQuote
+ }
+ case '(':
+ if !singleQuoted && !doubleQuoted && !backQuote {
+ if !dollarQuote && strings.HasSuffix(buf, "$") {
+ dollarQuote = true
+ buf += "("
+ continue
+ } else {
+ return nil, errors.New("invalid command line string")
+ }
+ }
+ case '"':
+ if !singleQuoted && !dollarQuote {
+ doubleQuoted = !doubleQuoted
+ continue
+ }
+ case '\'':
+ if !doubleQuoted && !dollarQuote {
+ singleQuoted = !singleQuoted
+ continue
+ }
+ case ';', '&', '|', '<', '>':
+ if !(escaped || singleQuoted || doubleQuoted || backQuote) {
+ if r == '>' && len(buf) > 0 {
+ if c := buf[0]; '0' <= c && c <= '9' {
+ i -= 1
+ got = false
+ }
+ }
+ pos = i
+ break loop
+ }
+ }
+
+ got = true
+ buf += string(r)
+ if backQuote || dollarQuote {
+ backtick += string(r)
+ }
+ }
+
+ if got {
+ if p.ParseEnv {
+ buf = replaceEnv(p.Getenv, buf)
+ }
+ args = append(args, buf)
+ }
+
+ if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote {
+ return nil, errors.New("invalid command line string")
+ }
+
+ p.Position = pos
+
+ return args, nil
+}
+
+func Parse(line string) ([]string, error) {
+ return NewParser().Parse(line)
+}
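Editorial note: the parser above tracks escaping, single/double quoting, backticks, and `$( )` state character by character, and the `Getenv` field lets callers override environment lookups when `ParseEnv` is set. A short sketch of per-parser usage; the `SRC` variable and file paths are just placeholders:

```go
package main

import (
	"fmt"
	"log"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseEnv = true
	// Getenv overrides os.Getenv for $VAR expansion; leaving it nil falls
	// back to os.Getenv.
	p.Getenv = func(key string) string {
		if key == "SRC" {
			return "/tmp/in.txt"
		}
		return ""
	}

	args, err := p.Parse(`cp "$SRC" /tmp/out.txt`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(args) // [cp /tmp/in.txt /tmp/out.txt]
}
```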
diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go
new file mode 100644
index 000000000..180f00f0b
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/util_go15.go
@@ -0,0 +1,24 @@
+// +build !go1.6
+
+package shellwords
+
+import (
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+)
+
+func shellRun(line string) (string, error) {
+ var b []byte
+ var err error
+ if runtime.GOOS == "windows" {
+ b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output()
+ } else {
+ b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output()
+ }
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(b)), nil
+}
diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go
new file mode 100644
index 000000000..eaf1011d6
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/util_posix.go
@@ -0,0 +1,22 @@
+// +build !windows,go1.6
+
+package shellwords
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func shellRun(line string) (string, error) {
+ shell := os.Getenv("SHELL")
+ b, err := exec.Command(shell, "-c", line).Output()
+ if err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ b = eerr.Stderr
+ }
+ return "", errors.New(err.Error() + ":" + string(b))
+ }
+ return strings.TrimSpace(string(b)), nil
+}
diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go
new file mode 100644
index 000000000..e46f89a1f
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/util_windows.go
@@ -0,0 +1,22 @@
+// +build windows,go1.6
+
+package shellwords
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func shellRun(line string) (string, error) {
+ shell := os.Getenv("COMSPEC")
+ b, err := exec.Command(shell, "/c", line).Output()
+ if err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ b = eerr.Stderr
+ }
+ return "", errors.New(err.Error() + ":" + string(b))
+ }
+ return strings.TrimSpace(string(b)), nil
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 000000000..5d8cb5b72
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
new file mode 100644
index 000000000..e16fb946b
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
@@ -0,0 +1 @@
+cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 000000000..81be21437
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 000000000..258c0636a
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ var headerBuf [binary.MaxVarintLen32]byte
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
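Editorial note: ReadDelimited consumes exactly one varint-prefixed message per call and surfaces io.EOF once the stream is exhausted, so a file of records is read in a loop. A sketch under the assumption of a hypothetical generated message package `example.com/internal/logpb` with a `LogEntry` type (any proto.Message works):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	pb "example.com/internal/logpb" // hypothetical generated package
)

func main() {
	f, err := os.Open("entries.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		var entry pb.LogEntry // hypothetical message type
		if _, err := pbutil.ReadDelimited(f, &entry); err != nil {
			if err == io.EOF {
				break // clean end of the record stream
			}
			log.Fatal(err)
		}
		// process entry ...
	}
}
```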
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 000000000..c318385cb
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 000000000..8fb59ad22
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type together in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ var buf [binary.MaxVarintLen32]byte
+ encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
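Editorial note: the framing WriteDelimited produces is simply a uvarint byte count followed by the marshaled message. The prefix handling alone can be shown without any generated protobuf code; a self-contained sketch:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func main() {
	payload := []byte("stand-in for a marshaled protobuf message")

	// Write side: uvarint length prefix followed by the body, which is the
	// record layout WriteDelimited emits after proto.Marshal.
	var stream bytes.Buffer
	var prefix [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(prefix[:], uint64(len(payload)))
	stream.Write(prefix[:n])
	stream.Write(payload)

	// Read side: decode the prefix, then read exactly that many bytes.
	// ReadDelimited does the prefix part byte by byte to avoid over-reading.
	length, err := binary.ReadUvarint(&stream)
	if err != nil {
		panic(err)
	}
	body := make([]byte, length)
	if _, err := io.ReadFull(&stream, body); err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d-byte body: %q\n", length, body)
}
```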
diff --git a/vendor/github.com/mistifyio/go-zfs/.gitignore b/vendor/github.com/mistifyio/go-zfs/.gitignore
new file mode 100644
index 000000000..8000dd9db
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/.gitignore
@@ -0,0 +1 @@
+.vagrant
diff --git a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md
new file mode 100644
index 000000000..f1880c19e
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+## How to Contribute ##
+
+We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+
+### Reporting issues ###
+
+We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and pull requests.
+
+If you find a bug:
+
+* Use the GitHub issue search to check whether the bug has already been reported.
+* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+
+### Pull requests ###
+
+We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+ $ git clone git@github.com:<your-username>/go-zfs.git
+ $ cd go-zfs
+ $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+ $ git checkout master
+ $ git fetch upstream
+ $ git merge upstream/master
+
+Don't work directly on master, as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+
+ $ git checkout -b <feature-branch-name>
+
+Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+
+ $ git commit -m "Issue #<issue-number> - <commit-message>"
+
+Push your feature branch to your fork.
+
+ $ git push origin <feature-branch-name>
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
+* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+### Go Tools ###
+For consistency and to catch minor issues across all of the Go code, please run the following (an example invocation appears after this list):
+* goimports
+* go vet
+* golint
+* errcheck
+
+Many editors can execute the above on save.
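+
+For example, from the repository root (assuming the tools are installed and on your `PATH`):
+
+    $ goimports -w .
+    $ go vet ./...
+    $ golint ./...
+    $ errcheck ./...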
+
+----
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/Vagrantfile
new file mode 100644
index 000000000..3bd6e120b
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/Vagrantfile
@@ -0,0 +1,34 @@
+
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+ config.ssh.forward_agent = true
+
+ config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
+
+ config.vm.provision "shell", inline: <<EOF
+cat << END > /etc/profile.d/go.sh
+export GOPATH=\\$HOME/go
+export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
+END
+
+chown -R vagrant /home/vagrant/go
+
+apt-get update
+apt-get install -y software-properties-common curl
+apt-add-repository --yes ppa:zfs-native/stable
+apt-get update
+apt-get install -y ubuntu-zfs
+
+cd /home/vagrant
+curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
+tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
+
+cat << END > /etc/sudoers.d/go
+Defaults env_keep += "GOPATH"
+END
+
+EOF
+
+end
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
new file mode 100644
index 000000000..3f2bc4741
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.gitignore
@@ -0,0 +1 @@
+/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
new file mode 100644
index 000000000..449e67cd0
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
new file mode 100644
index 000000000..d1e6b2ec5
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
new file mode 100644
index 000000000..7b26c946d
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.gitignore
@@ -0,0 +1,2 @@
+/vendor
+/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
new file mode 100644
index 000000000..fbb43744d
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+ - go get -t -v github.com/modern-go/reflect2-tests/...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
new file mode 100644
index 000000000..2a3a69893
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
new file mode 100644
index 000000000..2f4f4dbdc
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -0,0 +1,35 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+ignored = []
+
+[[constraint]]
+ name = "github.com/modern-go/concurrent"
+ version = "1.0.0"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
new file mode 100644
index 000000000..3d2b9768c
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
+ go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/mrunalp/fileutils/.gitignore b/vendor/github.com/mrunalp/fileutils/.gitignore
new file mode 100644
index 000000000..aac977bca
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/.gitignore
@@ -0,0 +1 @@
+/gocp
diff --git a/vendor/github.com/mrunalp/fileutils/MAINTAINERS b/vendor/github.com/mrunalp/fileutils/MAINTAINERS
new file mode 100644
index 000000000..4a2cafa5c
--- /dev/null
+++ b/vendor/github.com/mrunalp/fileutils/MAINTAINERS
@@ -0,0 +1 @@
+Mrunal Patel <mrunalp@gmail.com> (@mrunalp)
diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/mtrmac/gpgme/.gitignore
new file mode 100644
index 000000000..0210b26e0
--- /dev/null
+++ b/vendor/github.com/mtrmac/gpgme/.gitignore
@@ -0,0 +1 @@
+testdata/gpghome/random_seed
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 3900878bd..72e8ccf0b 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,11 +1,9 @@
language: go
go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- 1.10.x
- 1.11.x
+ - 1.12.x
+ - tip
install:
- go get -v -t ./...
@@ -14,4 +12,4 @@ install:
- go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index d7d797017..4920406ae 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,14 @@
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
## 1.7.0
### New Features
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 5e509313c..dab2a2470 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.7.0"
+const VERSION = "1.8.0"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_suite_test.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_suite_test.go
deleted file mode 100644
index f482ec3dc..000000000
--- a/vendor/github.com/onsi/ginkgo/extensions/table/table_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package table_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTable(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Table Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_test.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_test.go
deleted file mode 100644
index b008e432b..000000000
--- a/vendor/github.com/onsi/ginkgo/extensions/table/table_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package table_test
-
-import (
- "strings"
-
- . "github.com/onsi/ginkgo/extensions/table"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Table", func() {
- DescribeTable("a simple table",
- func(x int, y int, expected bool) {
- Ω(x > y).Should(Equal(expected))
- },
- Entry("x > y", 1, 0, true),
- Entry("x == y", 0, 0, false),
- Entry("x < y", 0, 1, false),
- )
-
- type ComplicatedThings struct {
- Superstructure string
- Substructure string
- Count int
- }
-
- DescribeTable("a more complicated table",
- func(c ComplicatedThings) {
- Ω(strings.Count(c.Superstructure, c.Substructure)).Should(BeNumerically("==", c.Count))
- },
- Entry("with no matching substructures", ComplicatedThings{
- Superstructure: "the sixth sheikh's sixth sheep's sick",
- Substructure: "emir",
- Count: 0,
- }),
- Entry("with one matching substructure", ComplicatedThings{
- Superstructure: "the sixth sheikh's sixth sheep's sick",
- Substructure: "sheep",
- Count: 1,
- }),
- Entry("with many matching substructures", ComplicatedThings{
- Superstructure: "the sixth sheikh's sixth sheep's sick",
- Substructure: "si",
- Count: 3,
- }),
- )
-
- PDescribeTable("a failure",
- func(value bool) {
- Ω(value).Should(BeFalse())
- },
- Entry("when true", true),
- Entry("when false", false),
- Entry("when malformed", 2),
- )
-
- DescribeTable("an untyped nil as an entry",
- func(x interface{}) {
- Expect(x).To(BeNil())
- },
- Entry("nil", nil),
- )
-})
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
index 019fd2337..ad044303b 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
@@ -108,10 +108,8 @@ func generateSpec(args []string, agouti, noDot, internal bool) {
func generateSpecForSubject(subject string, agouti, noDot, internal bool) error {
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
if subject != "" {
- subject = strings.Split(subject, ".go")[0]
- subject = strings.Split(subject, "_test")[0]
- specFilePrefix = subject
- formattedName = prettifyPackageName(subject)
+ specFilePrefix = formatSubject(subject)
+ formattedName = prettifyPackageName(specFilePrefix)
}
data := specData{
@@ -152,6 +150,14 @@ func generateSpecForSubject(subject string, agouti, noDot, internal bool) error
return nil
}
+func formatSubject(name string) string {
+ name = strings.Replace(name, "-", "_", -1)
+ name = strings.Replace(name, " ", "_", -1)
+ name = strings.Split(name, ".go")[0]
+ name = strings.Split(name, "_test")[0]
+ return name
+}
+
func getPackageImportPath() string {
workingDir, err := os.Getwd()
if err != nil {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go
deleted file mode 100644
index 72b733219..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package nodot_test
-
-import (
- "github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestNodot(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Nodot Suite")
-}
-
-// Declarations for Ginkgo DSL
-type Done ginkgo.Done
-type Benchmarker ginkgo.Benchmarker
-
-var GinkgoWriter = ginkgo.GinkgoWriter
-var GinkgoParallelNode = ginkgo.GinkgoParallelNode
-var GinkgoT = ginkgo.GinkgoT
-var CurrentGinkgoTestDescription = ginkgo.CurrentGinkgoTestDescription
-var RunSpecs = ginkgo.RunSpecs
-var RunSpecsWithDefaultAndCustomReporters = ginkgo.RunSpecsWithDefaultAndCustomReporters
-var RunSpecsWithCustomReporters = ginkgo.RunSpecsWithCustomReporters
-var Fail = ginkgo.Fail
-var GinkgoRecover = ginkgo.GinkgoRecover
-var Describe = ginkgo.Describe
-var FDescribe = ginkgo.FDescribe
-var PDescribe = ginkgo.PDescribe
-var XDescribe = ginkgo.XDescribe
-var Context = ginkgo.Context
-var FContext = ginkgo.FContext
-var PContext = ginkgo.PContext
-var XContext = ginkgo.XContext
-var It = ginkgo.It
-var FIt = ginkgo.FIt
-var PIt = ginkgo.PIt
-var XIt = ginkgo.XIt
-var Measure = ginkgo.Measure
-var FMeasure = ginkgo.FMeasure
-var PMeasure = ginkgo.PMeasure
-var XMeasure = ginkgo.XMeasure
-var BeforeSuite = ginkgo.BeforeSuite
-var AfterSuite = ginkgo.AfterSuite
-var SynchronizedBeforeSuite = ginkgo.SynchronizedBeforeSuite
-var SynchronizedAfterSuite = ginkgo.SynchronizedAfterSuite
-var BeforeEach = ginkgo.BeforeEach
-var JustBeforeEach = ginkgo.JustBeforeEach
-var JustAfterEach = ginkgo.JustAfterEach
-var AfterEach = ginkgo.AfterEach
-
-// Declarations for Gomega DSL
-var RegisterFailHandler = gomega.RegisterFailHandler
-var RegisterTestingT = gomega.RegisterTestingT
-var InterceptGomegaFailures = gomega.InterceptGomegaFailures
-var Ω = gomega.Ω
-var Expect = gomega.Expect
-var ExpectWithOffset = gomega.ExpectWithOffset
-var Eventually = gomega.Eventually
-var EventuallyWithOffset = gomega.EventuallyWithOffset
-var Consistently = gomega.Consistently
-var ConsistentlyWithOffset = gomega.ConsistentlyWithOffset
-var SetDefaultEventuallyTimeout = gomega.SetDefaultEventuallyTimeout
-var SetDefaultEventuallyPollingInterval = gomega.SetDefaultEventuallyPollingInterval
-var SetDefaultConsistentlyDuration = gomega.SetDefaultConsistentlyDuration
-var SetDefaultConsistentlyPollingInterval = gomega.SetDefaultConsistentlyPollingInterval
-
-// Declarations for Gomega Matchers
-var Equal = gomega.Equal
-var BeEquivalentTo = gomega.BeEquivalentTo
-var BeNil = gomega.BeNil
-var BeTrue = gomega.BeTrue
-var BeFalse = gomega.BeFalse
-var HaveOccurred = gomega.HaveOccurred
-var MatchError = gomega.MatchError
-var BeClosed = gomega.BeClosed
-var Receive = gomega.Receive
-var MatchRegexp = gomega.MatchRegexp
-var ContainSubstring = gomega.ContainSubstring
-var MatchJSON = gomega.MatchJSON
-var BeEmpty = gomega.BeEmpty
-var HaveLen = gomega.HaveLen
-var BeZero = gomega.BeZero
-var ContainElement = gomega.ContainElement
-var ConsistOf = gomega.ConsistOf
-var HaveKey = gomega.HaveKey
-var HaveKeyWithValue = gomega.HaveKeyWithValue
-var BeNumerically = gomega.BeNumerically
-var BeTemporally = gomega.BeTemporally
-var BeAssignableToTypeOf = gomega.BeAssignableToTypeOf
-var Panic = gomega.Panic
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go
deleted file mode 100644
index 1470b7478..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package nodot_test
-
-import (
- "strings"
-
- . "github.com/onsi/ginkgo/ginkgo/nodot"
-)
-
-var _ = Describe("ApplyNoDot", func() {
- var result string
-
- apply := func(input string) string {
- output, err := ApplyNoDot([]byte(input))
- Ω(err).ShouldNot(HaveOccurred())
- return string(output)
- }
-
- Context("when no declarations have been imported yet", func() {
- BeforeEach(func() {
- result = apply("")
- })
-
- It("should add headings for the various declarations", func() {
- Ω(result).Should(ContainSubstring("// Declarations for Ginkgo DSL"))
- Ω(result).Should(ContainSubstring("// Declarations for Gomega DSL"))
- Ω(result).Should(ContainSubstring("// Declarations for Gomega Matchers"))
- })
-
- It("should import Ginkgo's declarations", func() {
- Ω(result).Should(ContainSubstring("var It = ginkgo.It"))
- Ω(result).Should(ContainSubstring("var XDescribe = ginkgo.XDescribe"))
- })
-
- It("should import Ginkgo's types", func() {
- Ω(result).Should(ContainSubstring("type Done ginkgo.Done"))
- Ω(result).Should(ContainSubstring("type Benchmarker ginkgo.Benchmarker"))
- Ω(strings.Count(result, "type ")).Should(Equal(2))
- })
-
- It("should import Gomega's DSL and matchers", func() {
- Ω(result).Should(ContainSubstring("var Ω = gomega.Ω"))
- Ω(result).Should(ContainSubstring("var ContainSubstring = gomega.ContainSubstring"))
- Ω(result).Should(ContainSubstring("var Equal = gomega.Equal"))
- })
-
- It("should not import blacklisted things", func() {
- Ω(result).ShouldNot(ContainSubstring("GINKGO_VERSION"))
- Ω(result).ShouldNot(ContainSubstring("GINKGO_PANIC"))
- Ω(result).ShouldNot(ContainSubstring("GOMEGA_VERSION"))
- })
- })
-
- It("should be idempotent (module empty lines - go fmt can fix those for us)", func() {
- first := apply("")
- second := apply(first)
- first = strings.Trim(first, "\n")
- second = strings.Trim(second, "\n")
- Ω(first).Should(Equal(second))
- })
-
- It("should not mess with other things in the input", func() {
- result = apply("var MyThing = SomethingThatsMine")
- Ω(result).Should(ContainSubstring("var MyThing = SomethingThatsMine"))
- })
-
- Context("when the user has redefined a name", func() {
- It("should honor the redefinition", func() {
- result = apply(`
-var _ = gomega.Ω
-var When = ginkgo.It
- `)
-
- Ω(result).Should(ContainSubstring("var _ = gomega.Ω"))
- Ω(result).ShouldNot(ContainSubstring("var Ω = gomega.Ω"))
-
- Ω(result).Should(ContainSubstring("var When = ginkgo.It"))
- Ω(result).ShouldNot(ContainSubstring("var It = ginkgo.It"))
-
- Ω(result).Should(ContainSubstring("var Context = ginkgo.Context"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
index b7cb7f566..e0994fc3c 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
@@ -126,6 +126,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.")
c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.")
c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.")
+ c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.")
c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).")
c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.")
c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.")
@@ -140,6 +141,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")
+ c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. If list is 'off', 'go test' does not run 'go vet' at all.")
if mode == runMode || mode == watchMode {
config.Flags(c.FlagSet, "", false)
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
index a0113e136..dccb39518 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
@@ -119,6 +119,8 @@ func (t *TestRunner) BuildArgs(path string) []string {
"coverpkg",
"tags",
"gcflags",
+ "vet",
+ "mod",
}
for _, opt := range stringOpts {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner_test.go
deleted file mode 100644
index 691917043..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package testrunner_test
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/ginkgo/testrunner"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
- . "github.com/onsi/gomega"
-)
-
-func strAddr(s string) interface{} {
- return &s
-}
-
-func boolAddr(s bool) interface{} {
- return &s
-}
-
-func intAddr(s int) interface{} {
- return &s
-}
-
-var _ = Describe("TestRunner", func() {
- It("should pass through go opts", func() {
- //var opts map[string]interface{}
- opts := map[string]interface{}{
- "asmflags": strAddr("a"),
- "pkgdir": strAddr("b"),
- "gcflags": strAddr("c"),
- "covermode": strAddr(""),
- "coverpkg": strAddr(""),
- "cover": boolAddr(false),
- "blockprofilerate": intAddr(100),
- }
- tr := testrunner.New(testsuite.TestSuite{}, 1, false, 0, opts, []string{})
-
- args := tr.BuildArgs(".")
- // Remove the "-i" argument; This is discarded in Golang 1.10+.
- if args[2] == "-i" {
- args = append(args[0:2], args[3:]...)
- }
- Ω(args).Should(Equal([]string{
- "test",
- "-c",
- "-o",
- ".",
- "",
- "-blockprofilerate=100",
- "-asmflags=a",
- "-pkgdir=b",
- "-gcflags=c",
- }))
- })
-})
-
-func TestTestRunner(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Test Runner Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go
deleted file mode 100644
index d1e8b21d3..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package testsuite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTestsuite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Testsuite Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go
deleted file mode 100644
index 7a0753bf5..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// +build go1.6
-
-package testsuite_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/ginkgo/testsuite"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("TestSuite", func() {
- var tmpDir string
- var relTmpDir string
-
- writeFile := func(folder string, filename string, content string, mode os.FileMode) {
- path := filepath.Join(tmpDir, folder)
- err := os.MkdirAll(path, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- path = filepath.Join(path, filename)
- ioutil.WriteFile(path, []byte(content), mode)
- }
-
- var origVendor string
-
- BeforeSuite(func() {
- origVendor = os.Getenv("GO15VENDOREXPERIMENT")
- })
-
- AfterSuite(func() {
- os.Setenv("GO15VENDOREXPERIMENT", origVendor)
- })
-
- BeforeEach(func() {
- var err error
- tmpDir, err = ioutil.TempDir("/tmp", "ginkgo")
- Ω(err).ShouldNot(HaveOccurred())
-
- cwd, err := os.Getwd()
- Ω(err).ShouldNot(HaveOccurred())
- relTmpDir, err = filepath.Rel(cwd, tmpDir)
- Ω(err).ShouldNot(HaveOccurred())
-
- //go files in the root directory (no tests)
- writeFile("/", "main.go", "package main", 0666)
-
- //non-go files in a nested directory
- writeFile("/redherring", "big_test.jpg", "package ginkgo", 0666)
-
- //ginkgo tests in ignored go files
- writeFile("/ignored", ".ignore_dot_test.go", `import "github.com/onsi/ginkgo"`, 0666)
- writeFile("/ignored", "_ignore_underscore_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //non-ginkgo tests in a nested directory
- writeFile("/professorplum", "professorplum_test.go", `import "testing"`, 0666)
-
- //ginkgo tests in a nested directory
- writeFile("/colonelmustard", "colonelmustard_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //ginkgo tests in a deeply nested directory
- writeFile("/colonelmustard/library", "library_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //ginkgo tests deeply nested in a vendored dependency
- writeFile("/vendor/mrspeacock/lounge", "lounge_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //a precompiled ginkgo test
- writeFile("/precompiled-dir", "precompiled.test", `fake-binary-file`, 0777)
- writeFile("/precompiled-dir", "some-other-binary", `fake-binary-file`, 0777)
- writeFile("/precompiled-dir", "nonexecutable.test", `fake-binary-file`, 0666)
- })
-
- AfterEach(func() {
- os.RemoveAll(tmpDir)
- })
-
- Describe("Finding precompiled test suites", func() {
- Context("if pointed at an executable file that ends with .test", func() {
- It("should return a precompiled test suite", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "precompiled.test"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(suite).Should(Equal(TestSuite{
- Path: relTmpDir + "/precompiled-dir",
- PackageName: "precompiled",
- IsGinkgo: true,
- Precompiled: true,
- }))
- })
- })
-
- Context("if pointed at a directory", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at an executable that doesn't have .test", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "some-other-binary"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at a .test that isn't executable", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "nonexecutable.test"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at a nonexisting file", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "nope-nothing-to-see-here"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
- })
-
- Describe("scanning for suites in a directory", func() {
- Context("when there are no tests in the specified directory", func() {
- It("should come up empty", func() {
- suites := SuitesInDir(tmpDir, false)
- Ω(suites).Should(BeEmpty())
- })
- })
-
- Context("when there are ginkgo tests in the specified directory", func() {
- It("should return an appropriately configured suite", func() {
- suites := SuitesInDir(filepath.Join(tmpDir, "colonelmustard"), false)
- Ω(suites).Should(HaveLen(1))
-
- Ω(suites[0].Path).Should(Equal(relTmpDir + "/colonelmustard"))
- Ω(suites[0].PackageName).Should(Equal("colonelmustard"))
- Ω(suites[0].IsGinkgo).Should(BeTrue())
- Ω(suites[0].Precompiled).Should(BeFalse())
- })
- })
-
- Context("when there are ginkgo tests that are ignored by go in the specified directory ", func() {
- It("should come up empty", func() {
- suites := SuitesInDir(filepath.Join(tmpDir, "ignored"), false)
- Ω(suites).Should(BeEmpty())
- })
- })
-
- Context("when there are non-ginkgo tests in the specified directory", func() {
- It("should return an appropriately configured suite", func() {
- suites := SuitesInDir(filepath.Join(tmpDir, "professorplum"), false)
- Ω(suites).Should(HaveLen(1))
-
- Ω(suites[0].Path).Should(Equal(relTmpDir + "/professorplum"))
- Ω(suites[0].PackageName).Should(Equal("professorplum"))
- Ω(suites[0].IsGinkgo).Should(BeFalse())
- Ω(suites[0].Precompiled).Should(BeFalse())
- })
- })
-
- Context("given GO15VENDOREXPERIMENT disabled", func() {
- BeforeEach(func() {
- os.Setenv("GO15VENDOREXPERIMENT", "0")
- })
-
- AfterEach(func() {
- os.Setenv("GO15VENDOREXPERIMENT", "")
- })
-
- It("should not skip vendor dirs", func() {
- suites := SuitesInDir(filepath.Join(tmpDir+"/vendor"), true)
- Ω(suites).Should(HaveLen(1))
- })
-
- It("should recurse into vendor dirs", func() {
- suites := SuitesInDir(filepath.Join(tmpDir), true)
- Ω(suites).Should(HaveLen(4))
- })
- })
-
- Context("when recursively scanning", func() {
- It("should return suites for corresponding test suites, only", func() {
- suites := SuitesInDir(tmpDir, true)
- Ω(suites).Should(HaveLen(3))
-
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/colonelmustard",
- PackageName: "colonelmustard",
- IsGinkgo: true,
- Precompiled: false,
- }))
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/professorplum",
- PackageName: "professorplum",
- IsGinkgo: false,
- Precompiled: false,
- }))
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/colonelmustard/library",
- PackageName: "library",
- IsGinkgo: true,
- Precompiled: false,
- }))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15_test.go
deleted file mode 100644
index dc3ca2a94..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15_test.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// +build !go1.6
-
-package testsuite_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/ginkgo/testsuite"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("TestSuite", func() {
- var tmpDir string
- var relTmpDir string
-
- writeFile := func(folder string, filename string, content string, mode os.FileMode) {
- path := filepath.Join(tmpDir, folder)
- err := os.MkdirAll(path, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- path = filepath.Join(path, filename)
- ioutil.WriteFile(path, []byte(content), mode)
- }
-
- var origVendor string
-
- BeforeSuite(func() {
- origVendor = os.Getenv("GO15VENDOREXPERIMENT")
- })
-
- AfterSuite(func() {
- os.Setenv("GO15VENDOREXPERIMENT", origVendor)
- })
-
- BeforeEach(func() {
- var err error
- tmpDir, err = ioutil.TempDir("/tmp", "ginkgo")
- Ω(err).ShouldNot(HaveOccurred())
-
- cwd, err := os.Getwd()
- Ω(err).ShouldNot(HaveOccurred())
- relTmpDir, err = filepath.Rel(cwd, tmpDir)
- Ω(err).ShouldNot(HaveOccurred())
-
- //go files in the root directory (no tests)
- writeFile("/", "main.go", "package main", 0666)
-
- //non-go files in a nested directory
- writeFile("/redherring", "big_test.jpg", "package ginkgo", 0666)
-
- //non-ginkgo tests in a nested directory
- writeFile("/professorplum", "professorplum_test.go", `import "testing"`, 0666)
-
- //ginkgo tests in a nested directory
- writeFile("/colonelmustard", "colonelmustard_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //ginkgo tests in a deeply nested directory
- writeFile("/colonelmustard/library", "library_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //ginkgo tests deeply nested in a vendored dependency
- writeFile("/vendor/mrspeacock/lounge", "lounge_test.go", `import "github.com/onsi/ginkgo"`, 0666)
-
- //a precompiled ginkgo test
- writeFile("/precompiled-dir", "precompiled.test", `fake-binary-file`, 0777)
- writeFile("/precompiled-dir", "some-other-binary", `fake-binary-file`, 0777)
- writeFile("/precompiled-dir", "nonexecutable.test", `fake-binary-file`, 0666)
- })
-
- AfterEach(func() {
- os.RemoveAll(tmpDir)
- })
-
- Describe("Finding precompiled test suites", func() {
- Context("if pointed at an executable file that ends with .test", func() {
- It("should return a precompiled test suite", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "precompiled.test"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(suite).Should(Equal(TestSuite{
- Path: relTmpDir + "/precompiled-dir",
- PackageName: "precompiled",
- IsGinkgo: true,
- Precompiled: true,
- }))
- })
- })
-
- Context("if pointed at a directory", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at an executable that doesn't have .test", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "some-other-binary"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at a .test that isn't executable", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "nonexecutable.test"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
-
- Context("if pointed at a nonexisting file", func() {
- It("should error", func() {
- suite, err := PrecompiledTestSuite(filepath.Join(tmpDir, "precompiled-dir", "nope-nothing-to-see-here"))
- Ω(suite).Should(BeZero())
- Ω(err).Should(HaveOccurred())
- })
- })
- })
-
- Describe("scanning for suites in a directory", func() {
- Context("when there are no tests in the specified directory", func() {
- It("should come up empty", func() {
- suites := SuitesInDir(tmpDir, false)
- Ω(suites).Should(BeEmpty())
- })
- })
-
- Context("when there are ginkgo tests in the specified directory", func() {
- It("should return an appropriately configured suite", func() {
- suites := SuitesInDir(filepath.Join(tmpDir, "colonelmustard"), false)
- Ω(suites).Should(HaveLen(1))
-
- Ω(suites[0].Path).Should(Equal(relTmpDir + "/colonelmustard"))
- Ω(suites[0].PackageName).Should(Equal("colonelmustard"))
- Ω(suites[0].IsGinkgo).Should(BeTrue())
- Ω(suites[0].Precompiled).Should(BeFalse())
- })
- })
-
- Context("when there are non-ginkgo tests in the specified directory", func() {
- It("should return an appropriately configured suite", func() {
- suites := SuitesInDir(filepath.Join(tmpDir, "professorplum"), false)
- Ω(suites).Should(HaveLen(1))
-
- Ω(suites[0].Path).Should(Equal(relTmpDir + "/professorplum"))
- Ω(suites[0].PackageName).Should(Equal("professorplum"))
- Ω(suites[0].IsGinkgo).Should(BeFalse())
- Ω(suites[0].Precompiled).Should(BeFalse())
- })
- })
-
- Context("given GO15VENDOREXPERIMENT", func() {
- BeforeEach(func() {
- os.Setenv("GO15VENDOREXPERIMENT", "1")
- })
-
- AfterEach(func() {
- os.Setenv("GO15VENDOREXPERIMENT", "")
- })
-
- It("should skip vendor dirs", func() {
- suites := SuitesInDir(filepath.Join(tmpDir+"/vendor"), false)
- Ω(suites).Should(HaveLen(0))
- })
-
- It("should not recurse into vendor dirs", func() {
- suites := SuitesInDir(filepath.Join(tmpDir), true)
- Ω(suites).Should(HaveLen(3))
- })
- })
-
- Context("when recursively scanning", func() {
- It("should return suites for corresponding test suites, only", func() {
- suites := SuitesInDir(tmpDir, true)
- Ω(suites).Should(HaveLen(4))
-
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/colonelmustard",
- PackageName: "colonelmustard",
- IsGinkgo: true,
- Precompiled: false,
- }))
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/professorplum",
- PackageName: "professorplum",
- IsGinkgo: false,
- Precompiled: false,
- }))
- Ω(suites).Should(ContainElement(TestSuite{
- Path: relTmpDir + "/colonelmustard/library",
- PackageName: "library",
- IsGinkgo: true,
- Precompiled: false,
- }))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 5aa96b4d9..a6b96d88f 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -457,13 +457,13 @@ func FMeasure(text string, body interface{}, samples int) bool {
return true
}
-//You can mark Maeasurements as pending using PMeasure
+//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
-//You can mark Maeasurements as pending using XMeasure
+//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage.go
deleted file mode 100644
index 10c1c1bd1..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package first_package
-
-func A() string {
- return "A"
-}
-
-func B() string {
- return "B"
-}
-
-func C() string {
- return "C"
-}
-
-func D() string {
- return "D"
-}
-
-func E() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_suite_test.go
deleted file mode 100644
index 4e0976cd5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package first_package_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestCoverageFixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "CombinedFixture First Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_test.go
deleted file mode 100644
index dfe3e1127..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/coverage_fixture_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package first_package_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package"
- . "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("CoverageFixture", func() {
- It("should test A", func() {
- Ω(A()).Should(Equal("A"))
- })
-
- It("should test B", func() {
- Ω(B()).Should(Equal("B"))
- })
-
- It("should test C", func() {
- Ω(C()).Should(Equal("C"))
- })
-
- It("should test D", func() {
- Ω(D()).Should(Equal("D"))
- })
-
- It("should test external package", func() {
- Ω(Tested()).Should(Equal("tested"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture/external_coverage.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture/external_coverage.go
deleted file mode 100644
index 5280d4ddf..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture/external_coverage.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package external_coverage
-
-func Tested() string {
- return "tested"
-}
-
-func Untested() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage.go
deleted file mode 100644
index 52160989b..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package second_package
-
-func A() string {
- return "A"
-}
-
-func B() string {
- return "B"
-}
-
-func C() string {
- return "C"
-}
-
-func D() string {
- return "D"
-}
-
-func E() string {
- return "E"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_suite_test.go
deleted file mode 100644
index 583a0af20..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package second_package_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestCoverageFixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "CombinedFixture Second Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_test.go
deleted file mode 100644
index 2692bec9b..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package/coverage_fixture_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package second_package_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("CoverageFixture", func() {
- It("should test A", func() {
- Ω(A()).Should(Equal("A"))
- })
-
- It("should test B", func() {
- Ω(B()).Should(Equal("B"))
- })
-
- It("should test C", func() {
- Ω(C()).Should(Equal("C"))
- })
-
- It("should test D", func() {
- Ω(D()).Should(Equal("D"))
- })
-
- It("should test E", func() {
- Ω(E()).Should(Equal("E"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go
deleted file mode 100644
index ccb3669a5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package tmp
-
-import (
- "testing"
-)
-
-func TestSomethingLessImportant(t *testing.T) {
- strp := "hello!"
- somethingImportant(t, &strp)
-}
-
-func somethingImportant(t *testing.T, message *string) {
- t.Log("Something important happened in a test: " + *message)
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go
deleted file mode 100644
index cde42e470..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package nested
-
-import (
- "testing"
-)
-
-func TestSomethingLessImportant(t *testing.T) {
- whatever := &UselessStruct{}
- t.Fail(whatever.ImportantField != "SECRET_PASSWORD")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go
deleted file mode 100644
index 7cdd326c5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package subpackage
-
-import (
- "testing"
-)
-
-func TestNestedSubPackages(t *testing.T) {
- t.Fail(true)
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go
deleted file mode 100644
index a682eeaff..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package tmp_test
-
-import (
- "testing"
-)
-
-type UselessStruct struct {
- ImportantField string
-}
-
-func TestSomethingImportant(t *testing.T) {
- whatever := &UselessStruct{}
- if whatever.ImportantField != "SECRET_PASSWORD" {
- t.Fail()
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go
deleted file mode 100644
index 049829a7d..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package tmp
-
-import (
- "testing"
-)
-
-type UselessStruct struct {
- ImportantField string
- T *testing.T
-}
-
-var testFunc = func(t *testing.T, arg *string) {}
-
-func assertEqual(t *testing.T, arg1, arg2 interface{}) {
- if arg1 != arg2 {
- t.Fail()
- }
-}
-
-func TestSomethingImportant(t *testing.T) {
- whatever := &UselessStruct{
- T: t,
- ImportantField: "SECRET_PASSWORD",
- }
- something := &UselessStruct{ImportantField: "string value"}
- assertEqual(t, whatever.ImportantField, "SECRET_PASSWORD")
- assertEqual(t, something.ImportantField, "string value")
-
- var foo = func(t *testing.T) {}
- foo(t)
-
- strp := "something"
- testFunc(t, &strp)
- t.Fail()
-}
-
-func Test3Things(t *testing.T) {
- if 3 != 3 {
- t.Fail()
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go
deleted file mode 100644
index 1c2c56cea..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package tmp
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Testing with Ginkgo", func() {
- It("something less important", func() {
-
- strp := "hello!"
- somethingImportant(GinkgoT(), &strp)
- })
-})
-
-func somethingImportant(t GinkgoTInterface, message *string) {
- t.Log("Something important happened in a test: " + *message)
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go
deleted file mode 100644
index a9a404b5c..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package tmp
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTmp(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Tmp Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go
deleted file mode 100644
index 3653eae82..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package subpackage
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Testing with Ginkgo", func() {
- It("nested sub packages", func() {
- GinkgoT().Fail(true)
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go
deleted file mode 100644
index 721d0f2c3..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package nested_test
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestNested(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Nested Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go
deleted file mode 100644
index 47364b814..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package nested
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Testing with Ginkgo", func() {
- It("something less important", func() {
-
- whatever := &UselessStruct{}
- GinkgoT().Fail(whatever.ImportantField != "SECRET_PASSWORD")
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go
deleted file mode 100644
index 1f2e332c4..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package tmp_test
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Testing with Ginkgo", func() {
- It("something important", func() {
-
- whatever := &UselessStruct{}
- if whatever.ImportantField != "SECRET_PASSWORD" {
- GinkgoT().Fail()
- }
- })
-})
-
-type UselessStruct struct {
- ImportantField string
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go
deleted file mode 100644
index 9ea229135..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package tmp_test
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestConvertFixtures(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "ConvertFixtures Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go
deleted file mode 100644
index dbe3b419d..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package tmp
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Testing with Ginkgo", func() {
- It("something important", func() {
-
- whatever := &UselessStruct{
- T: GinkgoT(),
- ImportantField: "SECRET_PASSWORD",
- }
- something := &UselessStruct{ImportantField: "string value"}
- assertEqual(GinkgoT(), whatever.ImportantField, "SECRET_PASSWORD")
- assertEqual(GinkgoT(), something.ImportantField, "string value")
-
- var foo = func(t GinkgoTInterface) {}
- foo(GinkgoT())
-
- strp := "something"
- testFunc(GinkgoT(), &strp)
- GinkgoT().Fail()
- })
- It("3 things", func() {
-
- if 3 != 3 {
- GinkgoT().Fail()
- }
- })
-})
-
-type UselessStruct struct {
- ImportantField string
- T GinkgoTInterface
-}
-
-var testFunc = func(t GinkgoTInterface, arg *string) {}
-
-func assertEqual(t GinkgoTInterface, arg1, arg2 interface{}) {
- if arg1 != arg2 {
- t.Fail()
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go
deleted file mode 100644
index e4d7e43b1..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package coverage_fixture
-
-import (
- _ "github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"
-)
-
-func A() string {
- return "A"
-}
-
-func B() string {
- return "B"
-}
-
-func C() string {
- return "C"
-}
-
-func D() string {
- return "D"
-}
-
-func E() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go
deleted file mode 100644
index 2831bf7d2..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package coverage_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestCoverageFixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "CoverageFixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go
deleted file mode 100644
index 12a72dce8..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package coverage_fixture_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture"
- . "github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("CoverageFixture", func() {
- It("should test A", func() {
- Ω(A()).Should(Equal("A"))
- })
-
- It("should test B", func() {
- Ω(B()).Should(Equal("B"))
- })
-
- It("should test C", func() {
- Ω(C()).Should(Equal("C"))
- })
-
- It("should test D", func() {
- Ω(D()).Should(Equal("D"))
- })
-
- It("should test external package", func() {
- Ω(Tested()).Should(Equal("tested"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture/external_coverage.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture/external_coverage.go
deleted file mode 100644
index 5280d4ddf..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture/external_coverage.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package external_coverage
-
-func Tested() string {
- return "tested"
-}
-
-func Untested() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_suite_test.go
deleted file mode 100644
index 429aebc5f..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package debug_parallel_fixture_test
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestDebugParallelFixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "DebugParallelFixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_test.go
deleted file mode 100644
index b609a8bca..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/debug_parallel_fixture/debug_parallel_fixture_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package debug_parallel_fixture_test
-
-import (
- "fmt"
- "time"
-
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("DebugParallelFixture", func() {
- It("emits output to a file", func() {
- for i := 0; i < 10; i += 1 {
- fmt.Printf("StdOut %d\n", i)
- GinkgoWriter.Write([]byte(fmt.Sprintf("GinkgoWriter %d\n", i)))
- }
- time.Sleep(time.Second)
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go
deleted file mode 100644
index 01e792696..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package does_not_compile_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestDoes_not_compile(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Does_not_compile Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go
deleted file mode 100644
index e4f22b3cc..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package does_not_compile_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/does_not_compile"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("DoesNotCompile", func() {
-
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go
deleted file mode 100644
index 97fa2e775..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package eventually_failing_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestEventuallyFailing(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "EventuallyFailing Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go
deleted file mode 100644
index 6c83b4258..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package eventually_failing_test
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("EventuallyFailing", func() {
- It("should fail on the third try", func() {
- time.Sleep(time.Second)
- files, err := ioutil.ReadDir(".")
- Ω(err).ShouldNot(HaveOccurred())
-
- numRuns := 1
- for _, file := range files {
- if strings.HasPrefix(file.Name(), "counter") {
- numRuns++
- }
- }
-
- Ω(numRuns).Should(BeNumerically("<", 3))
- ioutil.WriteFile(fmt.Sprintf("./counter-%d", numRuns), []byte("foo"), 0777)
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go
deleted file mode 100644
index 045ca7c66..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package synchronized_setup_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "fmt"
- "os"
- "testing"
-)
-
-func TestSynchronized_setup_tests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Synchronized_setup_tests Suite")
-}
-
-var beforeData string
-
-var _ = SynchronizedBeforeSuite(func() []byte {
- fmt.Printf("BEFORE_A_%d\n", GinkgoParallelNode())
- os.Exit(1)
- return []byte("WHAT EVZ")
-}, func(data []byte) {
- println("NEVER SEE THIS")
-})
-
-var _ = Describe("Synchronized Setup", func() {
- It("should do nothing", func() {
- Ω(true).Should(BeTrue())
- })
-
- It("should do nothing", func() {
- Ω(true).Should(BeTrue())
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go
deleted file mode 100644
index 6e822643a..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package fail_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFail_fixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Fail_fixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go
deleted file mode 100644
index ea6f71ca9..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package fail_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = It("handles top level failures", func() {
- Ω("a top level failure on line 9").Should(Equal("nope"))
- println("NEVER SEE THIS")
-})
-
-var _ = It("handles async top level failures", func(done Done) {
- Fail("an async top level failure on line 14")
- println("NEVER SEE THIS")
-}, 0.1)
-
-var _ = It("FAIL in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- Fail("a top level goroutine failure on line 21")
- println("NEVER SEE THIS")
- }()
-}, 0.1)
-
-var _ = Describe("Excercising different failure modes", func() {
- It("synchronous failures", func() {
- Ω("a sync failure").Should(Equal("nope"))
- println("NEVER SEE THIS")
- })
-
- It("synchronous panics", func() {
- panic("a sync panic")
- println("NEVER SEE THIS")
- })
-
- It("synchronous failures with FAIL", func() {
- Fail("a sync FAIL failure")
- println("NEVER SEE THIS")
- })
-
- It("async timeout", func(done Done) {
- Ω(true).Should(BeTrue())
- }, 0.1)
-
- It("async failure", func(done Done) {
- Ω("an async failure").Should(Equal("nope"))
- println("NEVER SEE THIS")
- }, 0.1)
-
- It("async panic", func(done Done) {
- panic("an async panic")
- println("NEVER SEE THIS")
- }, 0.1)
-
- It("async failure with FAIL", func(done Done) {
- Fail("an async FAIL failure")
- println("NEVER SEE THIS")
- }, 0.1)
-
- It("FAIL in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- Fail("a goroutine FAIL failure")
- println("NEVER SEE THIS")
- }()
- }, 0.1)
-
- It("Gomega in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- Ω("a goroutine failure").Should(Equal("nope"))
- println("NEVER SEE THIS")
- }()
- }, 0.1)
-
- It("Panic in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- panic("a goroutine panic")
- println("NEVER SEE THIS")
- }()
- }, 0.1)
-
- Measure("a FAIL measure", func(Benchmarker) {
- Fail("a measure FAIL failure")
- println("NEVER SEE THIS")
- }, 1)
-
- Measure("a gomega failed measure", func(Benchmarker) {
- Ω("a measure failure").Should(Equal("nope"))
- println("NEVER SEE THIS")
- }, 1)
-
- Measure("a panicking measure", func(Benchmarker) {
- panic("a measure panic")
- println("NEVER SEE THIS")
- }, 1)
-})
-
-var _ = Specify("a top level specify", func() {
- Fail("fail the test")
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go
deleted file mode 100644
index 0e410aaea..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package failing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFailingAfterSuite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "FailingAfterSuite Suite")
-}
-
-var _ = BeforeSuite(func() {
- println("BEFORE SUITE")
-})
-
-var _ = AfterSuite(func() {
- println("AFTER SUITE")
- panic("BAM!")
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go
deleted file mode 100644
index 3902ec6c5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package failing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("FailingBeforeSuite", func() {
- It("should run", func() {
- println("A TEST")
- })
-
- It("should run", func() {
- println("A TEST")
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go
deleted file mode 100644
index 109ea3608..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package failing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFailing_before_suite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Failing_before_suite Suite")
-}
-
-var _ = BeforeSuite(func() {
- println("BEFORE SUITE")
- panic("BAM!")
-})
-
-var _ = AfterSuite(func() {
- println("AFTER SUITE")
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go
deleted file mode 100644
index e8697c64a..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package failing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("FailingBeforeSuite", func() {
- It("should never run", func() {
- println("NEVER SEE THIS")
- })
-
- It("should never run", func() {
- println("NEVER SEE THIS")
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go
deleted file mode 100644
index e32cd619e..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package failing_ginkgo_tests
-
-func AlwaysFalse() bool {
- return false
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go
deleted file mode 100644
index 49939bda5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package failing_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFailing_ginkgo_tests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Failing_ginkgo_tests Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go
deleted file mode 100644
index d9c01e32c..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package failing_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("FailingGinkgoTests", func() {
- It("should fail", func() {
- Ω(AlwaysFalse()).Should(BeTrue())
- })
-
- It("should pass", func() {
- Ω(AlwaysFalse()).Should(BeFalse())
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go
deleted file mode 100644
index a440abdaa..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package flags
-
-func Tested() string {
- return "tested"
-}
-
-func Untested() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go
deleted file mode 100644
index 0b3071f62..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package flags_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFlags(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Flags Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go
deleted file mode 100644
index 27dadf19c..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package flags_test
-
-import (
- "flag"
- "fmt"
- remapped "math"
- _ "math/cmplx"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/flags_tests"
- . "github.com/onsi/gomega"
-)
-
-var customFlag string
-
-func init() {
- flag.StringVar(&customFlag, "customFlag", "default", "custom flag!")
-}
-
-var _ = Describe("Testing various flags", func() {
- FDescribe("the focused set", func() {
- Measure("a measurement", func(b Benchmarker) {
- b.RecordValue("a value", 3)
- }, 3)
-
- It("should honor -cover", func() {
- Ω(Tested()).Should(Equal("tested"))
- })
-
- It("should allow gcflags", func() {
- fmt.Printf("NaN returns %T\n", remapped.NaN())
- })
-
- PIt("should honor -failOnPending and -noisyPendings")
-
- Describe("smores", func() {
- It("should honor -skip: marshmallow", func() {
- println("marshmallow")
- })
-
- It("should honor -focus: chocolate", func() {
- println("chocolate")
- })
- })
-
- It("should detect races", func(done Done) {
- var a string
- go func() {
- a = "now you don't"
- close(done)
- }()
- a = "now you see me"
- println(a)
- })
-
- It("should randomize A", func() {
- println("RANDOM_A")
- })
-
- It("should randomize B", func() {
- println("RANDOM_B")
- })
-
- It("should randomize C", func() {
- println("RANDOM_C")
- })
-
- It("should honor -slowSpecThreshold", func() {
- time.Sleep(100 * time.Millisecond)
- })
-
- It("should pass in additional arguments after '--' directly to the test process", func() {
- fmt.Printf("CUSTOM_FLAG: %s", customFlag)
- })
- })
-
- Describe("more smores", func() {
- It("should not run these unless -focus is set", func() {
- println("smores")
- })
- })
-
- Describe("a failing test", func() {
- It("should fail", func() {
- Ω(true).Should(Equal(false))
- })
- })
-
- Describe("a flaky test", func() {
- runs := 0
- It("should only pass the second time it's run", func() {
- runs++
- Ω(runs).Should(BeNumerically("==", 2))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/README.md b/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/README.md
deleted file mode 100644
index 2b501a25d..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This file should remain the same, regardless of the fact that it contains FIt, FDescribe, or FWhen.
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go
deleted file mode 100644
index 92d0c6e48..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package focused_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFocused_fixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Focused_fixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go
deleted file mode 100644
index ea500eaf0..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package focused_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/extensions/table"
-)
-
-var _ = Describe("FocusedFixture", func() {
- FDescribe("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FContext("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FWhen("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FIt("focused", func() {
-
- })
-
- FSpecify("focused", func() {
-
- })
-
- FMeasure("focused", func(b Benchmarker) {
-
- }, 2)
-
- FDescribeTable("focused",
- func() {},
- Entry("focused"),
- )
-
- DescribeTable("focused",
- func() {},
- FEntry("focused"),
- )
-
- Describe("not focused", func() {
- It("not focused", func() {
-
- })
- })
-
- Context("not focused", func() {
- It("not focused", func() {
-
- })
- })
-
- It("not focused", func() {
-
- })
-
- Measure("not focused", func(b Benchmarker) {
-
- }, 2)
-
- DescribeTable("not focused",
- func() {},
- Entry("not focused"),
- )
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_suite_test.go
deleted file mode 100644
index 92d0c6e48..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package focused_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFocused_fixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Focused_fixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_test.go
deleted file mode 100644
index ea500eaf0..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture_with_vendor/focused_fixture_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package focused_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/extensions/table"
-)
-
-var _ = Describe("FocusedFixture", func() {
- FDescribe("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FContext("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FWhen("focused", func() {
- It("focused", func() {
-
- })
- })
-
- FIt("focused", func() {
-
- })
-
- FSpecify("focused", func() {
-
- })
-
- FMeasure("focused", func(b Benchmarker) {
-
- }, 2)
-
- FDescribeTable("focused",
- func() {},
- Entry("focused"),
- )
-
- DescribeTable("focused",
- func() {},
- FEntry("focused"),
- )
-
- Describe("not focused", func() {
- It("not focused", func() {
-
- })
- })
-
- Context("not focused", func() {
- It("not focused", func() {
-
- })
- })
-
- It("not focused", func() {
-
- })
-
- Measure("not focused", func(b Benchmarker) {
-
- }, 2)
-
- DescribeTable("not focused",
- func() {},
- Entry("not focused"),
- )
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_suite_test.go
deleted file mode 100644
index e8dd54b52..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package hanging_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestHangingSuite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "HangingSuite Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go
deleted file mode 100644
index 6a5a070e1..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package hanging_suite_test
-
-import (
- "fmt"
- "time"
-
- . "github.com/onsi/ginkgo"
-)
-
-var _ = AfterSuite(func() {
- fmt.Println("Heading Out After Suite")
-})
-
-var _ = Describe("HangingSuite", func() {
- BeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, "Just beginning")
- })
-
- Context("inner context", func() {
- BeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, "Almost there...")
- })
-
- It("should hang out for a while", func() {
- fmt.Fprintln(GinkgoWriter, "Hanging Out")
- fmt.Println("Sleeping...")
- time.Sleep(time.Hour)
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go
deleted file mode 100644
index ca12c0d93..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package more_ginkgo_tests
-
-func AlwaysTrue() bool {
- return true
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go
deleted file mode 100644
index 1e15c8857..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package more_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestMore_ginkgo_tests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "More_ginkgo_tests Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go
deleted file mode 100644
index 0549f62fb..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package more_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("MoreGinkgoTests", func() {
- It("should pass", func() {
- Ω(AlwaysTrue()).Should(BeTrue())
- })
-
- It("should always pass", func() {
- Ω(AlwaysTrue()).Should(BeTrue())
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn.go
deleted file mode 100644
index bdf1b54b5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package no_test_fn
-
-func StringIdentity(a string) string {
- return a
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn_test.go
deleted file mode 100644
index 6c38b1e43..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_test_fn/no_test_fn_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package no_test_fn_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/no_test_fn"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("NoTestFn", func() {
- It("should proxy strings", func() {
- Ω(StringIdentity("foo")).Should(Equal("foo"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go
deleted file mode 100644
index da29a2cad..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package main
-
-func main() {
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go
deleted file mode 100644
index b710dd129..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package passing_ginkgo_tests
-
-func StringIdentity(a string) string {
- return a
-}
-
-func IntegerIdentity(a int) int {
- return a
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go
deleted file mode 100644
index 31a3f7d0c..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package passing_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestPassing_ginkgo_tests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Passing_ginkgo_tests Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go
deleted file mode 100644
index a5822fdd7..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package passing_ginkgo_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("PassingGinkgoTests", func() {
- It("should proxy strings", func() {
- Ω(StringIdentity("foo")).Should(Equal("foo"))
- })
-
- It("should proxy integers", func() {
- Ω(IntegerIdentity(3)).Should(Equal(3))
- })
-
- It("should do it again", func() {
- Ω(StringIdentity("foo")).Should(Equal("foo"))
- Ω(IntegerIdentity(3)).Should(Equal(3))
- })
-
- It("should be able to run Bys", func() {
- By("emitting one By")
- Ω(3).Should(Equal(3))
-
- By("emitting another By")
- Ω(4).Should(Equal(4))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go
deleted file mode 100644
index 86c9aa2ab..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package passing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestPassingSuiteSetup(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "PassingSuiteSetup Suite")
-}
-
-var a string
-var b string
-
-var _ = BeforeSuite(func() {
- a = "ran before suite"
- println("BEFORE SUITE")
-})
-
-var _ = AfterSuite(func() {
- b = "ran after suite"
- println("AFTER SUITE")
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go
deleted file mode 100644
index f139e1d22..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package passing_before_suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("PassingSuiteSetup", func() {
- It("should pass", func() {
- Ω(a).Should(Equal("ran before suite"))
- Ω(b).Should(BeEmpty())
- })
-
- It("should pass", func() {
- Ω(a).Should(Equal("ran before suite"))
- Ω(b).Should(BeEmpty())
- })
-
- It("should pass", func() {
- Ω(a).Should(Equal("ran before suite"))
- Ω(b).Should(BeEmpty())
- })
-
- It("should pass", func() {
- Ω(a).Should(Equal("ran before suite"))
- Ω(b).Should(BeEmpty())
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_suite_test.go
deleted file mode 100644
index 74262bbc1..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package progress_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestProgressFixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "ProgressFixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_test.go
deleted file mode 100644
index b7f26c25b..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/progress_fixture/progress_fixture_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package progress_fixture_test
-
-import (
- "fmt"
-
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("ProgressFixture", func() {
- BeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, ">outer before<")
- })
-
- JustBeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, ">outer just before<")
- })
-
- AfterEach(func() {
- fmt.Fprintln(GinkgoWriter, ">outer after<")
- })
-
- Context("Inner Context", func() {
- BeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, ">inner before<")
- })
-
- JustBeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, ">inner just before<")
- })
-
- AfterEach(func() {
- fmt.Fprintln(GinkgoWriter, ">inner after<")
- })
-
- When("Inner When", func() {
- BeforeEach(func() {
- fmt.Fprintln(GinkgoWriter, ">inner before<")
- })
-
- It("should emit progress as it goes", func() {
- fmt.Fprintln(GinkgoWriter, ">it<")
- })
- })
- })
-
- Specify("should emit progress as it goes", func() {
- fmt.Fprintln(GinkgoWriter, ">specify<")
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_suite_test.go
deleted file mode 100644
index b2028cf55..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package fail_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFail_fixture(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Skip_fixture Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go
deleted file mode 100644
index e406aeb46..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package fail_fixture_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = It("handles top level skips", func() {
- Skip("a top level skip on line 9")
- println("NEVER SEE THIS")
-})
-
-var _ = It("handles async top level skips", func(done Done) {
- Skip("an async top level skip on line 14")
- println("NEVER SEE THIS")
-}, 0.1)
-
-var _ = It("SKIP in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- Skip("a top level goroutine skip on line 21")
- println("NEVER SEE THIS")
- }()
-}, 0.1)
-
-var _ = Describe("Excercising different skip modes", func() {
- It("synchronous skip", func() {
- Skip("a sync SKIP")
- println("NEVER SEE THIS")
- })
-
- It("async skip", func(done Done) {
- Skip("an async SKIP")
- println("NEVER SEE THIS")
- }, 0.1)
-
- It("SKIP in a goroutine", func(done Done) {
- go func() {
- defer GinkgoRecover()
- Skip("a goroutine SKIP")
- println("NEVER SEE THIS")
- }()
- }, 0.1)
-
- Measure("a SKIP measure", func(Benchmarker) {
- Skip("a measure SKIP")
- println("NEVER SEE THIS")
- }, 1)
-})
-
-var _ = Describe("SKIP in a BeforeEach", func() {
- BeforeEach(func() {
- Skip("a BeforeEach SKIP")
- println("NEVER SEE THIS")
- })
-
- It("a SKIP BeforeEach", func() {
- println("NEVER SEE THIS")
- })
-})
-
-var _ = Describe("SKIP in an AfterEach", func() {
- AfterEach(func() {
- Skip("an AfterEach SKIP")
- println("NEVER SEE THIS")
- })
-
- It("a SKIP AfterEach", func() {
- Expect(true).To(BeTrue())
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command.go
deleted file mode 100644
index 1d6704881..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package suite_command
-
-func Tested() string {
- return "tested"
-}
-
-func Untested() string {
- return "untested"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_suite_test.go
deleted file mode 100644
index 7f76d8b8f..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package suite_command_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestSuiteCommand(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Suite Command Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_test.go
deleted file mode 100644
index e083d27a2..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/suite_command_tests/suite_command_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package suite_command_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing suite command", func() {
- It("it should succeed", func() {
- Ω(true).Should(Equal(true))
- })
-
- PIt("a failing test", func() {
- It("should fail", func() {
- Ω(true).Should(Equal(false))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go
deleted file mode 100644
index b734854ee..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package synchronized_setup_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "fmt"
- "testing"
- "time"
-)
-
-func TestSynchronized_setup_tests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Synchronized_setup_tests Suite")
-}
-
-var beforeData string
-
-var _ = SynchronizedBeforeSuite(func() []byte {
- fmt.Printf("BEFORE_A_%d\n", GinkgoParallelNode())
- time.Sleep(100 * time.Millisecond)
- return []byte("DATA")
-}, func(data []byte) {
- fmt.Printf("BEFORE_B_%d: %s\n", GinkgoParallelNode(), string(data))
- beforeData += string(data) + "OTHER"
-})
-
-var _ = SynchronizedAfterSuite(func() {
- fmt.Printf("\nAFTER_A_%d\n", GinkgoParallelNode())
- time.Sleep(100 * time.Millisecond)
-}, func() {
- fmt.Printf("AFTER_B_%d\n", GinkgoParallelNode())
-})
-
-var _ = Describe("Synchronized Setup", func() {
- It("should run the before suite once", func() {
- Ω(beforeData).Should(Equal("DATAOTHER"))
- })
-
- It("should run the before suite once", func() {
- Ω(beforeData).Should(Equal("DATAOTHER"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go
deleted file mode 100644
index 517623536..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build complex_tests
-
-package tags_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("Ignored", func() {
- It("should not have these tests", func() {
-
- })
-
- It("should not have these tests", func() {
-
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go
deleted file mode 100644
index dcb11bb1b..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package tags_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTagsTests(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "TagsTests Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go
deleted file mode 100644
index b91a8923a..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package tags_tests_test
-
-import (
- . "github.com/onsi/ginkgo"
-)
-
-var _ = Describe("TagsTests", func() {
- It("should have a test", func() {
-
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_suite_test.go
deleted file mode 100644
index 8976370d3..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package test_description_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTestDescription(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "TestDescription Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_test.go
deleted file mode 100644
index 53c2779ea..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/test_description/test_description_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package test_description_test
-
-import (
- "fmt"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("TestDescription", func() {
- It("should pass", func() {
- Ω(true).Should(BeTrue())
- })
-
- It("should fail", func() {
- Ω(true).Should(BeFalse())
- })
-
- AfterEach(func() {
- description := CurrentGinkgoTestDescription()
- fmt.Printf("%s:%t\n", description.FullTestText, description.Failed)
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go
deleted file mode 100644
index de2c6bbb7..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package A
-
-import "github.com/onsi/B"
-
-func DoIt() string {
- return B.DoIt()
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go
deleted file mode 100644
index 1b6cff4c7..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package A_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestA(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "A Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go
deleted file mode 100644
index 003530aae..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package A_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("A", func() {
- It("should do it", func() {
- Ω(DoIt()).Should(Equal("done!"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go
deleted file mode 100644
index 990bab365..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package B
-
-import "github.com/onsi/C"
-
-func DoIt() string {
- return C.DoIt()
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go
deleted file mode 100644
index e54fce668..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package B_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestB(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "B Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go
deleted file mode 100644
index b147913c0..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package B_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("B", func() {
- It("should do it", func() {
- Ω(DoIt()).Should(Equal("done!"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go
deleted file mode 100644
index 205b68886..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package C
-
-func DoIt() string {
- return "done!"
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.json b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.json
deleted file mode 100644
index 421d025e0..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "fixture": "data"
-} \ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go
deleted file mode 100644
index 57a7a96ba..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package C_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestC(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "C Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go
deleted file mode 100644
index 7703fefa3..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package C_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("C", func() {
- It("should do it", func() {
- Ω(DoIt()).Should(Equal("done!"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go
deleted file mode 100644
index 4371b852f..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package D
-
-import "github.com/onsi/C"
-
-func DoIt() string {
- return C.DoIt()
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go
deleted file mode 100644
index 0ebefe6b7..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package D_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestD(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "D Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go
deleted file mode 100644
index 097945bf9..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package D_test
-
-import (
- . "github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("D", func() {
- It("should do it", func() {
- Ω(DoIt()).Should(Equal("done!"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go
deleted file mode 100644
index cb8fc8bc2..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package xunit_tests
-
-func AlwaysTrue() bool {
- return true
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go
deleted file mode 100644
index a6ebbe147..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package xunit_tests
-
-import (
- "testing"
-)
-
-func TestAlwaysTrue(t *testing.T) {
- if AlwaysTrue() != true {
- t.Errorf("Expected true, got false")
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/convert_test.go b/vendor/github.com/onsi/ginkgo/integration/convert_test.go
deleted file mode 100644
index f4fd678c5..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/convert_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package integration_test
-
-import (
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ginkgo convert", func() {
- var tmpDir string
-
- readConvertedFileNamed := func(pathComponents ...string) string {
- pathToFile := filepath.Join(tmpDir, "convert_fixtures", filepath.Join(pathComponents...))
- bytes, err := ioutil.ReadFile(pathToFile)
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
-
- return string(bytes)
- }
-
- readGoldMasterNamed := func(filename string) string {
- bytes, err := ioutil.ReadFile(filepath.Join("_fixtures", "convert_goldmasters", filename))
- Ω(err).ShouldNot(HaveOccurred())
-
- return string(bytes)
- }
-
- BeforeEach(func() {
- var err error
-
- tmpDir, err = ioutil.TempDir("", "ginkgo-convert")
- Ω(err).ShouldNot(HaveOccurred())
-
- err = exec.Command("cp", "-r", filepath.Join("_fixtures", "convert_fixtures"), tmpDir).Run()
- Ω(err).ShouldNot(HaveOccurred())
- })
-
- JustBeforeEach(func() {
- cwd, err := os.Getwd()
- Ω(err).ShouldNot(HaveOccurred())
-
- relPath, err := filepath.Rel(cwd, filepath.Join(tmpDir, "convert_fixtures"))
- Ω(err).ShouldNot(HaveOccurred())
-
- cmd := exec.Command(pathToGinkgo, "convert", relPath)
- cmd.Env = os.Environ()
- for i, env := range cmd.Env {
- if strings.HasPrefix(env, "PATH") {
- cmd.Env[i] = cmd.Env[i] + ":" + filepath.Dir(pathToGinkgo)
- break
- }
- }
- err = cmd.Run()
- Ω(err).ShouldNot(HaveOccurred())
- })
-
- AfterEach(func() {
- err := os.RemoveAll(tmpDir)
- Ω(err).ShouldNot(HaveOccurred())
- })
-
- It("rewrites xunit tests as ginkgo tests", func() {
- convertedFile := readConvertedFileNamed("xunit_test.go")
- goldMaster := readGoldMasterNamed("xunit_test.go")
- Ω(convertedFile).Should(Equal(goldMaster))
- })
-
- It("rewrites all usages of *testing.T as mr.T()", func() {
- convertedFile := readConvertedFileNamed("extra_functions_test.go")
- goldMaster := readGoldMasterNamed("extra_functions_test.go")
- Ω(convertedFile).Should(Equal(goldMaster))
- })
-
- It("rewrites tests in the package dir that belong to other packages", func() {
- convertedFile := readConvertedFileNamed("outside_package_test.go")
- goldMaster := readGoldMasterNamed("outside_package_test.go")
- Ω(convertedFile).Should(Equal(goldMaster))
- })
-
- It("rewrites tests in nested packages", func() {
- convertedFile := readConvertedFileNamed("nested", "nested_test.go")
- goldMaster := readGoldMasterNamed("nested_test.go")
- Ω(convertedFile).Should(Equal(goldMaster))
- })
-
- Context("ginkgo test suite files", func() {
- It("creates a ginkgo test suite file for the package you specified", func() {
- testsuite := readConvertedFileNamed("convert_fixtures_suite_test.go")
- goldMaster := readGoldMasterNamed("suite_test.go")
- Ω(testsuite).Should(Equal(goldMaster))
- })
-
- It("converts go tests in deeply nested packages (some may not contain go files)", func() {
- testsuite := readConvertedFileNamed("nested_without_gofiles", "subpackage", "nested_subpackage_test.go")
- goldMaster := readGoldMasterNamed("nested_subpackage_test.go")
- Ω(testsuite).Should(Equal(goldMaster))
- })
-
- It("creates ginkgo test suites for all nested packages", func() {
- testsuite := readConvertedFileNamed("nested", "nested_suite_test.go")
- goldMaster := readGoldMasterNamed("nested_suite_test.go")
- Ω(testsuite).Should(Equal(goldMaster))
- })
- })
-
- Context("with an existing test suite file", func() {
- BeforeEach(func() {
- goldMaster := readGoldMasterNamed("fixtures_suite_test.go")
- err := ioutil.WriteFile(filepath.Join(tmpDir, "convert_fixtures", "tmp_suite_test.go"), []byte(goldMaster), 0600)
- Ω(err).ShouldNot(HaveOccurred())
- })
-
- It("gracefully handles existing test suite files", func() {
- //nothing should have gone wrong!
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/coverage_test.go b/vendor/github.com/onsi/ginkgo/integration/coverage_test.go
deleted file mode 100644
index a1d24bfed..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/coverage_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package integration_test
-
-import (
- "os/exec"
-
- "fmt"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Coverage Specs", func() {
- Context("when it runs coverage analysis in series and in parallel", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
- })
- It("works", func() {
- session := startGinkgo("./_fixtures/coverage_fixture", "-cover")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω(session.Out).Should(gbytes.Say(("coverage: 80.0% of statements")))
-
- coverFile := "./_fixtures/coverage_fixture/coverage_fixture.coverprofile"
- serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
- Ω(err).ShouldNot(HaveOccurred())
-
- removeSuccessfully(coverFile)
-
- Eventually(startGinkgo("./_fixtures/coverage_fixture", "-cover", "-nodes=4")).Should(gexec.Exit(0))
-
- parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
- Ω(err).ShouldNot(HaveOccurred())
-
- Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
-
- By("handling external packages", func() {
- session = startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω(session.Out).Should(gbytes.Say("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
-
- serialCoverProfileOutput, err = exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
- Ω(err).ShouldNot(HaveOccurred())
-
- removeSuccessfully("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
-
- Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
-
- parallelCoverProfileOutput, err = exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
- Ω(err).ShouldNot(HaveOccurred())
-
- Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
- })
- })
- })
-
- Context("when a custom profile name is specified", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/coverage_fixture/coverage.txt")
- })
-
- It("generates cover profiles with the specified name", func() {
- session := startGinkgo("./_fixtures/coverage_fixture", "-cover", "-coverprofile=coverage.txt")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω("./_fixtures/coverage_fixture/coverage.txt").Should(BeARegularFile())
- })
- })
-
- Context("when run in recursive mode", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/combined_coverage_fixture/coverage-recursive.txt")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverage-recursive.txt")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverage-recursive.txt")
- })
-
- It("generates a coverage file per package", func() {
- session := startGinkgo("./_fixtures/combined_coverage_fixture", "-r", "-cover", "-coverprofile=coverage-recursive.txt")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω("./_fixtures/combined_coverage_fixture/first_package/coverage-recursive.txt").Should(BeARegularFile())
- Ω("./_fixtures/combined_coverage_fixture/second_package/coverage-recursive.txt").Should(BeARegularFile())
- })
- })
-
- Context("when run in parallel mode", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/coverage_fixture/coverage-parallel.txt")
- })
-
- It("works", func() {
- session := startGinkgo("./_fixtures/coverage_fixture", "-p", "-cover", "-coverprofile=coverage-parallel.txt")
-
- Eventually(session).Should(gexec.Exit(0))
-
- Ω("./_fixtures/coverage_fixture/coverage-parallel.txt").Should(BeARegularFile())
- })
- })
-
- Context("when run in recursive mode specifying a coverprofile", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/combined_coverage_fixture/coverprofile-recursive.txt")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverprofile-recursive.txt")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverprofile-recursive.txt")
- })
-
- It("combines the coverages", func() {
- session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./", "-r", "-cover", "-coverprofile=coverprofile-recursive.txt")
- Eventually(session).Should(gexec.Exit(0))
-
- By("generating a combined coverage file", func() {
- Ω("./_fixtures/combined_coverage_fixture/coverprofile-recursive.txt").Should(BeARegularFile())
- })
-
- By("also generating the single package coverage files", func() {
- Ω("./_fixtures/combined_coverage_fixture/first_package/coverprofile-recursive.txt").Should(BeARegularFile())
- Ω("./_fixtures/combined_coverage_fixture/second_package/coverprofile-recursive.txt").Should(BeARegularFile())
- })
- })
- })
-
- It("Fails with an error if output dir and coverprofile were set, but the output dir did not exist", func() {
- session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./all/profiles/here", "-r", "-cover", "-coverprofile=coverage.txt")
-
- Eventually(session).Should(gexec.Exit(1))
- output := session.Out.Contents()
- Ω(string(output)).Should(ContainSubstring("Unable to create combined profile, outputdir does not exist: ./all/profiles/here"))
- })
-
- Context("when only output dir was set", func() {
- AfterEach(func() {
- removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package.coverprofile")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverage.txt")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package.coverprofile")
- removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverage.txt")
- })
- It("moves coverages", func() {
- session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./", "-r", "-cover")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω("./_fixtures/combined_coverage_fixture/first_package.coverprofile").Should(BeARegularFile())
- Ω("./_fixtures/combined_coverage_fixture/second_package.coverprofile").Should(BeARegularFile())
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/fail_test.go b/vendor/github.com/onsi/ginkgo/integration/fail_test.go
deleted file mode 100644
index 53b2a67b4..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/fail_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Failing Specs", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("failing")
- copyIn(fixturePath("fail_fixture"), pathToTest, false)
- })
-
- It("should fail in all the possible ways", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS"))
-
- Ω(output).Should(ContainSubstring("a top level failure on line 9"))
- Ω(output).Should(ContainSubstring("fail_fixture_test.go:9"))
- Ω(output).Should(ContainSubstring("an async top level failure on line 14"))
- Ω(output).Should(ContainSubstring("fail_fixture_test.go:14"))
- Ω(output).Should(ContainSubstring("a top level goroutine failure on line 21"))
- Ω(output).Should(ContainSubstring("fail_fixture_test.go:21"))
-
- Ω(output).Should(ContainSubstring("a sync failure"))
- Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a sync panic`))
- Ω(output).Should(ContainSubstring("a sync FAIL failure"))
- Ω(output).Should(ContainSubstring("async timeout [It]"))
- Ω(output).Should(ContainSubstring("Timed out"))
- Ω(output).Should(ContainSubstring("an async failure"))
- Ω(output).Should(MatchRegexp(`Test Panicked\n\s+an async panic`))
- Ω(output).Should(ContainSubstring("an async FAIL failure"))
- Ω(output).Should(ContainSubstring("a goroutine FAIL failure"))
- Ω(output).Should(ContainSubstring("a goroutine failure"))
- Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a goroutine panic`))
- Ω(output).Should(ContainSubstring("a measure failure"))
- Ω(output).Should(ContainSubstring("a measure FAIL failure"))
- Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a measure panic`))
-
- Ω(output).Should(ContainSubstring("a top level specify"))
- Ω(output).ShouldNot(ContainSubstring("ginkgo_dsl.go"))
- // depending on the go version this could be the first line of the Specify
- // block (>= go1.9) or the last line of the Specify block (< go1.9)
- Ω(output).Should(Or(ContainSubstring("fail_fixture_test.go:101"), ContainSubstring("fail_fixture_test.go:103")))
- Ω(output).Should(ContainSubstring("fail_fixture_test.go:102"))
-
- Ω(output).Should(ContainSubstring("0 Passed | 17 Failed"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/flags_test.go b/vendor/github.com/onsi/ginkgo/integration/flags_test.go
deleted file mode 100644
index d84eb46cc..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/flags_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package integration_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Flags Specs", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("flags")
- copyIn(fixturePath("flags_tests"), pathToTest, false)
- })
-
- getRandomOrders := func(output string) []int {
- return []int{strings.Index(output, "RANDOM_A"), strings.Index(output, "RANDOM_B"), strings.Index(output, "RANDOM_C")}
- }
-
- It("normally passes, runs measurements, prints out noisy pendings, does not randomize tests, and honors the programmatic focus", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Ran 3 samples:"), "has a measurement")
- Ω(output).Should(ContainSubstring("11 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- Ω(output).Should(ContainSubstring("3 Skipped"))
- Ω(output).Should(ContainSubstring("[PENDING]"))
- Ω(output).Should(ContainSubstring("marshmallow"))
- Ω(output).Should(ContainSubstring("chocolate"))
- Ω(output).Should(ContainSubstring("CUSTOM_FLAG: default"))
- Ω(output).Should(ContainSubstring("Detected Programmatic Focus - setting exit status to %d", types.GINKGO_FOCUS_EXIT_CODE))
- Ω(output).ShouldNot(ContainSubstring("smores"))
- Ω(output).ShouldNot(ContainSubstring("SLOW TEST"))
- Ω(output).ShouldNot(ContainSubstring("should honor -slowSpecThreshold"))
-
- orders := getRandomOrders(output)
- Ω(orders[0]).Should(BeNumerically("<", orders[1]))
- Ω(orders[1]).Should(BeNumerically("<", orders[2]))
- })
-
- It("should run a coverprofile when passed -cover", func() {
- session := startGinkgo(pathToTest, "--noColor", "--cover", "--focus=the focused set")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- _, err := os.Stat(filepath.Join(pathToTest, "flags.coverprofile"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(output).Should(ContainSubstring("coverage: "))
- })
-
- It("should fail when there are pending tests and it is passed --failOnPending", func() {
- session := startGinkgo(pathToTest, "--noColor", "--failOnPending")
- Eventually(session).Should(gexec.Exit(1))
- })
-
- It("should fail if the test suite takes longer than the timeout", func() {
- session := startGinkgo(pathToTest, "--noColor", "--timeout=1ms")
- Eventually(session).Should(gexec.Exit(1))
- })
-
- It("should not print out pendings when --noisyPendings=false", func() {
- session := startGinkgo(pathToTest, "--noColor", "--noisyPendings=false")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).ShouldNot(ContainSubstring("[PENDING]"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- })
-
- It("should override the programmatic focus when told to focus", func() {
- session := startGinkgo(pathToTest, "--noColor", "--focus=smores")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("marshmallow"))
- Ω(output).Should(ContainSubstring("chocolate"))
- Ω(output).Should(ContainSubstring("smores"))
- Ω(output).Should(ContainSubstring("3 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("0 Pending"))
- Ω(output).Should(ContainSubstring("12 Skipped"))
- })
-
- It("should override the programmatic focus when told to skip", func() {
- session := startGinkgo(pathToTest, "--noColor", "--skip=marshmallow|failing|flaky")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).ShouldNot(ContainSubstring("marshmallow"))
- Ω(output).Should(ContainSubstring("chocolate"))
- Ω(output).Should(ContainSubstring("smores"))
- Ω(output).Should(ContainSubstring("11 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- Ω(output).Should(ContainSubstring("3 Skipped"))
- })
-
- It("should run the race detector when told to", func() {
- session := startGinkgo(pathToTest, "--noColor", "--race")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("WARNING: DATA RACE"))
- })
-
- It("should randomize tests when told to", func() {
- session := startGinkgo(pathToTest, "--noColor", "--randomizeAllSpecs", "--seed=17")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- orders := getRandomOrders(output)
- Ω(orders[0]).ShouldNot(BeNumerically("<", orders[1]))
- })
-
- It("should skip measurements when told to", func() {
- session := startGinkgo(pathToTest, "--skipMeasurements")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).ShouldNot(ContainSubstring("Ran 3 samples:"), "has a measurement")
- Ω(output).Should(ContainSubstring("4 Skipped"))
- })
-
- It("should watch for slow specs", func() {
- session := startGinkgo(pathToTest, "--slowSpecThreshold=0.05")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("SLOW TEST"))
- Ω(output).Should(ContainSubstring("should honor -slowSpecThreshold"))
- })
-
- It("should pass additional arguments in", func() {
- session := startGinkgo(pathToTest, "--", "--customFlag=madagascar")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("CUSTOM_FLAG: madagascar"))
- })
-
- It("should print out full stack traces for failures when told to", func() {
- session := startGinkgo(pathToTest, "--focus=a failing test", "--trace")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Full Stack Trace"))
- })
-
- It("should fail fast when told to", func() {
- pathToTest = tmpPath("fail")
- copyIn(fixturePath("fail_fixture"), pathToTest, false)
- session := startGinkgo(pathToTest, "--failFast")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("1 Failed"))
- Ω(output).Should(ContainSubstring("16 Skipped"))
- })
-
- Context("with a flaky test", func() {
- It("should normally fail", func() {
- session := startGinkgo(pathToTest, "--focus=flaky")
- Eventually(session).Should(gexec.Exit(1))
- })
-
- It("should pass if retries are requested", func() {
- session := startGinkgo(pathToTest, "--focus=flaky --flakeAttempts=2")
- Eventually(session).Should(gexec.Exit(0))
- })
- })
-
- It("should perform a dry run when told to", func() {
- pathToTest = tmpPath("fail")
- copyIn(fixturePath("fail_fixture"), pathToTest, false)
- session := startGinkgo(pathToTest, "--dryRun", "-v")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("synchronous failures"))
- Ω(output).Should(ContainSubstring("17 Specs"))
- Ω(output).Should(ContainSubstring("0 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- })
-
- regextest := func(regexOption string, skipOrFocus string) string {
- pathToTest = tmpPath("passing")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- session := startGinkgo(pathToTest, regexOption, "--dryRun", "-v", skipOrFocus)
- Eventually(session).Should(gexec.Exit(0))
- return string(session.Out.Contents())
- }
-
- It("regexScansFilePath (enabled) should skip and focus on file names", func() {
- output := regextest("-regexScansFilePath=true", "-skip=/passing/") // everything gets skipped (nothing runs)
- Ω(output).Should(ContainSubstring("0 of 4 Specs"))
- output = regextest("-regexScansFilePath=true", "-focus=/passing/") // everything gets focused (everything runs)
- Ω(output).Should(ContainSubstring("4 of 4 Specs"))
- })
-
- It("regexScansFilePath (disabled) should not effect normal filtering", func() {
- output := regextest("-regexScansFilePath=false", "-skip=/passing/") // nothing gets skipped (everything runs)
- Ω(output).Should(ContainSubstring("4 of 4 Specs"))
- output = regextest("-regexScansFilePath=false", "-focus=/passing/") // nothing gets focused (nothing runs)
- Ω(output).Should(ContainSubstring("0 of 4 Specs"))
- })
-
- It("should honor compiler flags", func() {
- session := startGinkgo(pathToTest, "-gcflags=-importmap 'math=math/cmplx'")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
- Ω(output).Should(ContainSubstring("NaN returns complex128"))
- })
-
- It("should honor covermode flag", func() {
- session := startGinkgo(pathToTest, "--noColor", "--covermode=count", "--focus=the focused set")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
- Ω(output).Should(ContainSubstring("coverage: "))
-
- coverageFile := filepath.Join(pathToTest, "flags.coverprofile")
- _, err := os.Stat(coverageFile)
- Ω(err).ShouldNot(HaveOccurred())
- contents, err := ioutil.ReadFile(coverageFile)
- Ω(err).ShouldNot(HaveOccurred())
- Ω(contents).Should(ContainSubstring("mode: count"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/integration.go b/vendor/github.com/onsi/ginkgo/integration/integration.go
deleted file mode 100644
index 76ab1b728..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/integration.go
+++ /dev/null
@@ -1 +0,0 @@
-package integration
diff --git a/vendor/github.com/onsi/ginkgo/integration/integration_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/integration_suite_test.go
deleted file mode 100644
index 32ec741c9..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/integration_suite_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package integration_test
-
-import (
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-
- "testing"
- "time"
-)
-
-var tmpDir string
-var pathToGinkgo string
-
-func TestIntegration(t *testing.T) {
- SetDefaultEventuallyTimeout(30 * time.Second)
- RegisterFailHandler(Fail)
- RunSpecs(t, "Integration Suite")
-}
-
-var _ = SynchronizedBeforeSuite(func() []byte {
- pathToGinkgo, err := gexec.Build("github.com/onsi/ginkgo/ginkgo")
- Ω(err).ShouldNot(HaveOccurred())
- return []byte(pathToGinkgo)
-}, func(computedPathToGinkgo []byte) {
- pathToGinkgo = string(computedPathToGinkgo)
-})
-
-var _ = BeforeEach(func() {
- var err error
- tmpDir, err = ioutil.TempDir("", "ginkgo-run")
- Ω(err).ShouldNot(HaveOccurred())
-})
-
-var _ = AfterEach(func() {
- err := os.RemoveAll(tmpDir)
- Ω(err).ShouldNot(HaveOccurred())
-})
-
-var _ = SynchronizedAfterSuite(func() {}, func() {
- gexec.CleanupBuildArtifacts()
-})
-
-func tmpPath(destination string) string {
- return filepath.Join(tmpDir, destination)
-}
-
-func fixturePath(name string) string {
- return filepath.Join("_fixtures", name)
-}
-
-func copyIn(sourcePath, destinationPath string, recursive bool) {
- err := os.MkdirAll(destinationPath, 0777)
- Expect(err).NotTo(HaveOccurred())
-
- files, err := ioutil.ReadDir(sourcePath)
- Expect(err).NotTo(HaveOccurred())
- for _, f := range files {
- srcPath := filepath.Join(sourcePath, f.Name())
- dstPath := filepath.Join(destinationPath, f.Name())
- if f.IsDir() {
- if recursive {
- copyIn(srcPath, dstPath, recursive)
- }
- continue
- }
-
- src, err := os.Open(srcPath)
-
- Expect(err).NotTo(HaveOccurred())
- defer src.Close()
-
- dst, err := os.Create(dstPath)
- Expect(err).NotTo(HaveOccurred())
- defer dst.Close()
-
- _, err = io.Copy(dst, src)
- Expect(err).NotTo(HaveOccurred())
- }
-}
-
-func sameFile(filePath, otherFilePath string) bool {
- content, readErr := ioutil.ReadFile(filePath)
- Expect(readErr).NotTo(HaveOccurred())
- otherContent, readErr := ioutil.ReadFile(otherFilePath)
- Expect(readErr).NotTo(HaveOccurred())
- Expect(string(content)).To(Equal(string(otherContent)))
- return true
-}
-
-func sameFolder(sourcePath, destinationPath string) bool {
- files, err := ioutil.ReadDir(sourcePath)
- Expect(err).NotTo(HaveOccurred())
- for _, f := range files {
- srcPath := filepath.Join(sourcePath, f.Name())
- dstPath := filepath.Join(destinationPath, f.Name())
- if f.IsDir() {
- sameFolder(srcPath, dstPath)
- continue
- }
- Expect(sameFile(srcPath, dstPath)).To(BeTrue())
- }
- return true
-}
-
-func ginkgoCommand(dir string, args ...string) *exec.Cmd {
- cmd := exec.Command(pathToGinkgo, args...)
- cmd.Dir = dir
-
- return cmd
-}
-
-func startGinkgo(dir string, args ...string) *gexec.Session {
- cmd := ginkgoCommand(dir, args...)
- session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
- Ω(err).ShouldNot(HaveOccurred())
- return session
-}
-
-func removeSuccessfully(path string) {
- err := os.RemoveAll(path)
- Expect(err).NotTo(HaveOccurred())
-}
diff --git a/vendor/github.com/onsi/ginkgo/integration/interrupt_test.go b/vendor/github.com/onsi/ginkgo/integration/interrupt_test.go
deleted file mode 100644
index d4158b806..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/interrupt_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package integration_test
-
-import (
- "os/exec"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Interrupt", func() {
- var pathToTest string
- BeforeEach(func() {
- pathToTest = tmpPath("hanging")
- copyIn(fixturePath("hanging_suite"), pathToTest, false)
- })
-
- Context("when interrupting a suite", func() {
- var session *gexec.Session
- BeforeEach(func() {
- //we need to signal the actual process, so we must compile the test first
- var err error
- cmd := exec.Command("go", "test", "-c")
- cmd.Dir = pathToTest
- session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
- Ω(err).ShouldNot(HaveOccurred())
- Eventually(session).Should(gexec.Exit(0))
-
- //then run the compiled test directly
- cmd = exec.Command("./hanging.test", "--test.v=true", "--ginkgo.noColor")
- cmd.Dir = pathToTest
- session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
- Ω(err).ShouldNot(HaveOccurred())
-
- Eventually(session).Should(gbytes.Say("Sleeping..."))
- session.Interrupt()
- Eventually(session, 1000).Should(gexec.Exit(1))
- })
-
- It("should emit the contents of the GinkgoWriter", func() {
- Ω(session).Should(gbytes.Say("Just beginning"))
- Ω(session).Should(gbytes.Say("Almost there..."))
- Ω(session).Should(gbytes.Say("Hanging Out"))
- })
-
- It("should run the AfterSuite", func() {
- Ω(session).Should(gbytes.Say("Heading Out After Suite"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/precompiled_test.go b/vendor/github.com/onsi/ginkgo/integration/precompiled_test.go
deleted file mode 100644
index 55724a9b8..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/precompiled_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package integration_test
-
-import (
- "os"
- "os/exec"
- "path/filepath"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("ginkgo build", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("passing_ginkgo_tests")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- session := startGinkgo(pathToTest, "build")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
- Ω(output).Should(ContainSubstring("Compiling passing_ginkgo_tests"))
- Ω(output).Should(ContainSubstring("compiled passing_ginkgo_tests.test"))
- })
-
- It("should build a test binary", func() {
- _, err := os.Stat(filepath.Join(pathToTest, "passing_ginkgo_tests.test"))
- Ω(err).ShouldNot(HaveOccurred())
- })
-
- It("should be possible to run the test binary directly", func() {
- cmd := exec.Command("./passing_ginkgo_tests.test")
- cmd.Dir = pathToTest
- session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
- Ω(err).ShouldNot(HaveOccurred())
- Eventually(session).Should(gexec.Exit(0))
- Ω(session).Should(gbytes.Say("Running Suite: Passing_ginkgo_tests Suite"))
- })
-
- It("should be possible to run the test binary via ginkgo", func() {
- session := startGinkgo(pathToTest, "./passing_ginkgo_tests.test")
- Eventually(session).Should(gexec.Exit(0))
- Ω(session).Should(gbytes.Say("Running Suite: Passing_ginkgo_tests Suite"))
- })
-
- It("should be possible to run the test binary in parallel", func() {
- session := startGinkgo(pathToTest, "--nodes=4", "--noColor", "./passing_ginkgo_tests.test")
- Eventually(session).Should(gexec.Exit(0))
- Ω(session).Should(gbytes.Say("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(session).Should(gbytes.Say("Running in parallel across 4 nodes"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/progress_test.go b/vendor/github.com/onsi/ginkgo/integration/progress_test.go
deleted file mode 100644
index cda86b6ea..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/progress_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Emitting progress", func() {
- var pathToTest string
- var session *gexec.Session
- var args []string
-
- BeforeEach(func() {
- args = []string{"--noColor"}
- pathToTest = tmpPath("progress")
- copyIn(fixturePath("progress_fixture"), pathToTest, false)
- })
-
- JustBeforeEach(func() {
- session = startGinkgo(pathToTest, args...)
- Eventually(session).Should(gexec.Exit(0))
- })
-
- Context("with the -progress flag, but no -v flag", func() {
- BeforeEach(func() {
- args = append(args, "-progress")
- })
-
- It("should not emit progress", func() {
- Ω(session).ShouldNot(gbytes.Say("[bB]efore"))
- })
- })
-
- Context("with the -v flag", func() {
- BeforeEach(func() {
- args = append(args, "-v")
- })
-
- It("should not emit progress", func() {
- Ω(session).ShouldNot(gbytes.Say(`\[BeforeEach\]`))
- Ω(session).Should(gbytes.Say(`>outer before<`))
- })
- })
-
- Context("with the -progress flag and the -v flag", func() {
- BeforeEach(func() {
- args = append(args, "-progress", "-v")
- })
-
- It("should emit progress (by writing to the GinkgoWriter)", func() {
- // First spec
-
- Ω(session).Should(gbytes.Say(`\[BeforeEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer before<`))
-
- Ω(session).Should(gbytes.Say(`\[BeforeEach\] Inner Context`))
- Ω(session).Should(gbytes.Say(`>inner before<`))
-
- Ω(session).Should(gbytes.Say(`\[BeforeEach\] when Inner When`))
- Ω(session).Should(gbytes.Say(`>inner before<`))
-
- Ω(session).Should(gbytes.Say(`\[JustBeforeEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer just before<`))
-
- Ω(session).Should(gbytes.Say(`\[JustBeforeEach\] Inner Context`))
- Ω(session).Should(gbytes.Say(`>inner just before<`))
-
- Ω(session).Should(gbytes.Say(`\[It\] should emit progress as it goes`))
- Ω(session).Should(gbytes.Say(`>it<`))
-
- Ω(session).Should(gbytes.Say(`\[AfterEach\] Inner Context`))
- Ω(session).Should(gbytes.Say(`>inner after<`))
-
- Ω(session).Should(gbytes.Say(`\[AfterEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer after<`))
-
- // Second spec
-
- Ω(session).Should(gbytes.Say(`\[BeforeEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer before<`))
-
- Ω(session).Should(gbytes.Say(`\[JustBeforeEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer just before<`))
-
- Ω(session).Should(gbytes.Say(`\[It\] should emit progress as it goes`))
- Ω(session).Should(gbytes.Say(`>specify<`))
-
- Ω(session).Should(gbytes.Say(`\[AfterEach\] ProgressFixture`))
- Ω(session).Should(gbytes.Say(`>outer after<`))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/run_test.go b/vendor/github.com/onsi/ginkgo/integration/run_test.go
deleted file mode 100644
index 6c270b61b..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/run_test.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package integration_test
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "runtime"
- "strings"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Running Specs", func() {
- var pathToTest string
-
- isWindows := (runtime.GOOS == "windows")
- denoter := "•"
-
- if isWindows {
- denoter = "+"
- }
-
- Context("when pointed at the current directory", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- })
-
- It("should run the tests in the working directory", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring(strings.Repeat(denoter, 4)))
- Ω(output).Should(ContainSubstring("SUCCESS! -- 4 Passed"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("when passed an explicit package to run", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- })
-
- It("should run the ginkgo style tests", func() {
- session := startGinkgo(tmpDir, "--noColor", pathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring(strings.Repeat(denoter, 4)))
- Ω(output).Should(ContainSubstring("SUCCESS! -- 4 Passed"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("when passed a number of packages to run", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- otherPathToTest := tmpPath("other")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- copyIn(fixturePath("more_ginkgo_tests"), otherPathToTest, false)
- })
-
- It("should run the ginkgo style tests", func() {
- session := startGinkgo(tmpDir, "--noColor", "--succinct=false", "ginkgo", "./other")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("when passed a number of packages to run, some of which have focused tests", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- otherPathToTest := tmpPath("other")
- focusedPathToTest := tmpPath("focused")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- copyIn(fixturePath("more_ginkgo_tests"), otherPathToTest, false)
- copyIn(fixturePath("focused_fixture"), focusedPathToTest, false)
- })
-
- It("should exit with a status code of 2 and explain why", func() {
- session := startGinkgo(tmpDir, "--noColor", "--succinct=false", "-r")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- Ω(output).Should(ContainSubstring("Detected Programmatic Focus - setting exit status to %d", types.GINKGO_FOCUS_EXIT_CODE))
- })
-
- Context("when the GINKGO_EDITOR_INTEGRATION environment variable is set", func() {
- BeforeEach(func() {
- os.Setenv("GINKGO_EDITOR_INTEGRATION", "true")
- })
- AfterEach(func() {
- os.Setenv("GINKGO_EDITOR_INTEGRATION", "")
- })
- It("should exit with a status code of 0 to allow a coverage file to be generated", func() {
- session := startGinkgo(tmpDir, "--noColor", "--succinct=false", "-r")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
- })
-
- Context("when told to skipPackages", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- otherPathToTest := tmpPath("other")
- focusedPathToTest := tmpPath("focused")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- copyIn(fixturePath("more_ginkgo_tests"), otherPathToTest, false)
- copyIn(fixturePath("focused_fixture"), focusedPathToTest, false)
- })
-
- It("should skip packages that match the list", func() {
- session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused", "-r")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Passing_ginkgo_tests Suite"))
- Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
- Ω(output).ShouldNot(ContainSubstring("Focused_fixture Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
-
- Context("when all packages are skipped", func() {
- It("should not run anything, but still exit 0", func() {
- session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused,ginkgo", "-r")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("All tests skipped!"))
- Ω(output).ShouldNot(ContainSubstring("Passing_ginkgo_tests Suite"))
- Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
- Ω(output).ShouldNot(ContainSubstring("Focused_fixture Suite"))
- Ω(output).ShouldNot(ContainSubstring("Test Suite Passed"))
- })
- })
- })
-
- Context("when there are no tests to run", func() {
- It("should exit 1", func() {
- session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused", "-r")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Err.Contents())
-
- Ω(output).Should(ContainSubstring("Found no test suites"))
- })
- })
-
- Context("when there are test files but `go test` reports there are no tests to run", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("no_test_fn"), pathToTest, false)
- })
-
- It("suggests running ginkgo bootstrap", func() {
- session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused", "-r")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Err.Contents())
-
- Ω(output).Should(ContainSubstring(`Found no test suites, did you forget to run "ginkgo bootstrap"?`))
- })
-
- It("fails if told to requireSuite", func() {
- session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused", "-r", "-requireSuite")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Err.Contents())
-
- Ω(output).Should(ContainSubstring(`Found no test suites, did you forget to run "ginkgo bootstrap"?`))
- })
- })
-
- Context("when told to randomizeSuites", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- otherPathToTest := tmpPath("other")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- copyIn(fixturePath("more_ginkgo_tests"), otherPathToTest, false)
- })
-
- It("should skip packages that match the regexp", func() {
- session := startGinkgo(tmpDir, "--noColor", "--randomizeSuites", "-r", "--seed=2")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω(session).Should(gbytes.Say("More_ginkgo_tests Suite"))
- Ω(session).Should(gbytes.Say("Passing_ginkgo_tests Suite"))
-
- session = startGinkgo(tmpDir, "--noColor", "--randomizeSuites", "-r", "--seed=3")
- Eventually(session).Should(gexec.Exit(0))
-
- Ω(session).Should(gbytes.Say("Passing_ginkgo_tests Suite"))
- Ω(session).Should(gbytes.Say("More_ginkgo_tests Suite"))
- })
- })
-
- Context("when pointed at a package with xunit style tests", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("xunit")
- copyIn(fixturePath("xunit_tests"), pathToTest, false)
- })
-
- It("should run the xunit style tests", func() {
- session := startGinkgo(pathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("--- PASS: TestAlwaysTrue"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("when pointed at a package with no tests", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("no_tests")
- copyIn(fixturePath("no_tests"), pathToTest, false)
- })
-
- It("should fail", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
-
- Ω(session.Err.Contents()).Should(ContainSubstring("Found no test suites"))
- })
- })
-
- Context("when pointed at a package that fails to compile", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("does_not_compile")
- copyIn(fixturePath("does_not_compile"), pathToTest, false)
- })
-
- It("should fail", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Failed to compile"))
- })
- })
-
- Context("when running in parallel", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- })
-
- Context("with a specific number of -nodes", func() {
- It("should use the specified number of nodes", func() {
- session := startGinkgo(pathToTest, "--noColor", "-succinct", "-nodes=2")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4 specs - 2 nodes [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s`, regexp.QuoteMeta(denoter)))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("with -p", func() {
- It("it should autocompute the number of nodes", func() {
- session := startGinkgo(pathToTest, "--noColor", "-succinct", "-p")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- nodes := runtime.NumCPU()
- if nodes == 1 {
- Skip("Can't test parallel testings with 1 CPU")
- }
- if nodes > 4 {
- nodes = nodes - 1
- }
- Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4 specs - %d nodes [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]?s`, nodes, regexp.QuoteMeta(denoter)))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
- })
-
- Context("when running in parallel with -debug", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("debug_parallel_fixture"), pathToTest, false)
- })
-
- Context("without -v", func() {
- It("should emit node output to files on disk", func() {
- session := startGinkgo(pathToTest, "--nodes=2", "--debug")
- Eventually(session).Should(gexec.Exit(0))
-
- f0, err := ioutil.ReadFile(pathToTest + "/ginkgo-node-1.log")
- Ω(err).ShouldNot(HaveOccurred())
- f1, err := ioutil.ReadFile(pathToTest + "/ginkgo-node-2.log")
- Ω(err).ShouldNot(HaveOccurred())
- content := string(append(f0, f1...))
-
- for i := 0; i < 10; i += 1 {
- Ω(content).Should(ContainSubstring("StdOut %d\n", i))
- Ω(content).Should(ContainSubstring("GinkgoWriter %d\n", i))
- }
- })
- })
-
- Context("without -v", func() {
- It("should emit node output to files on disk, without duplicating the GinkgoWriter output", func() {
- session := startGinkgo(pathToTest, "--nodes=2", "--debug", "-v")
- Eventually(session).Should(gexec.Exit(0))
-
- f0, err := ioutil.ReadFile(pathToTest + "/ginkgo-node-1.log")
- Ω(err).ShouldNot(HaveOccurred())
- f1, err := ioutil.ReadFile(pathToTest + "/ginkgo-node-2.log")
- Ω(err).ShouldNot(HaveOccurred())
- content := string(append(f0, f1...))
-
- out := strings.Split(content, "GinkgoWriter 2")
- Ω(out).Should(HaveLen(2))
- })
- })
- })
-
- Context("when streaming in parallel", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- })
-
- It("should print output in realtime", func() {
- session := startGinkgo(pathToTest, "--noColor", "-stream", "-nodes=2")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring(`[1] Parallel test node 1/2.`))
- Ω(output).Should(ContainSubstring(`[2] Parallel test node 2/2.`))
- Ω(output).Should(ContainSubstring(`[1] SUCCESS!`))
- Ω(output).Should(ContainSubstring(`[2] SUCCESS!`))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
-
- Context("when running recursively", func() {
- BeforeEach(func() {
- passingTest := tmpPath("A")
- otherPassingTest := tmpPath("E")
- copyIn(fixturePath("passing_ginkgo_tests"), passingTest, false)
- copyIn(fixturePath("more_ginkgo_tests"), otherPassingTest, false)
- })
-
- Context("when all the tests pass", func() {
- Context("with the -r flag", func() {
- It("should run all the tests (in succinct mode) and succeed", func() {
- session := startGinkgo(tmpDir, "--noColor", "-r", ".")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- outputLines := strings.Split(output, "\n")
- Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs [%s]{2} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
- Context("with a trailing /...", func() {
- It("should run all the tests (in succinct mode) and succeed", func() {
- session := startGinkgo(tmpDir, "--noColor", "./...")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- outputLines := strings.Split(output, "\n")
- Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs [%s]{2} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- })
- })
- })
-
- Context("when one of the packages has a failing tests", func() {
- BeforeEach(func() {
- failingTest := tmpPath("C")
- copyIn(fixturePath("failing_ginkgo_tests"), failingTest, false)
- })
-
- It("should fail and stop running tests", func() {
- session := startGinkgo(tmpDir, "--noColor", "-r")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- outputLines := strings.Split(output, "\n")
- Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`))
- Ω(output).Should(ContainSubstring(fmt.Sprintf("%s Failure", denoter)))
- Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Failed"))
-
- Ω(output).Should(ContainSubstring("Summarizing 1 Failure:"))
- Ω(output).Should(ContainSubstring("[Fail] FailingGinkgoTests [It] should fail"))
- })
- })
-
- Context("when one of the packages fails to compile", func() {
- BeforeEach(func() {
- doesNotCompileTest := tmpPath("C")
- copyIn(fixturePath("does_not_compile"), doesNotCompileTest, false)
- })
-
- It("should fail and stop running tests", func() {
- session := startGinkgo(tmpDir, "--noColor", "-r")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- outputLines := strings.Split(output, "\n")
- Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(outputLines[1]).Should(ContainSubstring("Failed to compile C:"))
- Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Test Suite Failed"))
- })
- })
-
- Context("when either is the case, but the keepGoing flag is set", func() {
- BeforeEach(func() {
- doesNotCompileTest := tmpPath("B")
- copyIn(fixturePath("does_not_compile"), doesNotCompileTest, false)
-
- failingTest := tmpPath("C")
- copyIn(fixturePath("failing_ginkgo_tests"), failingTest, false)
- })
-
- It("should soldier on", func() {
- session := startGinkgo(tmpDir, "--noColor", "-r", "-keepGoing")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- outputLines := strings.Split(output, "\n")
- Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(outputLines[1]).Should(ContainSubstring("Failed to compile B:"))
- Ω(output).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`))
- Ω(output).Should(ContainSubstring(fmt.Sprintf("%s Failure", denoter)))
- Ω(output).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs [%s]{2} SUCCESS! \d+(\.\d+)?[muµ]s PASS`, regexp.QuoteMeta(denoter)))
- Ω(output).Should(ContainSubstring("Test Suite Failed"))
- })
- })
- })
-
- Context("when told to keep going --untilItFails", func() {
- BeforeEach(func() {
- copyIn(fixturePath("eventually_failing"), tmpDir, false)
- })
-
- It("should keep rerunning the tests, until a failure occurs", func() {
- session := startGinkgo(tmpDir, "--untilItFails", "--noColor")
- Eventually(session).Should(gexec.Exit(1))
- Ω(session).Should(gbytes.Say("This was attempt #1"))
- Ω(session).Should(gbytes.Say("This was attempt #2"))
- Ω(session).Should(gbytes.Say("Tests failed on attempt #3"))
-
- //it should change the random seed between each test
- lines := strings.Split(string(session.Out.Contents()), "\n")
- randomSeeds := []string{}
- for _, line := range lines {
- if strings.Contains(line, "Random Seed:") {
- randomSeeds = append(randomSeeds, strings.Split(line, ": ")[1])
- }
- }
- Ω(randomSeeds[0]).ShouldNot(Equal(randomSeeds[1]))
- Ω(randomSeeds[1]).ShouldNot(Equal(randomSeeds[2]))
- Ω(randomSeeds[0]).ShouldNot(Equal(randomSeeds[2]))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/skip_test.go b/vendor/github.com/onsi/ginkgo/integration/skip_test.go
deleted file mode 100644
index f0fc9d5ee..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/skip_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Skipping Specs", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("skipping")
- copyIn(fixturePath("skip_fixture"), pathToTest, false)
- })
-
- It("should skip in all the possible ways", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS"))
-
- Ω(output).Should(ContainSubstring("a top level skip on line 9"))
- Ω(output).Should(ContainSubstring("skip_fixture_test.go:9"))
- Ω(output).Should(ContainSubstring("an async top level skip on line 14"))
- Ω(output).Should(ContainSubstring("skip_fixture_test.go:14"))
- Ω(output).Should(ContainSubstring("a top level goroutine skip on line 21"))
- Ω(output).Should(ContainSubstring("skip_fixture_test.go:21"))
-
- Ω(output).Should(ContainSubstring("a sync SKIP"))
- Ω(output).Should(ContainSubstring("an async SKIP"))
- Ω(output).Should(ContainSubstring("a goroutine SKIP"))
- Ω(output).Should(ContainSubstring("a measure SKIP"))
-
- Ω(output).Should(ContainSubstring("S [SKIPPING] in Spec Setup (BeforeEach) ["))
- Ω(output).Should(ContainSubstring("a BeforeEach SKIP"))
- Ω(output).Should(ContainSubstring("S [SKIPPING] in Spec Teardown (AfterEach) ["))
- Ω(output).Should(ContainSubstring("an AfterEach SKIP"))
-
- Ω(output).Should(ContainSubstring("0 Passed | 0 Failed | 0 Pending | 9 Skipped"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/subcommand_test.go b/vendor/github.com/onsi/ginkgo/integration/subcommand_test.go
deleted file mode 100644
index fec197f56..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/subcommand_test.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package integration_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Subcommand", func() {
- Describe("ginkgo bootstrap", func() {
- var pkgPath string
- BeforeEach(func() {
- pkgPath = tmpPath("foo")
- os.Mkdir(pkgPath, 0777)
- })
-
- It("should generate a bootstrap file, as long as one does not exist", func() {
- session := startGinkgo(pkgPath, "bootstrap")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_suite_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_test"))
- Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
- Ω(content).Should(ContainSubstring("RegisterFailHandler"))
- Ω(content).Should(ContainSubstring("RunSpecs"))
-
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/gomega"`))
-
- session = startGinkgo(pkgPath, "bootstrap")
- Eventually(session).Should(gexec.Exit(1))
- output = session.Out.Contents()
- Ω(output).Should(ContainSubstring("foo_suite_test.go already exists"))
- })
-
- It("should import nodot declarations when told to", func() {
- session := startGinkgo(pkgPath, "bootstrap", "--nodot")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_suite_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_test"))
- Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
- Ω(content).Should(ContainSubstring("RegisterFailHandler"))
- Ω(content).Should(ContainSubstring("RunSpecs"))
-
- Ω(content).Should(ContainSubstring("var It = ginkgo.It"))
- Ω(content).Should(ContainSubstring("var Ω = gomega.Ω"))
-
- Ω(content).Should(ContainSubstring("\t" + `"github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring("\t" + `"github.com/onsi/gomega"`))
- })
-
- It("should generate an agouti bootstrap file when told to", func() {
- session := startGinkgo(pkgPath, "bootstrap", "--agouti")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_suite_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_test"))
- Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
- Ω(content).Should(ContainSubstring("RegisterFailHandler"))
- Ω(content).Should(ContainSubstring("RunSpecs"))
-
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/gomega"`))
- Ω(content).Should(ContainSubstring("\t" + `"github.com/sclevine/agouti"`))
- })
-
- It("should generate a bootstrap file using a template when told to", func() {
- templateFile := filepath.Join(pkgPath, ".bootstrap")
- ioutil.WriteFile(templateFile, []byte(`package {{.Package}}
-
- import (
- {{.GinkgoImport}}
- {{.GomegaImport}}
-
- "testing"
- "binary"
- )
-
- func Test{{.FormattedName}}(t *testing.T) {
- // This is a {{.Package}} test
- }`), 0666)
- session := startGinkgo(pkgPath, "bootstrap", "--template", templateFile)
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_suite_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_test"))
- Ω(content).Should(ContainSubstring(`. "github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring(`. "github.com/onsi/gomega"`))
- Ω(content).Should(ContainSubstring(`"binary"`))
- Ω(content).Should(ContainSubstring("// This is a foo_test test"))
- })
- })
-
- Describe("nodot", func() {
- It("should update the declarations in the bootstrap file", func() {
- pkgPath := tmpPath("foo")
- os.Mkdir(pkgPath, 0777)
-
- session := startGinkgo(pkgPath, "bootstrap", "--nodot")
- Eventually(session).Should(gexec.Exit(0))
-
- byteContent, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
-
- content := string(byteContent)
- content = strings.Replace(content, "var It =", "var MyIt =", -1)
- content = strings.Replace(content, "var Ω = gomega.Ω\n", "", -1)
-
- err = ioutil.WriteFile(filepath.Join(pkgPath, "foo_suite_test.go"), []byte(content), os.ModePerm)
- Ω(err).ShouldNot(HaveOccurred())
-
- session = startGinkgo(pkgPath, "nodot")
- Eventually(session).Should(gexec.Exit(0))
-
- byteContent, err = ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
-
- Ω(byteContent).Should(ContainSubstring("var MyIt = ginkgo.It"))
- Ω(byteContent).ShouldNot(ContainSubstring("var It = ginkgo.It"))
- Ω(byteContent).Should(ContainSubstring("var Ω = gomega.Ω"))
- })
- })
-
- Describe("ginkgo generate", func() {
- var pkgPath string
-
- BeforeEach(func() {
- pkgPath = tmpPath("foo_bar")
- os.Mkdir(pkgPath, 0777)
- })
-
- Context("with no arguments", func() {
- It("should generate a test file named after the package", func() {
- session := startGinkgo(pkgPath, "generate")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_bar_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("FooBar", func() {`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/gomega"`))
-
- session = startGinkgo(pkgPath, "generate")
- Eventually(session).Should(gexec.Exit(1))
- output = session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_bar_test.go already exists"))
- })
- })
-
- Context("with an argument of the form: foo", func() {
- It("should generate a test file named after the argument", func() {
- session := startGinkgo(pkgPath, "generate", "baz_buzz")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
- })
- })
-
- Context("with an argument of the form: foo.go", func() {
- It("should generate a test file named after the argument", func() {
- session := startGinkgo(pkgPath, "generate", "baz_buzz.go")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
-
- })
- })
-
- Context("with an argument of the form: foo_test", func() {
- It("should generate a test file named after the argument", func() {
- session := startGinkgo(pkgPath, "generate", "baz_buzz_test")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
- })
- })
-
- Context("with an argument of the form: foo_test.go", func() {
- It("should generate a test file named after the argument", func() {
- session := startGinkgo(pkgPath, "generate", "baz_buzz_test.go")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
- })
- })
-
- Context("with multiple arguments", func() {
- It("should generate a test file named after the argument", func() {
- session := startGinkgo(pkgPath, "generate", "baz", "buzz")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("baz_test.go"))
- Ω(output).Should(ContainSubstring("buzz_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("Baz", func() {`))
-
- content, err = ioutil.ReadFile(filepath.Join(pkgPath, "buzz_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring(`var _ = Describe("Buzz", func() {`))
- })
- })
-
- Context("with nodot", func() {
- It("should not import ginkgo or gomega", func() {
- session := startGinkgo(pkgPath, "generate", "--nodot")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_bar_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).ShouldNot(ContainSubstring("\t" + `. "github.com/onsi/ginkgo"`))
- Ω(content).ShouldNot(ContainSubstring("\t" + `. "github.com/onsi/gomega"`))
- })
- })
-
- Context("with agouti", func() {
- It("should generate an agouti test file", func() {
- session := startGinkgo(pkgPath, "generate", "--agouti")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("foo_bar_test.go"))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package foo_bar_test"))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/ginkgo"`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/onsi/gomega"`))
- Ω(content).Should(ContainSubstring("\t" + `. "github.com/sclevine/agouti/matchers"`))
- Ω(content).Should(ContainSubstring("\t" + `"github.com/sclevine/agouti"`))
- Ω(content).Should(ContainSubstring("page, err = agoutiDriver.NewPage()"))
- })
- })
- })
-
- Describe("ginkgo bootstrap/generate", func() {
- var pkgPath string
- BeforeEach(func() {
- pkgPath = tmpPath("some crazy-thing")
- os.Mkdir(pkgPath, 0777)
- })
-
- Context("when the working directory is empty", func() {
- It("generates correctly named bootstrap and generate files with a package name derived from the directory", func() {
- session := startGinkgo(pkgPath, "bootstrap")
- Eventually(session).Should(gexec.Exit(0))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package some_crazy_thing_test"))
- Ω(content).Should(ContainSubstring("SomeCrazyThing Suite"))
-
- session = startGinkgo(pkgPath, "generate")
- Eventually(session).Should(gexec.Exit(0))
-
- content, err = ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package some_crazy_thing_test"))
- Ω(content).Should(ContainSubstring("SomeCrazyThing"))
- })
- })
-
- Context("when the working directory contains a file with a package name", func() {
- BeforeEach(func() {
- Ω(ioutil.WriteFile(filepath.Join(pkgPath, "foo.go"), []byte("package main\n\nfunc main() {}"), 0777)).Should(Succeed())
- })
-
- It("generates correctly named bootstrap and generate files with the package name", func() {
- session := startGinkgo(pkgPath, "bootstrap")
- Eventually(session).Should(gexec.Exit(0))
-
- content, err := ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_suite_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package main_test"))
- Ω(content).Should(ContainSubstring("SomeCrazyThing Suite"))
-
- session = startGinkgo(pkgPath, "generate")
- Eventually(session).Should(gexec.Exit(0))
-
- content, err = ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_test.go"))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(content).Should(ContainSubstring("package main_test"))
- Ω(content).Should(ContainSubstring("SomeCrazyThing"))
- })
- })
- })
-
- Describe("ginkgo blur", func() {
- It("should unfocus tests", func() {
- pathToTest := tmpPath("focused")
- fixture := fixturePath("focused_fixture")
- copyIn(fixture, pathToTest, false)
-
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
- output := session.Out.Contents()
-
- Ω(string(output)).Should(ContainSubstring("8 Passed"))
- Ω(string(output)).Should(ContainSubstring("5 Skipped"))
-
- session = startGinkgo(pathToTest, "blur")
- Eventually(session).Should(gexec.Exit(0))
- output = session.Out.Contents()
- Ω(string(output)).ShouldNot(ContainSubstring("expected 'package'"))
-
- session = startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output = session.Out.Contents()
- Ω(string(output)).Should(ContainSubstring("13 Passed"))
- Ω(string(output)).Should(ContainSubstring("0 Skipped"))
-
- Expect(sameFile(filepath.Join(pathToTest, "README.md"), filepath.Join(fixture, "README.md"))).To(BeTrue())
- })
-
- It("should ignore the 'vendor' folder", func() {
- pathToTest := tmpPath("focused_fixture_with_vendor")
- copyIn(fixturePath("focused_fixture_with_vendor"), pathToTest, true)
-
- session := startGinkgo(pathToTest, "blur")
- Eventually(session).Should(gexec.Exit(0))
-
- session = startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
- Expect(string(output)).To(ContainSubstring("13 Passed"))
- Expect(string(output)).To(ContainSubstring("0 Skipped"))
-
- vendorPath := fixturePath("focused_fixture_with_vendor/vendor")
- otherVendorPath := filepath.Join(pathToTest, "vendor")
-
- Expect(sameFolder(vendorPath, otherVendorPath)).To(BeTrue())
- })
- })
-
- Describe("ginkgo version", func() {
- It("should print out the version info", func() {
- session := startGinkgo("", "version")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`))
- })
- })
-
- Describe("ginkgo help", func() {
- It("should print out usage information", func() {
- session := startGinkgo("", "help")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Err.Contents())
-
- Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`))
- Ω(output).Should(ContainSubstring("ginkgo watch"))
- Ω(output).Should(ContainSubstring("-succinct"))
- Ω(output).Should(ContainSubstring("-nodes"))
- Ω(output).Should(ContainSubstring("ginkgo generate"))
- Ω(output).Should(ContainSubstring("ginkgo help <COMMAND>"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/suite_command_test.go b/vendor/github.com/onsi/ginkgo/integration/suite_command_test.go
deleted file mode 100644
index 4aec1bc41..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/suite_command_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Suite Command Specs", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("suite_command")
- copyIn(fixturePath("suite_command_tests"), pathToTest, false)
- })
-
- It("Runs command after suite echoing out suite data, properly reporting suite name and passing status in successful command output", func() {
- command := "-afterSuiteHook=echo THIS IS A (ginkgo-suite-passed) TEST OF THE (ginkgo-suite-name) SYSTEM, THIS IS ONLY A TEST"
- expected := "THIS IS A [PASS] TEST OF THE suite_command SYSTEM, THIS IS ONLY A TEST"
- session := startGinkgo(pathToTest, command)
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("1 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- Ω(output).Should(ContainSubstring("0 Skipped"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- Ω(output).Should(ContainSubstring("Post-suite command succeeded:"))
- Ω(output).Should(ContainSubstring(expected))
- })
-
- It("Runs command after suite reporting that command failed", func() {
- command := "-afterSuiteHook=exit 1"
- session := startGinkgo(pathToTest, command)
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("1 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- Ω(output).Should(ContainSubstring("0 Skipped"))
- Ω(output).Should(ContainSubstring("Test Suite Passed"))
- Ω(output).Should(ContainSubstring("Post-suite command failed:"))
- })
-
- It("Runs command after suite echoing out suite data, properly reporting suite name and failing status in successful command output", func() {
- command := "-afterSuiteHook=echo THIS IS A (ginkgo-suite-passed) TEST OF THE (ginkgo-suite-name) SYSTEM, THIS IS ONLY A TEST"
- expected := "THIS IS A [FAIL] TEST OF THE suite_command SYSTEM, THIS IS ONLY A TEST"
- session := startGinkgo(pathToTest, "-failOnPending=true", command)
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("1 Passed"))
- Ω(output).Should(ContainSubstring("0 Failed"))
- Ω(output).Should(ContainSubstring("1 Pending"))
- Ω(output).Should(ContainSubstring("0 Skipped"))
- Ω(output).Should(ContainSubstring("Test Suite Failed"))
- Ω(output).Should(ContainSubstring("Post-suite command succeeded:"))
- Ω(output).Should(ContainSubstring(expected))
- })
-
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/suite_setup_test.go b/vendor/github.com/onsi/ginkgo/integration/suite_setup_test.go
deleted file mode 100644
index 33ff5b983..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/suite_setup_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package integration_test
-
-import (
- "strings"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("SuiteSetup", func() {
- var pathToTest string
-
- Context("when the BeforeSuite and AfterSuite pass", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("suite_setup")
- copyIn(fixturePath("passing_suite_setup"), pathToTest, false)
- })
-
- It("should run the BeforeSuite once, then run all the tests", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1))
- })
-
- It("should run the BeforeSuite once per parallel node, then run all the tests", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=2")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2))
- })
- })
-
- Context("when the BeforeSuite fails", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("suite_setup")
- copyIn(fixturePath("failing_before_suite"), pathToTest, false)
- })
-
- It("should run the BeforeSuite once, none of the tests, but it should run the AfterSuite", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1))
- Ω(strings.Count(output, "Test Panicked")).Should(Equal(1))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1))
- Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS"))
- })
-
- It("should run the BeforeSuite once per parallel node, none of the tests, but it should run the AfterSuite for each node", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=2")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2))
- Ω(strings.Count(output, "Test Panicked")).Should(Equal(2))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2))
- Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS"))
- })
- })
-
- Context("when the AfterSuite fails", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("suite_setup")
- copyIn(fixturePath("failing_after_suite"), pathToTest, false)
- })
-
-		It("should run the BeforeSuite once, the tests, and the failing AfterSuite", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1))
- Ω(strings.Count(output, "Test Panicked")).Should(Equal(1))
- Ω(strings.Count(output, "A TEST")).Should(Equal(2))
- })
-
-		It("should run the BeforeSuite once per parallel node, the tests, and the failing AfterSuite on each node", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=2")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2))
- Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2))
- Ω(strings.Count(output, "Test Panicked")).Should(Equal(2))
- Ω(strings.Count(output, "A TEST")).Should(Equal(2))
- })
- })
-
- Context("With passing synchronized before and after suites", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("suite_setup")
- copyIn(fixturePath("synchronized_setup_tests"), pathToTest, false)
- })
-
- Context("when run with one node", func() {
- It("should do all the work on that one node", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("BEFORE_A_1\nBEFORE_B_1: DATA"))
- Ω(output).Should(ContainSubstring("AFTER_A_1\nAFTER_B_1"))
- })
- })
-
- Context("when run across multiple nodes", func() {
- It("should run the first BeforeSuite function (BEFORE_A) on node 1, the second (BEFORE_B) on all the nodes, the first AfterSuite (AFTER_A) on all the nodes, and then the second (AFTER_B) on Node 1 *after* everything else is finished", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=3")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("BEFORE_A_1"))
- Ω(output).Should(ContainSubstring("BEFORE_B_1: DATA"))
- Ω(output).Should(ContainSubstring("BEFORE_B_2: DATA"))
- Ω(output).Should(ContainSubstring("BEFORE_B_3: DATA"))
-
- Ω(output).ShouldNot(ContainSubstring("BEFORE_A_2"))
- Ω(output).ShouldNot(ContainSubstring("BEFORE_A_3"))
-
- Ω(output).Should(ContainSubstring("AFTER_A_1"))
- Ω(output).Should(ContainSubstring("AFTER_A_2"))
- Ω(output).Should(ContainSubstring("AFTER_A_3"))
- Ω(output).Should(ContainSubstring("AFTER_B_1"))
-
- Ω(output).ShouldNot(ContainSubstring("AFTER_B_2"))
- Ω(output).ShouldNot(ContainSubstring("AFTER_B_3"))
- })
- })
-
- Context("when streaming across multiple nodes", func() {
- It("should run the first BeforeSuite function (BEFORE_A) on node 1, the second (BEFORE_B) on all the nodes, the first AfterSuite (AFTER_A) on all the nodes, and then the second (AFTER_B) on Node 1 *after* everything else is finished", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=3", "--stream")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("[1] BEFORE_A_1"))
- Ω(output).Should(ContainSubstring("[1] BEFORE_B_1: DATA"))
- Ω(output).Should(ContainSubstring("[2] BEFORE_B_2: DATA"))
- Ω(output).Should(ContainSubstring("[3] BEFORE_B_3: DATA"))
-
- Ω(output).ShouldNot(ContainSubstring("BEFORE_A_2"))
- Ω(output).ShouldNot(ContainSubstring("BEFORE_A_3"))
-
- Ω(output).Should(ContainSubstring("[1] AFTER_A_1"))
- Ω(output).Should(ContainSubstring("[2] AFTER_A_2"))
- Ω(output).Should(ContainSubstring("[3] AFTER_A_3"))
- Ω(output).Should(ContainSubstring("[1] AFTER_B_1"))
-
- Ω(output).ShouldNot(ContainSubstring("AFTER_B_2"))
- Ω(output).ShouldNot(ContainSubstring("AFTER_B_3"))
- })
- })
- })
-
- Context("With a failing synchronized before suite", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("suite_setup")
- copyIn(fixturePath("exiting_synchronized_setup_tests"), pathToTest, false)
- })
-
- It("should fail and let the user know that node 1 disappeared prematurely", func() {
- session := startGinkgo(pathToTest, "--noColor", "--nodes=3")
- Eventually(session).Should(gexec.Exit(1))
- output := string(session.Out.Contents())
-
- Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite"))
- Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to report back!"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/tags_test.go b/vendor/github.com/onsi/ginkgo/integration/tags_test.go
deleted file mode 100644
index fc2ff5e5c..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/tags_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Tags", func() {
- var pathToTest string
- BeforeEach(func() {
- pathToTest = tmpPath("tags")
- copyIn(fixturePath("tags_tests"), pathToTest, false)
- })
-
- It("should honor the passed in -tags flag", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := string(session.Out.Contents())
- Ω(output).Should(ContainSubstring("Ran 1 of 1 Specs"))
-
- session = startGinkgo(pathToTest, "--noColor", "-tags=complex_tests")
- Eventually(session).Should(gexec.Exit(0))
- output = string(session.Out.Contents())
- Ω(output).Should(ContainSubstring("Ran 3 of 3 Specs"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/test_description_test.go b/vendor/github.com/onsi/ginkgo/integration/test_description_test.go
deleted file mode 100644
index 6739871fb..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/test_description_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package integration_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("TestDescription", func() {
- var pathToTest string
-
- BeforeEach(func() {
- pathToTest = tmpPath("test_description")
- copyIn(fixturePath("test_description"), pathToTest, false)
- })
-
- It("should capture and emit information about the current test", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(1))
-
- Ω(session).Should(gbytes.Say("TestDescription should pass:false"))
- Ω(session).Should(gbytes.Say("TestDescription should fail:true"))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go b/vendor/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go
deleted file mode 100644
index 8238762d1..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package integration_test
-
-import (
- "regexp"
- "runtime"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Verbose And Succinct Mode", func() {
- var pathToTest string
- var otherPathToTest string
-
- isWindows := (runtime.GOOS == "windows")
- denoter := "•"
-
- if isWindows {
- denoter = "+"
- }
-
- Context("when running one package", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- })
-
- It("should default to non-succinct mode", func() {
- session := startGinkgo(pathToTest, "--noColor")
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- })
- })
-
- Context("when running more than one package", func() {
- BeforeEach(func() {
- pathToTest = tmpPath("ginkgo")
- copyIn(fixturePath("passing_ginkgo_tests"), pathToTest, false)
- otherPathToTest = tmpPath("more_ginkgo")
- copyIn(fixturePath("more_ginkgo_tests"), otherPathToTest, false)
- })
-
- Context("with no flags set", func() {
- It("should default to succinct mode", func() {
- session := startGinkgo(pathToTest, "--noColor", pathToTest, otherPathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(MatchRegexp(`\] Passing_ginkgo_tests Suite - 4/4 specs [%s]{4} SUCCESS!`, regexp.QuoteMeta(denoter)))
- Ω(output).Should(MatchRegexp(`\] More_ginkgo_tests Suite - 2/2 specs [%s]{2} SUCCESS!`, regexp.QuoteMeta(denoter)))
- })
- })
-
- Context("with --succinct=false", func() {
- It("should not be in succinct mode", func() {
- session := startGinkgo(pathToTest, "--noColor", "--succinct=false", pathToTest, otherPathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite"))
- })
- })
-
- Context("with -v", func() {
- It("should not be in succinct mode, but should be verbose", func() {
- session := startGinkgo(pathToTest, "--noColor", "-v", pathToTest, otherPathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite"))
- Ω(output).Should(ContainSubstring("should proxy strings"))
- Ω(output).Should(ContainSubstring("should always pass"))
- })
-
- It("should emit output from Bys", func() {
- session := startGinkgo(pathToTest, "--noColor", "-v", pathToTest)
- Eventually(session).Should(gexec.Exit(0))
- output := session.Out.Contents()
-
- Ω(output).Should(ContainSubstring("emitting one By"))
- Ω(output).Should(ContainSubstring("emitting another By"))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/integration/watch_test.go b/vendor/github.com/onsi/ginkgo/integration/watch_test.go
deleted file mode 100644
index 1d65702a7..000000000
--- a/vendor/github.com/onsi/ginkgo/integration/watch_test.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package integration_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/gexec"
-)
-
-var _ = Describe("Watch", func() {
- var rootPath string
- var pathA string
- var pathB string
- var pathC string
- var session *gexec.Session
-
- BeforeEach(func() {
- rootPath = tmpPath("root")
- pathA = filepath.Join(rootPath, "src", "github.com", "onsi", "A")
- pathB = filepath.Join(rootPath, "src", "github.com", "onsi", "B")
- pathC = filepath.Join(rootPath, "src", "github.com", "onsi", "C")
-
- err := os.MkdirAll(pathA, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- err = os.MkdirAll(pathB, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- err = os.MkdirAll(pathC, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- copyIn(fixturePath(filepath.Join("watch_fixtures", "A")), pathA, false)
- copyIn(fixturePath(filepath.Join("watch_fixtures", "B")), pathB, false)
- copyIn(fixturePath(filepath.Join("watch_fixtures", "C")), pathC, false)
- })
-
- startGinkgoWithGopath := func(args ...string) *gexec.Session {
- cmd := ginkgoCommand(rootPath, args...)
- os.Setenv("GOPATH", rootPath+":"+os.Getenv("GOPATH"))
- session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
- Ω(err).ShouldNot(HaveOccurred())
- return session
- }
-
- modifyFile := func(path string) {
- time.Sleep(time.Second)
- content, err := ioutil.ReadFile(path)
- Ω(err).ShouldNot(HaveOccurred())
- content = append(content, []byte("//")...)
- err = ioutil.WriteFile(path, content, 0666)
- Ω(err).ShouldNot(HaveOccurred())
- }
-
- modifyCode := func(pkgToModify string) {
- modifyFile(filepath.Join(rootPath, "src", "github.com", "onsi", pkgToModify, pkgToModify+".go"))
- }
-
- modifyJSON := func(pkgToModify string) {
- modifyFile(filepath.Join(rootPath, "src", "github.com", "onsi", pkgToModify, pkgToModify+".json"))
- }
-
- modifyTest := func(pkgToModify string) {
- modifyFile(filepath.Join(rootPath, "src", "github.com", "onsi", pkgToModify, pkgToModify+"_test.go"))
- }
-
- AfterEach(func() {
- if session != nil {
- session.Kill().Wait()
- }
- })
-
- It("should be set up correctly", func() {
- session = startGinkgoWithGopath("-r")
- Eventually(session).Should(gexec.Exit(0))
- Ω(session.Out.Contents()).Should(ContainSubstring("A Suite"))
- Ω(session.Out.Contents()).Should(ContainSubstring("B Suite"))
- Ω(session.Out.Contents()).Should(ContainSubstring("C Suite"))
- Ω(session.Out.Contents()).Should(ContainSubstring("Ginkgo ran 3 suites"))
- })
-
- Context("when watching just one test suite", func() {
- It("should immediately run, and should rerun when the test suite changes", func() {
- session = startGinkgoWithGopath("watch", "-succinct", pathA)
- Eventually(session).Should(gbytes.Say("A Suite"))
- modifyCode("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- session.Kill().Wait()
- })
- })
-
- Context("when watching several test suites", func() {
- It("should not immediately run, but should rerun a test when its code changes", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite|C Suite"))
- modifyCode("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite"))
- session.Kill().Wait()
- })
- })
-
- Describe("watching dependencies", func() {
- Context("with a depth of 2", func() {
- It("should watch down to that depth", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyCode("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite"))
-
- modifyCode("B")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("C Suite"))
-
- modifyCode("C")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- })
- })
-
- Context("with a depth of 1", func() {
- It("should watch down to that depth", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=1")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyCode("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite"))
-
- modifyCode("B")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("C Suite"))
-
- modifyCode("C")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite"))
- })
- })
-
- Context("with a depth of 0", func() {
- It("should not watch any dependencies", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=0")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[0 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`B \[0 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyCode("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite"))
-
- modifyCode("B")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|C Suite"))
-
- modifyCode("C")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite"))
- })
- })
-
- It("should not trigger dependents when tests are changed", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyTest("A")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite"))
-
- modifyTest("B")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|C Suite"))
-
- modifyTest("C")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite"))
- })
- })
-
- Describe("adjusting the watch regular expression", func() {
- Describe("the default regular expression", func() {
- It("should only trigger when go files are changed", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2")
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyJSON("C")
- Consistently(session).ShouldNot(gbytes.Say("Detected changes in"))
- Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite|C Suite"))
- })
- })
-
- Describe("modifying the regular expression", func() {
- It("should trigger if the regexp matches", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2", `-watchRegExp=\.json$`)
- Eventually(session).Should(gbytes.Say("Identified 3 test suites"))
- Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`))
- Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`))
-
- modifyJSON("C")
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Eventually(session).Should(gbytes.Say("B Suite"))
- Eventually(session).Should(gbytes.Say("A Suite"))
- })
- })
- })
-
-	Describe("when a new test suite is added", func() {
- It("should start monitoring that test suite", func() {
- session = startGinkgoWithGopath("watch", "-succinct", "-r")
-
- Eventually(session).Should(gbytes.Say("Watching 3 suites"))
-
- pathD := filepath.Join(rootPath, "src", "github.com", "onsi", "D")
-
- err := os.MkdirAll(pathD, 0700)
- Ω(err).ShouldNot(HaveOccurred())
-
- copyIn(fixturePath(filepath.Join("watch_fixtures", "D")), pathD, false)
-
- Eventually(session).Should(gbytes.Say("Detected 1 new suite"))
- Eventually(session).Should(gbytes.Say(`D \[1 dependency\]`))
- Eventually(session).Should(gbytes.Say("D Suite"))
-
- modifyCode("D")
-
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("D Suite"))
-
- modifyCode("C")
-
- Eventually(session).Should(gbytes.Say("Detected changes in"))
- Eventually(session).Should(gbytes.Say("C Suite"))
- Eventually(session).Should(gbytes.Say("D Suite"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go
deleted file mode 100644
index f06abf3c5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package codelocation_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestCodelocation(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "CodeLocation Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
deleted file mode 100644
index cca75a449..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package codelocation_test
-
-import (
- "runtime"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("CodeLocation", func() {
- var (
- codeLocation types.CodeLocation
- expectedFileName string
- expectedLineNumber int
- )
-
- caller0 := func() {
- codeLocation = codelocation.New(1)
- }
-
- caller1 := func() {
- _, expectedFileName, expectedLineNumber, _ = runtime.Caller(0)
- expectedLineNumber += 2
- caller0()
- }
-
- BeforeEach(func() {
- caller1()
- })
-
- It("should use the passed in skip parameter to pick out the correct file & line number", func() {
- Ω(codeLocation.FileName).Should(Equal(expectedFileName))
- Ω(codeLocation.LineNumber).Should(Equal(expectedLineNumber))
- })
-
- Describe("stringer behavior", func() {
- It("should stringify nicely", func() {
- Ω(codeLocation.String()).Should(ContainSubstring("code_location_test.go:%d", expectedLineNumber))
- })
- })
-
-	//There's no better way than to test this private method directly, as it
-	//goes out of its way to prune ginkgo-related code from the stack trace
- Describe("PruneStack", func() {
- It("should remove any references to ginkgo and pkg/testing and pkg/runtime", func() {
- input := `/Skip/me
-Skip: skip()
-/Skip/me
-Skip: skip()
-/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever.go:10 (0x12314)
-Something: Func()
-/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever_else.go:10 (0x12314)
-SomethingInternalToGinkgo: Func()
-/usr/goroot/pkg/strings/oops.go:10 (0x12341)
-Oops: BlowUp()
-/Users/whoever/gospace/src/mycode/code.go:10 (0x12341)
-MyCode: Func()
-/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341)
-MyCodeTest: Func()
-/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08)
-TestFoo: RunSpecs(t, "Foo Suite")
-/usr/goroot/pkg/testing/testing.go:12 (0x37f08)
-TestingT: Blah()
-/usr/goroot/pkg/runtime/runtime.go:12 (0x37f08)
-Something: Func()
-`
- prunedStack := codelocation.PruneStack(input, 1)
- Ω(prunedStack).Should(Equal(`/usr/goroot/pkg/strings/oops.go:10 (0x12341)
-Oops: BlowUp()
-/Users/whoever/gospace/src/mycode/code.go:10 (0x12341)
-MyCode: Func()
-/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341)
-MyCodeTest: Func()
-/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08)
-TestFoo: RunSpecs(t, "Foo Suite")`))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go
deleted file mode 100644
index c6fc314ff..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package containernode_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestContainernode(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Containernode Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
deleted file mode 100644
index 11ac9b70b..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package containernode_test
-
-import (
- "math/rand"
-
- "github.com/onsi/ginkgo/internal/leafnodes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- . "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Container Node", func() {
- var (
- codeLocation types.CodeLocation
- container *ContainerNode
- )
-
- BeforeEach(func() {
- codeLocation = codelocation.New(0)
- container = New("description text", types.FlagTypeFocused, codeLocation)
- })
-
- Describe("creating a container node", func() {
- It("can answer questions about itself", func() {
- Ω(container.Text()).Should(Equal("description text"))
- Ω(container.Flag()).Should(Equal(types.FlagTypeFocused))
- Ω(container.CodeLocation()).Should(Equal(codeLocation))
- })
- })
-
- Describe("pushing setup nodes", func() {
- It("can append setup nodes of various types and fetch them by type", func() {
- befA := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
- befB := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
- aftA := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0)
- aftB := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0)
- jusBefA := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
- jusBefB := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
-
- container.PushSetupNode(befA)
- container.PushSetupNode(befB)
- container.PushSetupNode(aftA)
- container.PushSetupNode(aftB)
- container.PushSetupNode(jusBefA)
- container.PushSetupNode(jusBefB)
-
- subject := leafnodes.NewItNode("subject", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- container.PushSubjectNode(subject)
-
- Ω(container.SetupNodesOfType(types.SpecComponentTypeBeforeEach)).Should(Equal([]leafnodes.BasicNode{befA, befB}))
- Ω(container.SetupNodesOfType(types.SpecComponentTypeAfterEach)).Should(Equal([]leafnodes.BasicNode{aftA, aftB}))
- Ω(container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach)).Should(Equal([]leafnodes.BasicNode{jusBefA, jusBefB}))
- Ω(container.SetupNodesOfType(types.SpecComponentTypeIt)).Should(BeEmpty()) //subjects are not setup nodes
- })
- })
-
- Context("With appended containers and subject nodes", func() {
- var (
- itA, itB, innerItA, innerItB leafnodes.SubjectNode
- innerContainer *ContainerNode
- )
-
- BeforeEach(func() {
- itA = leafnodes.NewItNode("Banana", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- itB = leafnodes.NewItNode("Apple", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
-
- innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- innerItB = leafnodes.NewItNode("inner B", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
-
- innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0))
-
- container.PushSubjectNode(itA)
- container.PushContainerNode(innerContainer)
- innerContainer.PushSubjectNode(innerItA)
- innerContainer.PushSubjectNode(innerItB)
- container.PushSubjectNode(itB)
- })
-
- Describe("Collating", func() {
- It("should return a collated set of containers and subject nodes in the correct order", func() {
- collated := container.Collate()
- Ω(collated).Should(HaveLen(4))
-
- Ω(collated[0]).Should(Equal(CollatedNodes{
- Containers: []*ContainerNode{container},
- Subject: itA,
- }))
-
- Ω(collated[1]).Should(Equal(CollatedNodes{
- Containers: []*ContainerNode{container, innerContainer},
- Subject: innerItA,
- }))
-
- Ω(collated[2]).Should(Equal(CollatedNodes{
- Containers: []*ContainerNode{container, innerContainer},
- Subject: innerItB,
- }))
-
- Ω(collated[3]).Should(Equal(CollatedNodes{
- Containers: []*ContainerNode{container},
- Subject: itB,
- }))
- })
- })
-
- Describe("Backpropagating Programmatic Focus", func() {
-			//This allows inner focused specs to override the focus of outer focused
- //specs and more closely maps to what a developer wants to happen
- //when debugging a test suite
-
- Context("when a parent is focused *and* an inner subject is focused", func() {
- BeforeEach(func() {
- container = New("description text", types.FlagTypeFocused, codeLocation)
- itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- container.PushSubjectNode(itA)
-
- innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0))
- container.PushContainerNode(innerContainer)
- innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0)
- innerContainer.PushSubjectNode(innerItA)
- })
-
- It("should unfocus the parent", func() {
- container.BackPropagateProgrammaticFocus()
-
- Ω(container.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused))
- })
- })
-
- Context("when a parent is focused *and* an inner container is focused", func() {
- BeforeEach(func() {
- container = New("description text", types.FlagTypeFocused, codeLocation)
- itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- container.PushSubjectNode(itA)
-
- innerContainer = New("Orange", types.FlagTypeFocused, codelocation.New(0))
- container.PushContainerNode(innerContainer)
- innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- innerContainer.PushSubjectNode(innerItA)
- })
-
- It("should unfocus the parent", func() {
- container.BackPropagateProgrammaticFocus()
-
- Ω(container.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeFocused))
- Ω(innerItA.Flag()).Should(Equal(types.FlagTypeNone))
- })
- })
-
- Context("when a parent is pending and a child is focused", func() {
- BeforeEach(func() {
- container = New("description text", types.FlagTypeFocused, codeLocation)
- itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
- container.PushSubjectNode(itA)
-
- innerContainer = New("Orange", types.FlagTypePending, codelocation.New(0))
- container.PushContainerNode(innerContainer)
- innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0)
- innerContainer.PushSubjectNode(innerItA)
- })
-
- It("should not do anything", func() {
- container.BackPropagateProgrammaticFocus()
-
- Ω(container.Flag()).Should(Equal(types.FlagTypeFocused))
- Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
- Ω(innerContainer.Flag()).Should(Equal(types.FlagTypePending))
- Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused))
- })
- })
- })
-
- Describe("Shuffling", func() {
- var unshuffledCollation []CollatedNodes
- BeforeEach(func() {
- unshuffledCollation = container.Collate()
-
- r := rand.New(rand.NewSource(17))
- container.Shuffle(r)
- })
-
- It("should sort, and then shuffle, the top level contents of the container", func() {
- shuffledCollation := container.Collate()
- Ω(shuffledCollation).Should(HaveLen(len(unshuffledCollation)))
- Ω(shuffledCollation).ShouldNot(Equal(unshuffledCollation))
-
- for _, entry := range unshuffledCollation {
- Ω(shuffledCollation).Should(ContainElement(entry))
- }
-
- innerAIndex, innerBIndex := 0, 0
- for i, entry := range shuffledCollation {
- if entry.Subject == innerItA {
- innerAIndex = i
- } else if entry.Subject == innerItB {
- innerBIndex = i
- }
- }
-
- Ω(innerAIndex).Should(Equal(innerBIndex - 1))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go
deleted file mode 100644
index 8dce7be9a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package failer_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFailer(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Failer Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go
deleted file mode 100644
index 65210a40a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package failer_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/failer"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Failer", func() {
- var (
- failer *Failer
- codeLocationA types.CodeLocation
- codeLocationB types.CodeLocation
- )
-
- BeforeEach(func() {
- codeLocationA = codelocation.New(0)
- codeLocationB = codelocation.New(0)
- failer = New()
- })
-
- Context("with no failures", func() {
- It("should return success when drained", func() {
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(BeZero())
- Ω(state).Should(Equal(types.SpecStatePassed))
- })
- })
-
- Describe("Skip", func() {
- It("should handle failures", func() {
- failer.Skip("something skipped", codeLocationA)
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "something skipped",
- Location: codeLocationA,
- ForwardedPanic: "",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStateSkipped))
- })
- })
-
- Describe("Fail", func() {
- It("should handle failures", func() {
- failer.Fail("something failed", codeLocationA)
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "something failed",
- Location: codeLocationA,
- ForwardedPanic: "",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStateFailed))
- })
- })
-
- Describe("Panic", func() {
- It("should handle panics", func() {
- failer.Panic(codeLocationA, "some forwarded panic")
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "Test Panicked",
- Location: codeLocationA,
- ForwardedPanic: "some forwarded panic",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStatePanicked))
- })
- })
-
- Describe("Timeout", func() {
- It("should handle timeouts", func() {
- failer.Timeout(codeLocationA)
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "Timed out",
- Location: codeLocationA,
- ForwardedPanic: "",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStateTimedOut))
- })
- })
-
- Context("when multiple failures are registered", func() {
- BeforeEach(func() {
- failer.Fail("something failed", codeLocationA)
- failer.Fail("something else failed", codeLocationA)
- })
-
- It("should only report the first one when drained", func() {
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
-
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "something failed",
- Location: codeLocationA,
- ForwardedPanic: "",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStateFailed))
- })
-
- It("should report subsequent failures after being drained", func() {
- failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- failer.Fail("yet another thing failed", codeLocationA)
-
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
-
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "yet another thing failed",
- Location: codeLocationA,
- ForwardedPanic: "",
- ComponentType: types.SpecComponentTypeIt,
- ComponentIndex: 3,
- ComponentCodeLocation: codeLocationB,
- }))
- Ω(state).Should(Equal(types.SpecStateFailed))
- })
-
-		It("should report success on subsequent drains if no errors occur", func() {
- failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
- Ω(failure).Should(BeZero())
- Ω(state).Should(Equal(types.SpecStatePassed))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go
deleted file mode 100644
index 29fa0c6e2..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/leafnodes"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("It Nodes", func() {
- It("should report the correct type, text, flag, and code location", func() {
- codeLocation := codelocation.New(0)
- it := NewItNode("my it node", func() {}, types.FlagTypeFocused, codeLocation, 0, nil, 3)
- Ω(it.Type()).Should(Equal(types.SpecComponentTypeIt))
- Ω(it.Flag()).Should(Equal(types.FlagTypeFocused))
- Ω(it.Text()).Should(Equal("my it node"))
- Ω(it.CodeLocation()).Should(Equal(codeLocation))
- Ω(it.Samples()).Should(Equal(1))
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go
deleted file mode 100644
index a7ba9e006..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestLeafNode(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "LeafNode Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
deleted file mode 100644
index 1cd13336a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/leafnodes"
- . "github.com/onsi/gomega"
-
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Measure Nodes", func() {
- It("should report the correct type, text, flag, and code location", func() {
- codeLocation := codelocation.New(0)
- measure := NewMeasureNode("my measure node", func(b Benchmarker) {}, types.FlagTypeFocused, codeLocation, 10, nil, 3)
- Ω(measure.Type()).Should(Equal(types.SpecComponentTypeMeasure))
- Ω(measure.Flag()).Should(Equal(types.FlagTypeFocused))
- Ω(measure.Text()).Should(Equal("my measure node"))
- Ω(measure.CodeLocation()).Should(Equal(codeLocation))
- Ω(measure.Samples()).Should(Equal(10))
- })
-
- Describe("benchmarking", func() {
- var measure *MeasureNode
-
- Describe("Value", func() {
- BeforeEach(func() {
- measure = NewMeasureNode("the measurement", func(b Benchmarker) {
- b.RecordValue("foo", 7, "info!")
- b.RecordValue("foo", 2)
- b.RecordValue("foo", 3)
- b.RecordValue("bar", 0.3)
- b.RecordValue("bar", 0.1)
- b.RecordValue("bar", 0.5)
- b.RecordValue("bar", 0.7)
- }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
- Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
- })
-
- It("records passed in values and reports on them", func() {
- report := measure.MeasurementsReport()
- Ω(report).Should(HaveLen(2))
- Ω(report["foo"].Name).Should(Equal("foo"))
- Ω(report["foo"].Info).Should(Equal("info!"))
- Ω(report["foo"].Order).Should(Equal(0))
- Ω(report["foo"].SmallestLabel).Should(Equal("Smallest"))
- Ω(report["foo"].LargestLabel).Should(Equal(" Largest"))
- Ω(report["foo"].AverageLabel).Should(Equal(" Average"))
- Ω(report["foo"].Units).Should(Equal(""))
- Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3}))
- Ω(report["foo"].Smallest).Should(BeNumerically("==", 2))
- Ω(report["foo"].Largest).Should(BeNumerically("==", 7))
- Ω(report["foo"].Average).Should(BeNumerically("==", 4))
- Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01))
-
- Ω(report["bar"].Name).Should(Equal("bar"))
- Ω(report["bar"].Info).Should(BeNil())
- Ω(report["bar"].SmallestLabel).Should(Equal("Smallest"))
- Ω(report["bar"].Order).Should(Equal(1))
- Ω(report["bar"].LargestLabel).Should(Equal(" Largest"))
- Ω(report["bar"].AverageLabel).Should(Equal(" Average"))
- Ω(report["bar"].Units).Should(Equal(""))
- Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7}))
- Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1))
- Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7))
- Ω(report["bar"].Average).Should(BeNumerically("==", 0.4))
- Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01))
- })
- })
-
- Describe("Value with precision", func() {
- BeforeEach(func() {
- measure = NewMeasureNode("the measurement", func(b Benchmarker) {
- b.RecordValueWithPrecision("foo", 7, "ms", 7, "info!")
- b.RecordValueWithPrecision("foo", 2, "ms", 6)
- b.RecordValueWithPrecision("foo", 3, "ms", 5)
- b.RecordValueWithPrecision("bar", 0.3, "ns", 4)
- b.RecordValueWithPrecision("bar", 0.1, "ns", 3)
- b.RecordValueWithPrecision("bar", 0.5, "ns", 2)
- b.RecordValueWithPrecision("bar", 0.7, "ns", 1)
- }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
- Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
- })
-
- It("records passed in values and reports on them", func() {
- report := measure.MeasurementsReport()
- Ω(report).Should(HaveLen(2))
- Ω(report["foo"].Name).Should(Equal("foo"))
- Ω(report["foo"].Info).Should(Equal("info!"))
- Ω(report["foo"].Order).Should(Equal(0))
- Ω(report["foo"].SmallestLabel).Should(Equal("Smallest"))
- Ω(report["foo"].LargestLabel).Should(Equal(" Largest"))
- Ω(report["foo"].AverageLabel).Should(Equal(" Average"))
- Ω(report["foo"].Units).Should(Equal("ms"))
- Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3}))
- Ω(report["foo"].Smallest).Should(BeNumerically("==", 2))
- Ω(report["foo"].Largest).Should(BeNumerically("==", 7))
- Ω(report["foo"].Average).Should(BeNumerically("==", 4))
- Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01))
-
- Ω(report["bar"].Name).Should(Equal("bar"))
- Ω(report["bar"].Info).Should(BeNil())
- Ω(report["bar"].SmallestLabel).Should(Equal("Smallest"))
- Ω(report["bar"].Order).Should(Equal(1))
- Ω(report["bar"].LargestLabel).Should(Equal(" Largest"))
- Ω(report["bar"].AverageLabel).Should(Equal(" Average"))
- Ω(report["bar"].Units).Should(Equal("ns"))
- Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7}))
- Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1))
- Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7))
- Ω(report["bar"].Average).Should(BeNumerically("==", 0.4))
- Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01))
- })
- })
-
- Describe("Time", func() {
- BeforeEach(func() {
- measure = NewMeasureNode("the measurement", func(b Benchmarker) {
- b.Time("foo", func() {
- time.Sleep(200 * time.Millisecond)
- }, "info!")
- b.Time("foo", func() {
- time.Sleep(300 * time.Millisecond)
- })
- b.Time("foo", func() {
- time.Sleep(250 * time.Millisecond)
- })
- }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
- Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
- })
-
- It("records passed in values and reports on them", func() {
- report := measure.MeasurementsReport()
- Ω(report).Should(HaveLen(1))
- Ω(report["foo"].Name).Should(Equal("foo"))
- Ω(report["foo"].Info).Should(Equal("info!"))
- Ω(report["foo"].SmallestLabel).Should(Equal("Fastest Time"))
- Ω(report["foo"].LargestLabel).Should(Equal("Slowest Time"))
- Ω(report["foo"].AverageLabel).Should(Equal("Average Time"))
- Ω(report["foo"].Units).Should(Equal("s"))
- Ω(report["foo"].Results).Should(HaveLen(3))
- Ω(report["foo"].Results[0]).Should(BeNumerically("~", 0.2, 0.06))
- Ω(report["foo"].Results[1]).Should(BeNumerically("~", 0.3, 0.06))
- Ω(report["foo"].Results[2]).Should(BeNumerically("~", 0.25, 0.06))
- Ω(report["foo"].Smallest).Should(BeNumerically("~", 0.2, 0.06))
- Ω(report["foo"].Largest).Should(BeNumerically("~", 0.3, 0.06))
- Ω(report["foo"].Average).Should(BeNumerically("~", 0.25, 0.06))
- Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 0.07, 0.04))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go
deleted file mode 100644
index 9810688cb..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-
- . "github.com/onsi/ginkgo/internal/leafnodes"
-
- "github.com/onsi/ginkgo/internal/codelocation"
-)
-
-var _ = Describe("Setup Nodes", func() {
- Describe("BeforeEachNodes", func() {
- It("should report the correct type and code location", func() {
- codeLocation := codelocation.New(0)
- beforeEach := NewBeforeEachNode(func() {}, codeLocation, 0, nil, 3)
- Ω(beforeEach.Type()).Should(Equal(types.SpecComponentTypeBeforeEach))
- Ω(beforeEach.CodeLocation()).Should(Equal(codeLocation))
- })
- })
-
- Describe("AfterEachNodes", func() {
- It("should report the correct type and code location", func() {
- codeLocation := codelocation.New(0)
- afterEach := NewAfterEachNode(func() {}, codeLocation, 0, nil, 3)
- Ω(afterEach.Type()).Should(Equal(types.SpecComponentTypeAfterEach))
- Ω(afterEach.CodeLocation()).Should(Equal(codeLocation))
- })
- })
-
- Describe("JustBeforeEachNodes", func() {
- It("should report the correct type and code location", func() {
- codeLocation := codelocation.New(0)
- justBeforeEach := NewJustBeforeEachNode(func() {}, codeLocation, 0, nil, 3)
- Ω(justBeforeEach.Type()).Should(Equal(types.SpecComponentTypeJustBeforeEach))
- Ω(justBeforeEach.CodeLocation()).Should(Equal(codeLocation))
- })
- })
- Describe("JustAfterEachNodes", func() {
- It("should report the correct type and code location", func() {
- codeLocation := codelocation.New(0)
- justAfterEach := NewJustAfterEachNode(func() {}, codeLocation, 0, nil, 3)
- Ω(justAfterEach.Type()).Should(Equal(types.SpecComponentTypeJustAfterEach))
- Ω(justAfterEach.CodeLocation()).Should(Equal(codeLocation))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
deleted file mode 100644
index 0897836cb..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
+++ /dev/null
@@ -1,353 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/leafnodes"
- . "github.com/onsi/gomega"
-
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type runnable interface {
- Run() (outcome types.SpecState, failure types.SpecFailure)
- CodeLocation() types.CodeLocation
-}
-
-func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
- var (
- outcome types.SpecState
- failure types.SpecFailure
-
- failer *Failer.Failer
-
- componentCodeLocation types.CodeLocation
- innerCodeLocation types.CodeLocation
-
- didRun bool
- )
-
- BeforeEach(func() {
- failer = Failer.New()
- componentCodeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
-
- didRun = false
- })
-
- Describe("synchronous functions", func() {
- Context("when the function passes", func() {
- BeforeEach(func() {
- outcome, failure = build(func() {
- didRun = true
- }, 0, failer, componentCodeLocation).Run()
- })
-
-			It("should have a successful outcome", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStatePassed))
- Ω(failure).Should(BeZero())
- })
- })
-
- Context("when a failure occurs", func() {
- BeforeEach(func() {
- outcome, failure = build(func() {
- didRun = true
- failer.Fail("bam", innerCodeLocation)
- panic("should not matter")
- }, 0, failer, componentCodeLocation).Run()
- })
-
- It("should return the failure", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStateFailed))
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "bam",
- Location: innerCodeLocation,
- ForwardedPanic: "",
- ComponentIndex: componentIndex,
- ComponentType: componentType,
- ComponentCodeLocation: componentCodeLocation,
- }))
- })
- })
-
- Context("when a panic occurs", func() {
- BeforeEach(func() {
- outcome, failure = build(func() {
- didRun = true
- innerCodeLocation = codelocation.New(0)
- panic("ack!")
- }, 0, failer, componentCodeLocation).Run()
- })
-
- It("should return the panic", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStatePanicked))
- Ω(failure.ForwardedPanic).Should(Equal("ack!"))
- })
- })
-
- Context("when a panic occurs with a nil value", func() {
- BeforeEach(func() {
- outcome, failure = build(func() {
- didRun = true
- innerCodeLocation = codelocation.New(0)
- panic(nil)
- }, 0, failer, componentCodeLocation).Run()
- })
-
- It("should return the nil-valued panic", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStatePanicked))
- Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
- })
- })
-
- })
-}
-
-func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
- var (
- outcome types.SpecState
- failure types.SpecFailure
-
- failer *Failer.Failer
-
- componentCodeLocation types.CodeLocation
- innerCodeLocation types.CodeLocation
-
- didRun bool
- )
-
- BeforeEach(func() {
- failer = Failer.New()
- componentCodeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
-
- didRun = false
- })
-
- Describe("asynchronous functions", func() {
- var timeoutDuration time.Duration
-
- BeforeEach(func() {
- timeoutDuration = time.Duration(1 * float64(time.Second))
- })
-
- Context("when running", func() {
- It("should run the function as a goroutine, and block until it's done", func() {
- proveAsync := make(chan bool)
-
- build(func(done Done) {
- didRun = true
- proveAsync <- true
- close(done)
- }, timeoutDuration, failer, componentCodeLocation).Run()
-
- Eventually(proveAsync).Should(Receive(Equal(true)))
- })
- })
-
- Context("when the function passes", func() {
- BeforeEach(func() {
- outcome, failure = build(func(done Done) {
- didRun = true
- close(done)
- }, timeoutDuration, failer, componentCodeLocation).Run()
- })
-
-			It("should have a successful outcome", func() {
- Ω(didRun).Should(BeTrue())
- Ω(outcome).Should(Equal(types.SpecStatePassed))
- Ω(failure).Should(BeZero())
- })
- })
-
- Context("when the function fails", func() {
- BeforeEach(func() {
- outcome, failure = build(func(done Done) {
- didRun = true
- failer.Fail("bam", innerCodeLocation)
- time.Sleep(20 * time.Millisecond)
- defer close(done)
- panic("doesn't matter")
- }, 10*time.Millisecond, failer, componentCodeLocation).Run()
- })
-
- It("should return the failure", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStateFailed))
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "bam",
- Location: innerCodeLocation,
- ForwardedPanic: "",
- ComponentIndex: componentIndex,
- ComponentType: componentType,
- ComponentCodeLocation: componentCodeLocation,
- }))
- })
- })
-
- Context("when the function doesn't close the done channel in time", func() {
- var guard chan struct{}
-
- BeforeEach(func() {
- guard = make(chan struct{})
- outcome, failure = build(func(done Done) {
- didRun = true
- close(guard)
- }, 10*time.Millisecond, failer, componentCodeLocation).Run()
- })
-
- It("should return a timeout", func() {
- <-guard
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStateTimedOut))
- Ω(failure).Should(Equal(types.SpecFailure{
- Message: "Timed out",
- Location: componentCodeLocation,
- ForwardedPanic: "",
- ComponentIndex: componentIndex,
- ComponentType: componentType,
- ComponentCodeLocation: componentCodeLocation,
- }))
- })
- })
-
- Context("when the function panics", func() {
- BeforeEach(func() {
- outcome, failure = build(func(done Done) {
- didRun = true
- innerCodeLocation = codelocation.New(0)
- panic("ack!")
- }, 100*time.Millisecond, failer, componentCodeLocation).Run()
- })
-
- It("should return the panic", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStatePanicked))
- Ω(failure.ForwardedPanic).Should(Equal("ack!"))
- })
- })
-
- Context("when the function panics with a nil value", func() {
- BeforeEach(func() {
- outcome, failure = build(func(done Done) {
- didRun = true
- innerCodeLocation = codelocation.New(0)
- panic(nil)
- }, 100*time.Millisecond, failer, componentCodeLocation).Run()
- })
-
- It("should return the nil-valued panic", func() {
- Ω(didRun).Should(BeTrue())
-
- Ω(outcome).Should(Equal(types.SpecStatePanicked))
- Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
- })
- })
- })
-}
-
-func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) {
- var (
- failer *Failer.Failer
- componentCodeLocation types.CodeLocation
- )
-
- BeforeEach(func() {
- failer = Failer.New()
- componentCodeLocation = codelocation.New(0)
- })
-
- Describe("invalid functions", func() {
- Context("when passed something that's not a function", func() {
- It("should panic", func() {
- Ω(func() {
- build("not a function", 0, failer, componentCodeLocation)
- }).Should(Panic())
- })
- })
-
- Context("when the function takes the wrong kind of argument", func() {
- It("should panic", func() {
- Ω(func() {
- build(func(oops string) {}, 0, failer, componentCodeLocation)
- }).Should(Panic())
- })
- })
-
- Context("when the function takes more than one argument", func() {
- It("should panic", func() {
- Ω(func() {
- build(func(done Done, oops string) {}, 0, failer, componentCodeLocation)
- }).Should(Panic())
- })
- })
- })
-}
-
-var _ = Describe("Shared RunnableNode behavior", func() {
- Describe("It Nodes", func() {
- build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
- return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3)
- }
-
- SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
- AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
- InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt)
- })
-
- Describe("Measure Nodes", func() {
- build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
- return NewMeasureNode("", func(Benchmarker) {
- reflect.ValueOf(body).Call([]reflect.Value{})
- }, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3)
- }
-
- SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3)
- })
-
- Describe("BeforeEach Nodes", func() {
- build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
- return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
- }
-
- SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
- AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
- InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach)
- })
-
- Describe("AfterEach Nodes", func() {
- build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
- return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3)
- }
-
- SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
- AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
- InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach)
- })
-
- Describe("JustBeforeEach Nodes", func() {
- build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
- return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
- }
-
- SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
- AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
- InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach)
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go
deleted file mode 100644
index 246b329fe..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- . "github.com/onsi/ginkgo/internal/leafnodes"
-
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("SuiteNodes", func() {
- Describe("BeforeSuite nodes", func() {
- var befSuite SuiteNode
- var failer *Failer.Failer
- var codeLocation types.CodeLocation
- var innerCodeLocation types.CodeLocation
- var outcome bool
-
- BeforeEach(func() {
- failer = Failer.New()
- codeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
- })
-
- Context("when the body passes", func() {
- BeforeEach(func() {
- befSuite = NewBeforeSuiteNode(func() {
- time.Sleep(10 * time.Millisecond)
- }, codeLocation, 0, failer)
- outcome = befSuite.Run(0, 0, "")
- })
-
- It("should return true when run and report as passed", func() {
- Ω(outcome).Should(BeTrue())
- Ω(befSuite.Passed()).Should(BeTrue())
- })
-
- It("should have the correct summary", func() {
- summary := befSuite.Summary()
- Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.CodeLocation).Should(Equal(codeLocation))
- Ω(summary.State).Should(Equal(types.SpecStatePassed))
- Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
- Ω(summary.Failure).Should(BeZero())
- })
- })
-
- Context("when the body fails", func() {
- BeforeEach(func() {
- befSuite = NewBeforeSuiteNode(func() {
- failer.Fail("oops", innerCodeLocation)
- }, codeLocation, 0, failer)
- outcome = befSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(befSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := befSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateFailed))
- Ω(summary.Failure.Message).Should(Equal("oops"))
- Ω(summary.Failure.Location).Should(Equal(innerCodeLocation))
- Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
-
- Context("when the body times out", func() {
- BeforeEach(func() {
- befSuite = NewBeforeSuiteNode(func(done Done) {
- }, codeLocation, time.Millisecond, failer)
- outcome = befSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(befSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := befSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateTimedOut))
- Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
-
- Context("when the body panics", func() {
- BeforeEach(func() {
- befSuite = NewBeforeSuiteNode(func() {
- panic("bam")
- }, codeLocation, 0, failer)
- outcome = befSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(befSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := befSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStatePanicked))
- Ω(summary.Failure.ForwardedPanic).Should(Equal("bam"))
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
- })
-
- Describe("AfterSuite nodes", func() {
- var aftSuite SuiteNode
- var failer *Failer.Failer
- var codeLocation types.CodeLocation
- var innerCodeLocation types.CodeLocation
- var outcome bool
-
- BeforeEach(func() {
- failer = Failer.New()
- codeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
- })
-
- Context("when the body passes", func() {
- BeforeEach(func() {
- aftSuite = NewAfterSuiteNode(func() {
- time.Sleep(10 * time.Millisecond)
- }, codeLocation, 0, failer)
- outcome = aftSuite.Run(0, 0, "")
- })
-
- It("should return true when run and report as passed", func() {
- Ω(outcome).Should(BeTrue())
- Ω(aftSuite.Passed()).Should(BeTrue())
- })
-
- It("should have the correct summary", func() {
- summary := aftSuite.Summary()
- Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
- Ω(summary.CodeLocation).Should(Equal(codeLocation))
- Ω(summary.State).Should(Equal(types.SpecStatePassed))
- Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
- Ω(summary.Failure).Should(BeZero())
- })
- })
-
- Context("when the body fails", func() {
- BeforeEach(func() {
- aftSuite = NewAfterSuiteNode(func() {
- failer.Fail("oops", innerCodeLocation)
- }, codeLocation, 0, failer)
- outcome = aftSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(aftSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := aftSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateFailed))
- Ω(summary.Failure.Message).Should(Equal("oops"))
- Ω(summary.Failure.Location).Should(Equal(innerCodeLocation))
- Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
-
- Context("when the body times out", func() {
- BeforeEach(func() {
- aftSuite = NewAfterSuiteNode(func(done Done) {
- }, codeLocation, time.Millisecond, failer)
- outcome = aftSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(aftSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := aftSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateTimedOut))
- Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
-
- Context("when the body panics", func() {
- BeforeEach(func() {
- aftSuite = NewAfterSuiteNode(func() {
- panic("bam")
- }, codeLocation, 0, failer)
- outcome = aftSuite.Run(0, 0, "")
- })
-
- It("should return false when run and report as failed", func() {
- Ω(outcome).Should(BeFalse())
- Ω(aftSuite.Passed()).Should(BeFalse())
- })
-
- It("should have the correct summary", func() {
- summary := aftSuite.Summary()
- Ω(summary.State).Should(Equal(types.SpecStatePanicked))
- Ω(summary.Failure.ForwardedPanic).Should(Equal("bam"))
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
deleted file mode 100644
index edbdf6ae5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package leafnodes_test
-
-import (
- "sync"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-
- "net/http"
-
- "github.com/onsi/gomega/ghttp"
-
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
-)
-
-var _ = Describe("SynchronizedAfterSuiteNode", func() {
- var failer *Failer.Failer
- var node SuiteNode
- var codeLocation types.CodeLocation
- var innerCodeLocation types.CodeLocation
- var outcome bool
- var server *ghttp.Server
- var things []string
- var lock *sync.Mutex
-
- BeforeEach(func() {
- things = []string{}
- server = ghttp.NewServer()
- codeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
- failer = Failer.New()
- lock = &sync.Mutex{}
- })
-
- AfterEach(func() {
- server.Close()
- })
-
- newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode {
- return NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer)
- }
-
- ranThing := func(thing string) {
- lock.Lock()
- defer lock.Unlock()
- things = append(things, thing)
- }
-
- thingsThatRan := func() []string {
- lock.Lock()
- defer lock.Unlock()
- return things
- }
-
- Context("when not running in parallel", func() {
- Context("when all is well", func() {
- BeforeEach(func() {
- node = newNode(func() {
- ranThing("A")
- }, func() {
- ranThing("B")
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should run A, then B", func() {
- Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
- })
-
- It("should report success", func() {
- Ω(outcome).Should(BeTrue())
- Ω(node.Passed()).Should(BeTrue())
- Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
- })
- })
-
- Context("when A fails", func() {
- BeforeEach(func() {
- node = newNode(func() {
- ranThing("A")
- failer.Fail("bam", innerCodeLocation)
- }, func() {
- ranThing("B")
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should still run B", func() {
- Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
- })
- })
-
- Context("when B fails", func() {
- BeforeEach(func() {
- node = newNode(func() {
- ranThing("A")
- }, func() {
- ranThing("B")
- failer.Fail("bam", innerCodeLocation)
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should run all the things", func() {
- Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
- })
- })
- })
-
- Context("when running in parallel", func() {
- Context("as the first node", func() {
- BeforeEach(func() {
- server.AppendHandlers(ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
- func(writer http.ResponseWriter, request *http.Request) {
- ranThing("Request1")
- },
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
- ), ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
- func(writer http.ResponseWriter, request *http.Request) {
- ranThing("Request2")
- },
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
- ), ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
- func(writer http.ResponseWriter, request *http.Request) {
- ranThing("Request3")
- },
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: true}),
- ))
-
- node = newNode(func() {
- ranThing("A")
- }, func() {
- ranThing("B")
- })
-
- outcome = node.Run(1, 3, server.URL())
- })
-
-			It("should run A and, when the server says it's time, run B", func() {
- Ω(thingsThatRan()).Should(Equal([]string{"A", "Request1", "Request2", "Request3", "B"}))
- })
-
- It("should report success", func() {
- Ω(outcome).Should(BeTrue())
- Ω(node.Passed()).Should(BeTrue())
- Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
- })
- })
-
- Context("as any other node", func() {
- BeforeEach(func() {
- node = newNode(func() {
- ranThing("A")
- }, func() {
- ranThing("B")
- })
-
- outcome = node.Run(2, 3, server.URL())
- })
-
- It("should run A, and not run B", func() {
- Ω(thingsThatRan()).Should(Equal([]string{"A"}))
- })
-
- It("should not talk to the server", func() {
- Ω(server.ReceivedRequests()).Should(BeEmpty())
- })
-
- It("should report success", func() {
- Ω(outcome).Should(BeTrue())
- Ω(node.Passed()).Should(BeTrue())
- Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
deleted file mode 100644
index 46c3e276b..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package leafnodes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/leafnodes"
- . "github.com/onsi/gomega"
-
- "net/http"
-
- "github.com/onsi/gomega/ghttp"
-
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("SynchronizedBeforeSuiteNode", func() {
- var failer *Failer.Failer
- var node SuiteNode
- var codeLocation types.CodeLocation
- var innerCodeLocation types.CodeLocation
- var outcome bool
- var server *ghttp.Server
-
- BeforeEach(func() {
- server = ghttp.NewServer()
- codeLocation = codelocation.New(0)
- innerCodeLocation = codelocation.New(0)
- failer = Failer.New()
- })
-
- AfterEach(func() {
- server.Close()
- })
-
- newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode {
- return NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer)
- }
-
- Describe("when not running in parallel", func() {
- Context("when all is well", func() {
- var data []byte
- BeforeEach(func() {
- data = nil
-
- node = newNode(func() []byte {
- return []byte("my data")
- }, func(d []byte) {
- data = d
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should run A, then B passing the output from A to B", func() {
- Ω(data).Should(Equal([]byte("my data")))
- })
-
- It("should report success", func() {
- Ω(outcome).Should(BeTrue())
- Ω(node.Passed()).Should(BeTrue())
- Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
- })
- })
-
- Context("when A fails", func() {
- var ranB bool
- BeforeEach(func() {
- ranB = false
- node = newNode(func() []byte {
- failer.Fail("boom", innerCodeLocation)
- return nil
- }, func([]byte) {
- ranB = true
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should not run B", func() {
- Ω(ranB).Should(BeFalse())
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
- })
- })
-
- Context("when B fails", func() {
- BeforeEach(func() {
- node = newNode(func() []byte {
- return nil
- }, func([]byte) {
- failer.Fail("boom", innerCodeLocation)
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
- })
- })
-
- Context("when A times out", func() {
- var ranB bool
- BeforeEach(func() {
- ranB = false
- node = newNode(func(Done) []byte {
- time.Sleep(time.Second)
- return nil
- }, func([]byte) {
- ranB = true
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should not run B", func() {
- Ω(ranB).Should(BeFalse())
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut))
- })
- })
-
- Context("when B times out", func() {
- BeforeEach(func() {
- node = newNode(func() []byte {
- return nil
- }, func([]byte, Done) {
- time.Sleep(time.Second)
- })
-
- outcome = node.Run(1, 1, server.URL())
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
- Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut))
- })
- })
- })
-
- Describe("when running in parallel", func() {
- var ranB bool
- var parallelNode, parallelTotal int
- BeforeEach(func() {
- ranB = false
- parallelNode, parallelTotal = 1, 3
- })
-
- Context("as the first node, it runs A", func() {
- var expectedState types.RemoteBeforeSuiteData
-
- BeforeEach(func() {
- parallelNode, parallelTotal = 1, 3
- })
-
- JustBeforeEach(func() {
- server.AppendHandlers(ghttp.CombineHandlers(
- ghttp.VerifyRequest("POST", "/BeforeSuiteState"),
- ghttp.VerifyJSONRepresenting(expectedState),
- ))
-
- outcome = node.Run(parallelNode, parallelTotal, server.URL())
- })
-
- Context("when A succeeds", func() {
- BeforeEach(func() {
- expectedState = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
-
- node = newNode(func() []byte {
- return []byte("my data")
- }, func([]byte) {
- ranB = true
- })
- })
-
- It("should post about A succeeding", func() {
- Ω(server.ReceivedRequests()).Should(HaveLen(1))
- })
-
- It("should run B", func() {
- Ω(ranB).Should(BeTrue())
- })
-
- It("should report success", func() {
- Ω(outcome).Should(BeTrue())
- })
- })
-
- Context("when A fails", func() {
- BeforeEach(func() {
- expectedState = types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateFailed}
-
- node = newNode(func() []byte {
- panic("BAM")
- }, func([]byte) {
- ranB = true
- })
- })
-
- It("should post about A failing", func() {
- Ω(server.ReceivedRequests()).Should(HaveLen(1))
- })
-
- It("should not run B", func() {
- Ω(ranB).Should(BeFalse())
- })
-
- It("should report failure", func() {
- Ω(outcome).Should(BeFalse())
- })
- })
- })
-
- Context("as the Nth node", func() {
- var statusCode int
- var response interface{}
- var ranA bool
- var bData []byte
-
- BeforeEach(func() {
- ranA = false
- bData = nil
-
- statusCode = http.StatusOK
-
- server.AppendHandlers(ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
- ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
- ), ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
- ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
- ), ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
- ghttp.RespondWithJSONEncodedPtr(&statusCode, &response),
- ))
-
- node = newNode(func() []byte {
- ranA = true
- return nil
- }, func(data []byte) {
- bData = data
- })
-
- parallelNode, parallelTotal = 2, 3
- })
-
- Context("when A on node1 succeeds", func() {
- BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
- outcome = node.Run(parallelNode, parallelTotal, server.URL())
- })
-
- It("should not run A", func() {
- Ω(ranA).Should(BeFalse())
- })
-
- It("should poll for A", func() {
- Ω(server.ReceivedRequests()).Should(HaveLen(3))
- })
-
- It("should run B when the polling succeeds", func() {
- Ω(bData).Should(Equal([]byte("my data")))
- })
-
- It("should succeed", func() {
- Ω(outcome).Should(BeTrue())
- Ω(node.Passed()).Should(BeTrue())
- })
- })
-
- Context("when A on node1 fails", func() {
- BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateFailed}
- outcome = node.Run(parallelNode, parallelTotal, server.URL())
- })
-
- It("should not run A", func() {
- Ω(ranA).Should(BeFalse())
- })
-
- It("should poll for A", func() {
- Ω(server.ReceivedRequests()).Should(HaveLen(3))
- })
-
- It("should not run B", func() {
- Ω(bData).Should(BeNil())
- })
-
- It("should fail", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
-
- summary := node.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateFailed))
- Ω(summary.Failure.Message).Should(Equal("BeforeSuite on Node 1 failed"))
- Ω(summary.Failure.Location).Should(Equal(codeLocation))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
-
- Context("when node1 disappears", func() {
- BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateDisappeared}
- outcome = node.Run(parallelNode, parallelTotal, server.URL())
- })
-
- It("should not run A", func() {
- Ω(ranA).Should(BeFalse())
- })
-
- It("should poll for A", func() {
- Ω(server.ReceivedRequests()).Should(HaveLen(3))
- })
-
- It("should not run B", func() {
- Ω(bData).Should(BeNil())
- })
-
- It("should fail", func() {
- Ω(outcome).Should(BeFalse())
- Ω(node.Passed()).Should(BeFalse())
-
- summary := node.Summary()
- Ω(summary.State).Should(Equal(types.SpecStateFailed))
- Ω(summary.Failure.Message).Should(Equal("Node 1 disappeared before completing BeforeSuite"))
- Ω(summary.Failure.Location).Should(Equal(codeLocation))
- Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
- Ω(summary.Failure.ComponentIndex).Should(Equal(0))
- Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
- })
- })
- })
- })
-
- Describe("construction", func() {
- Describe("the first function", func() {
- Context("when the first function returns a byte array", func() {
- Context("and takes nothing", func() {
- It("should be fine", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func([]byte) {})
- }).ShouldNot(Panic())
- })
- })
-
- Context("and takes a done function", func() {
- It("should be fine", func() {
- Ω(func() {
- newNode(func(Done) []byte { return nil }, func([]byte) {})
- }).ShouldNot(Panic())
- })
- })
-
- Context("and takes more than one thing", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func(Done, Done) []byte { return nil }, func([]byte) {})
- }).Should(Panic())
- })
- })
-
- Context("and takes something else", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func(bool) []byte { return nil }, func([]byte) {})
- }).Should(Panic())
- })
- })
- })
-
- Context("when the first function does not return a byte array", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func() {}, func([]byte) {})
- }).Should(Panic())
-
- Ω(func() {
- newNode(func() []int { return nil }, func([]byte) {})
- }).Should(Panic())
- })
- })
- })
-
- Describe("the second function", func() {
- Context("when the second function takes a byte array", func() {
- It("should be fine", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func([]byte) {})
- }).ShouldNot(Panic())
- })
- })
-
- Context("when it also takes a done channel", func() {
- It("should be fine", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func([]byte, Done) {})
- }).ShouldNot(Panic())
- })
- })
-
- Context("if it takes anything else", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func([]byte, chan bool) {})
- }).Should(Panic())
-
- Ω(func() {
- newNode(func() []byte { return nil }, func(string) {})
- }).Should(Panic())
- })
- })
-
- Context("if it takes nothing at all", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func() {})
- }).Should(Panic())
- })
- })
-
- Context("if it returns something", func() {
- It("should panic", func() {
- Ω(func() {
- newNode(func() []byte { return nil }, func([]byte) []byte { return nil })
- }).Should(Panic())
- })
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
deleted file mode 100644
index aedf93927..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package remote_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "time"
-
- "github.com/onsi/ginkgo/config"
- . "github.com/onsi/ginkgo/internal/remote"
- st "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Aggregator", func() {
- var (
- aggregator *Aggregator
- reporterConfig config.DefaultReporterConfigType
- stenographer *st.FakeStenographer
- result chan bool
-
- ginkgoConfig1 config.GinkgoConfigType
- ginkgoConfig2 config.GinkgoConfigType
-
- suiteSummary1 *types.SuiteSummary
- suiteSummary2 *types.SuiteSummary
-
- beforeSummary *types.SetupSummary
- afterSummary *types.SetupSummary
- specSummary *types.SpecSummary
-
- suiteDescription string
- )
-
- BeforeEach(func() {
- reporterConfig = config.DefaultReporterConfigType{
- NoColor: false,
- SlowSpecThreshold: 0.1,
- NoisyPendings: true,
- Succinct: false,
- Verbose: true,
- }
- stenographer = st.NewFakeStenographer()
- result = make(chan bool, 1)
- aggregator = NewAggregator(2, result, reporterConfig, stenographer)
-
- //
- // now set up some fixture data
- //
-
- ginkgoConfig1 = config.GinkgoConfigType{
- RandomSeed: 1138,
- RandomizeAllSpecs: true,
- ParallelNode: 1,
- ParallelTotal: 2,
- }
-
- ginkgoConfig2 = config.GinkgoConfigType{
- RandomSeed: 1138,
- RandomizeAllSpecs: true,
- ParallelNode: 2,
- ParallelTotal: 2,
- }
-
- suiteDescription = "My Parallel Suite"
-
- suiteSummary1 = &types.SuiteSummary{
- SuiteDescription: suiteDescription,
-
- NumberOfSpecsBeforeParallelization: 30,
- NumberOfTotalSpecs: 17,
- NumberOfSpecsThatWillBeRun: 15,
- NumberOfPendingSpecs: 1,
- NumberOfSkippedSpecs: 1,
- }
-
- suiteSummary2 = &types.SuiteSummary{
- SuiteDescription: suiteDescription,
-
- NumberOfSpecsBeforeParallelization: 30,
- NumberOfTotalSpecs: 13,
- NumberOfSpecsThatWillBeRun: 8,
- NumberOfPendingSpecs: 2,
- NumberOfSkippedSpecs: 3,
- }
-
- beforeSummary = &types.SetupSummary{
- State: types.SpecStatePassed,
- CapturedOutput: "BeforeSuiteOutput",
- }
-
- afterSummary = &types.SetupSummary{
- State: types.SpecStatePassed,
- CapturedOutput: "AfterSuiteOutput",
- }
-
- specSummary = &types.SpecSummary{
- State: types.SpecStatePassed,
- CapturedOutput: "SpecOutput",
- }
- })
-
- call := func(method string, args ...interface{}) st.FakeStenographerCall {
- return st.NewFakeStenographerCall(method, args...)
- }
-
- beginSuite := func() {
- stenographer.Reset()
- aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
- aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
- Eventually(func() interface{} {
- return len(stenographer.Calls())
- }).Should(BeNumerically(">=", 3))
- }
-
- Describe("Announcing the beginning of the suite", func() {
- Context("When one of the parallel-suites starts", func() {
- BeforeEach(func() {
- aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
- })
-
- It("should be silent", func() {
- Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
- })
- })
-
- Context("once all of the parallel-suites have started", func() {
- BeforeEach(func() {
- aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
- aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(HaveLen(3))
- })
-
- It("should announce the beginning of the suite", func() {
- Ω(stenographer.Calls()).Should(HaveLen(3))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", suiteDescription, ginkgoConfig1.RandomSeed, true, false)))
- Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceTotalNumberOfSpecs", 30, false)))
- Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceAggregatedParallelRun", 2, false)))
- })
- })
- })
-
- Describe("Announcing specs and before suites", func() {
- Context("when the parallel-suites have not all started", func() {
- BeforeEach(func() {
- aggregator.BeforeSuiteDidRun(beforeSummary)
- aggregator.AfterSuiteDidRun(afterSummary)
- aggregator.SpecDidComplete(specSummary)
- })
-
- It("should not announce any specs", func() {
- Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
- })
-
- Context("when the parallel-suites subsequently start", func() {
- BeforeEach(func() {
- beginSuite()
- })
-
- It("should announce the specs, the before suites and the after suites", func() {
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(ContainElement(call("AnnounceSuccesfulSpec", specSummary)))
-
- Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
- Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
- })
- })
- })
-
- Context("When the parallel-suites have all started", func() {
- BeforeEach(func() {
- beginSuite()
- stenographer.Reset()
- })
-
- Context("When a spec completes", func() {
- BeforeEach(func() {
- aggregator.BeforeSuiteDidRun(beforeSummary)
- aggregator.SpecDidComplete(specSummary)
- aggregator.AfterSuiteDidRun(afterSummary)
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(HaveLen(5))
- })
-
- It("should announce the captured output of the BeforeSuite", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
- })
-
- It("should announce that the spec will run (when in verbose mode)", func() {
- Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecWillRun", specSummary)))
- })
-
- It("should announce the captured stdout of the spec", func() {
- Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceCapturedOutput", specSummary.CapturedOutput)))
- })
-
- It("should announce completion", func() {
- Ω(stenographer.Calls()[3]).Should(Equal(call("AnnounceSuccesfulSpec", specSummary)))
- })
-
- It("should announce the captured output of the AfterSuite", func() {
- Ω(stenographer.Calls()[4]).Should(Equal(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
- })
- })
- })
- })
-
- Describe("Announcing the end of the suite", func() {
- BeforeEach(func() {
- beginSuite()
- stenographer.Reset()
- })
-
- Context("When one of the parallel-suites ends", func() {
- BeforeEach(func() {
- aggregator.SpecSuiteDidEnd(suiteSummary2)
- })
-
- It("should be silent", func() {
- Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
- })
-
- It("should not notify the channel", func() {
- Ω(result).Should(BeEmpty())
- })
- })
-
- Context("once all of the parallel-suites end", func() {
- BeforeEach(func() {
- time.Sleep(200 * time.Millisecond)
-
- suiteSummary1.SuiteSucceeded = true
- suiteSummary1.NumberOfPassedSpecs = 15
- suiteSummary1.NumberOfFailedSpecs = 0
- suiteSummary1.NumberOfFlakedSpecs = 3
- suiteSummary2.SuiteSucceeded = false
- suiteSummary2.NumberOfPassedSpecs = 5
- suiteSummary2.NumberOfFailedSpecs = 3
- suiteSummary2.NumberOfFlakedSpecs = 4
-
- aggregator.SpecSuiteDidEnd(suiteSummary2)
- aggregator.SpecSuiteDidEnd(suiteSummary1)
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(HaveLen(2))
- })
-
- It("should announce the end of the suite", func() {
- compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
-
- Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
- Ω(compositeSummary.NumberOfSpecsThatWillBeRun).Should(Equal(23))
- Ω(compositeSummary.NumberOfTotalSpecs).Should(Equal(30))
- Ω(compositeSummary.NumberOfPassedSpecs).Should(Equal(20))
- Ω(compositeSummary.NumberOfFailedSpecs).Should(Equal(3))
- Ω(compositeSummary.NumberOfPendingSpecs).Should(Equal(3))
- Ω(compositeSummary.NumberOfSkippedSpecs).Should(Equal(4))
- Ω(compositeSummary.NumberOfFlakedSpecs).Should(Equal(7))
- Ω(compositeSummary.RunTime.Seconds()).Should(BeNumerically(">", 0.2))
- })
- })
-
- Context("when all the parallel-suites pass", func() {
- BeforeEach(func() {
- suiteSummary1.SuiteSucceeded = true
- suiteSummary2.SuiteSucceeded = true
-
- aggregator.SpecSuiteDidEnd(suiteSummary2)
- aggregator.SpecSuiteDidEnd(suiteSummary1)
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(HaveLen(2))
- })
-
- It("should report success", func() {
- compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
-
- Ω(compositeSummary.SuiteSucceeded).Should(BeTrue())
- })
-
-			It("should notify the channel that it succeeded", func(done Done) {
- Ω(<-result).Should(BeTrue())
- close(done)
- })
- })
-
- Context("when one of the parallel-suites fails", func() {
- BeforeEach(func() {
- suiteSummary1.SuiteSucceeded = true
- suiteSummary2.SuiteSucceeded = false
-
- aggregator.SpecSuiteDidEnd(suiteSummary2)
- aggregator.SpecSuiteDidEnd(suiteSummary1)
- Eventually(func() interface{} {
- return stenographer.Calls()
- }).Should(HaveLen(2))
- })
-
- It("should report failure", func() {
- compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
-
- Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
- })
-
- It("should notify the channel that it failed", func(done Done) {
- Ω(<-result).Should(BeFalse())
- close(done)
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
deleted file mode 100644
index ef54862ea..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package remote_test
-
-import "os"
-
-type fakeOutputInterceptor struct {
- DidStartInterceptingOutput bool
- DidStopInterceptingOutput bool
- InterceptedOutput string
-}
-
-func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error {
- interceptor.DidStartInterceptingOutput = true
- return nil
-}
-
-func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- interceptor.DidStopInterceptingOutput = true
- return interceptor.InterceptedOutput, nil
-}
-
-func (interceptor *fakeOutputInterceptor) StreamTo(*os.File) {
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
deleted file mode 100644
index 3543c59c6..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package remote_test
-
-import (
- "io"
- "io/ioutil"
- "net/http"
-)
-
-type post struct {
- url string
- bodyType string
- bodyContent []byte
-}
-
-type fakePoster struct {
- posts []post
-}
-
-func newFakePoster() *fakePoster {
- return &fakePoster{
- posts: make([]post, 0),
- }
-}
-
-func (poster *fakePoster) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
- bodyContent, _ := ioutil.ReadAll(body)
- poster.posts = append(poster.posts, post{
- url: url,
- bodyType: bodyType,
- bodyContent: bodyContent,
- })
- return nil, nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
deleted file mode 100644
index 0d7e4769c..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package remote_test
-
-import (
- "encoding/json"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/config"
- . "github.com/onsi/ginkgo/internal/remote"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ForwardingReporter", func() {
- var (
- reporter *ForwardingReporter
- interceptor *fakeOutputInterceptor
- poster *fakePoster
- suiteSummary *types.SuiteSummary
- specSummary *types.SpecSummary
- setupSummary *types.SetupSummary
- serverHost string
- )
-
- BeforeEach(func() {
- serverHost = "http://127.0.0.1:7788"
-
- poster = newFakePoster()
-
- interceptor = &fakeOutputInterceptor{
- InterceptedOutput: "The intercepted output!",
- }
-
- reporter = NewForwardingReporter(config.DefaultReporterConfigType{}, serverHost, poster, interceptor, nil, "")
-
- suiteSummary = &types.SuiteSummary{
- SuiteDescription: "My Test Suite",
- }
-
- setupSummary = &types.SetupSummary{
- State: types.SpecStatePassed,
- }
-
- specSummary = &types.SpecSummary{
- ComponentTexts: []string{"My", "Spec"},
- State: types.SpecStatePassed,
- }
- })
-
- Context("When a suite begins", func() {
- BeforeEach(func() {
- reporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
- })
-
- It("should start intercepting output", func() {
- Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
- })
-
- It("should POST the SuiteSummary and Ginkgo Config to the Ginkgo server", func() {
- Ω(poster.posts).Should(HaveLen(1))
- Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteWillBegin"))
- Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
-
- var sentData struct {
- SentConfig config.GinkgoConfigType `json:"config"`
- SentSuiteSummary *types.SuiteSummary `json:"suite-summary"`
- }
-
- err := json.Unmarshal(poster.posts[0].bodyContent, &sentData)
- Ω(err).ShouldNot(HaveOccurred())
-
- Ω(sentData.SentConfig).Should(Equal(config.GinkgoConfig))
- Ω(sentData.SentSuiteSummary).Should(Equal(suiteSummary))
- })
- })
-
- Context("when a BeforeSuite completes", func() {
- BeforeEach(func() {
- reporter.BeforeSuiteDidRun(setupSummary)
- })
-
- It("should stop, then start intercepting output", func() {
- Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
- Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
- })
-
- It("should POST the SetupSummary to the Ginkgo server", func() {
- Ω(poster.posts).Should(HaveLen(1))
- Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/BeforeSuiteDidRun"))
- Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
-
- var summary *types.SetupSummary
- err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
- Ω(err).ShouldNot(HaveOccurred())
- setupSummary.CapturedOutput = interceptor.InterceptedOutput
- Ω(summary).Should(Equal(setupSummary))
- })
- })
-
- Context("when an AfterSuite completes", func() {
- BeforeEach(func() {
- reporter.AfterSuiteDidRun(setupSummary)
- })
-
- It("should stop, then start intercepting output", func() {
- Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
- Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
- })
-
- It("should POST the SetupSummary to the Ginkgo server", func() {
- Ω(poster.posts).Should(HaveLen(1))
- Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/AfterSuiteDidRun"))
- Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
-
- var summary *types.SetupSummary
- err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
- Ω(err).ShouldNot(HaveOccurred())
- setupSummary.CapturedOutput = interceptor.InterceptedOutput
- Ω(summary).Should(Equal(setupSummary))
- })
- })
-
- Context("When a spec will run", func() {
- BeforeEach(func() {
- reporter.SpecWillRun(specSummary)
- })
-
- It("should POST the SpecSummary to the Ginkgo server", func() {
- Ω(poster.posts).Should(HaveLen(1))
- Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecWillRun"))
- Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
-
- var summary *types.SpecSummary
- err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
- Ω(err).ShouldNot(HaveOccurred())
- Ω(summary).Should(Equal(specSummary))
- })
-
- Context("When a spec completes", func() {
- BeforeEach(func() {
- specSummary.State = types.SpecStatePanicked
- reporter.SpecDidComplete(specSummary)
- })
-
- It("should POST the SpecSummary to the Ginkgo server and include any intercepted output", func() {
- Ω(poster.posts).Should(HaveLen(2))
- Ω(poster.posts[1].url).Should(Equal("http://127.0.0.1:7788/SpecDidComplete"))
- Ω(poster.posts[1].bodyType).Should(Equal("application/json"))
-
- var summary *types.SpecSummary
- err := json.Unmarshal(poster.posts[1].bodyContent, &summary)
- Ω(err).ShouldNot(HaveOccurred())
- specSummary.CapturedOutput = interceptor.InterceptedOutput
- Ω(summary).Should(Equal(specSummary))
- })
-
- It("should stop, then start intercepting output", func() {
- Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
- Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
- })
- })
- })
-
- Context("When a suite ends", func() {
- BeforeEach(func() {
- reporter.SpecSuiteDidEnd(suiteSummary)
- })
-
- It("should POST the SuiteSummary to the Ginkgo server", func() {
- Ω(poster.posts).Should(HaveLen(1))
- Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteDidEnd"))
- Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
-
- var summary *types.SuiteSummary
-
- err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
- Ω(err).ShouldNot(HaveOccurred())
-
- Ω(summary).Should(Equal(suiteSummary))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
deleted file mode 100644
index e6b4e9f32..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package remote_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestRemote(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Remote Spec Forwarding Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
deleted file mode 100644
index 36bd00355..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package remote_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/remote"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-
- "bytes"
- "encoding/json"
- "net/http"
-)
-
-var _ = Describe("Server", func() {
- var (
- server *Server
- )
-
- BeforeEach(func() {
- var err error
- server, err = NewServer(3)
- Ω(err).ShouldNot(HaveOccurred())
-
- server.Start()
- })
-
- AfterEach(func() {
- server.Close()
- })
-
- Describe("Streaming endpoints", func() {
- var (
- reporterA, reporterB *reporters.FakeReporter
- forwardingReporter *ForwardingReporter
-
- suiteSummary *types.SuiteSummary
- setupSummary *types.SetupSummary
- specSummary *types.SpecSummary
- )
-
- BeforeEach(func() {
- reporterA = reporters.NewFakeReporter()
- reporterB = reporters.NewFakeReporter()
-
- server.RegisterReporters(reporterA, reporterB)
-
- forwardingReporter = NewForwardingReporter(config.DefaultReporterConfigType{}, server.Address(), &http.Client{}, &fakeOutputInterceptor{}, nil, "")
-
- suiteSummary = &types.SuiteSummary{
- SuiteDescription: "My Test Suite",
- }
-
- setupSummary = &types.SetupSummary{
- State: types.SpecStatePassed,
- }
-
- specSummary = &types.SpecSummary{
- ComponentTexts: []string{"My", "Spec"},
- State: types.SpecStatePassed,
- }
- })
-
- It("should make its address available", func() {
- Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`))
- })
-
- Describe("/SpecSuiteWillBegin", func() {
- It("should decode and forward the Ginkgo config and suite summary", func(done Done) {
- forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
- Ω(reporterA.Config).Should(Equal(config.GinkgoConfig))
- Ω(reporterB.Config).Should(Equal(config.GinkgoConfig))
- Ω(reporterA.BeginSummary).Should(Equal(suiteSummary))
- Ω(reporterB.BeginSummary).Should(Equal(suiteSummary))
- close(done)
- })
- })
-
- Describe("/BeforeSuiteDidRun", func() {
- It("should decode and forward the setup summary", func() {
- forwardingReporter.BeforeSuiteDidRun(setupSummary)
- Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary))
- Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary))
- })
- })
-
- Describe("/AfterSuiteDidRun", func() {
- It("should decode and forward the setup summary", func() {
- forwardingReporter.AfterSuiteDidRun(setupSummary)
- Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary))
- Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary))
- })
- })
-
- Describe("/SpecWillRun", func() {
- It("should decode and forward the spec summary", func(done Done) {
- forwardingReporter.SpecWillRun(specSummary)
- Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary))
- Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary))
- close(done)
- })
- })
-
- Describe("/SpecDidComplete", func() {
- It("should decode and forward the spec summary", func(done Done) {
- forwardingReporter.SpecDidComplete(specSummary)
- Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary))
- Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary))
- close(done)
- })
- })
-
- Describe("/SpecSuiteDidEnd", func() {
- It("should decode and forward the suite summary", func(done Done) {
- forwardingReporter.SpecSuiteDidEnd(suiteSummary)
- Ω(reporterA.EndSummary).Should(Equal(suiteSummary))
- Ω(reporterB.EndSummary).Should(Equal(suiteSummary))
- close(done)
- })
- })
- })
-
- Describe("Synchronization endpoints", func() {
- Describe("GETting and POSTing BeforeSuiteState", func() {
- getBeforeSuite := func() types.RemoteBeforeSuiteData {
- resp, err := http.Get(server.Address() + "/BeforeSuiteState")
- Ω(err).ShouldNot(HaveOccurred())
- Ω(resp.StatusCode).Should(Equal(http.StatusOK))
-
- r := types.RemoteBeforeSuiteData{}
- decoder := json.NewDecoder(resp.Body)
- err = decoder.Decode(&r)
- Ω(err).ShouldNot(HaveOccurred())
-
- return r
- }
-
- postBeforeSuite := func(r types.RemoteBeforeSuiteData) {
- resp, err := http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON()))
- Ω(err).ShouldNot(HaveOccurred())
- Ω(resp.StatusCode).Should(Equal(http.StatusOK))
- }
-
- Context("when the first node's Alive has not been registered yet", func() {
- It("should return pending", func() {
- state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
-
- state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
- })
- })
-
- Context("when the first node is Alive but has not responded yet", func() {
- BeforeEach(func() {
- server.RegisterAlive(1, func() bool {
- return true
- })
- })
-
- It("should return pending", func() {
- state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
-
- state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
- })
- })
-
- Context("when the first node has responded", func() {
- var state types.RemoteBeforeSuiteData
- BeforeEach(func() {
- server.RegisterAlive(1, func() bool {
- return false
- })
-
- state = types.RemoteBeforeSuiteData{
- Data: []byte("my data"),
- State: types.RemoteBeforeSuiteStatePassed,
- }
- postBeforeSuite(state)
- })
-
- It("should return the passed in state", func() {
- returnedState := getBeforeSuite()
- Ω(returnedState).Should(Equal(state))
- })
- })
-
- Context("when the first node is no longer Alive and has not responded yet", func() {
- BeforeEach(func() {
- server.RegisterAlive(1, func() bool {
- return false
- })
- })
-
- It("should return disappeared", func() {
- state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
-
- state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
- })
- })
- })
-
- Describe("GETting RemoteAfterSuiteData", func() {
- getRemoteAfterSuiteData := func() bool {
- resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData")
- Ω(err).ShouldNot(HaveOccurred())
- Ω(resp.StatusCode).Should(Equal(http.StatusOK))
-
- a := types.RemoteAfterSuiteData{}
- decoder := json.NewDecoder(resp.Body)
- err = decoder.Decode(&a)
- Ω(err).ShouldNot(HaveOccurred())
-
- return a.CanRun
- }
-
- Context("when there are unregistered nodes", func() {
- BeforeEach(func() {
- server.RegisterAlive(2, func() bool {
- return false
- })
- })
-
- It("should return false", func() {
- Ω(getRemoteAfterSuiteData()).Should(BeFalse())
- })
- })
-
-			Context("when all non-node-1 nodes are still running", func() {
- BeforeEach(func() {
- server.RegisterAlive(2, func() bool {
- return true
- })
-
- server.RegisterAlive(3, func() bool {
- return false
- })
- })
-
- It("should return false", func() {
- Ω(getRemoteAfterSuiteData()).Should(BeFalse())
- })
- })
-
-			Context("when all non-node-1 nodes are done", func() {
- BeforeEach(func() {
- server.RegisterAlive(2, func() bool {
- return false
- })
-
- server.RegisterAlive(3, func() bool {
- return false
- })
- })
-
- It("should return true", func() {
- Ω(getRemoteAfterSuiteData()).Should(BeTrue())
- })
-
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go
deleted file mode 100644
index 8681a7206..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package spec_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestSpec(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Spec Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go
deleted file mode 100644
index b4a2c9c79..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go
+++ /dev/null
@@ -1,739 +0,0 @@
-package spec_test
-
-import (
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gbytes"
-
- . "github.com/onsi/ginkgo/internal/spec"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-var noneFlag = types.FlagTypeNone
-var focusedFlag = types.FlagTypeFocused
-var pendingFlag = types.FlagTypePending
-
-var _ = Describe("Spec", func() {
- var (
- failer *Failer.Failer
- codeLocation types.CodeLocation
- nodesThatRan []string
- spec *Spec
- buffer *gbytes.Buffer
- )
-
- newBody := func(text string, fail bool) func() {
- return func() {
- nodesThatRan = append(nodesThatRan, text)
- if fail {
- failer.Fail(text, codeLocation)
- }
- }
- }
-
- newIt := func(text string, flag types.FlagType, fail bool) *leafnodes.ItNode {
- return leafnodes.NewItNode(text, newBody(text, fail), flag, codeLocation, 0, failer, 0)
- }
-
- newItWithBody := func(text string, body interface{}) *leafnodes.ItNode {
- return leafnodes.NewItNode(text, body, noneFlag, codeLocation, 0, failer, 0)
- }
-
- newMeasure := func(text string, flag types.FlagType, fail bool, samples int) *leafnodes.MeasureNode {
- return leafnodes.NewMeasureNode(text, func(Benchmarker) {
- nodesThatRan = append(nodesThatRan, text)
- if fail {
- failer.Fail(text, codeLocation)
- }
- }, flag, codeLocation, samples, failer, 0)
- }
-
- newBef := func(text string, fail bool) leafnodes.BasicNode {
- return leafnodes.NewBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
- }
-
- newAft := func(text string, fail bool) leafnodes.BasicNode {
- return leafnodes.NewAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
- }
-
- newJusBef := func(text string, fail bool) leafnodes.BasicNode {
- return leafnodes.NewJustBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
- }
-
- newJusAft := func(text string, fail bool) leafnodes.BasicNode {
- return leafnodes.NewJustAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
- }
-
- newContainer := func(text string, flag types.FlagType, setupNodes ...leafnodes.BasicNode) *containernode.ContainerNode {
- c := containernode.New(text, flag, codeLocation)
- for _, node := range setupNodes {
- c.PushSetupNode(node)
- }
- return c
- }
-
- containers := func(containers ...*containernode.ContainerNode) []*containernode.ContainerNode {
- return containers
- }
-
- BeforeEach(func() {
- buffer = gbytes.NewBuffer()
- failer = Failer.New()
- codeLocation = codelocation.New(0)
- nodesThatRan = []string{}
- })
-
- Describe("marking specs focused and pending", func() {
-		It("should satisfy various cases", func() {
- cases := []struct {
- ContainerFlags []types.FlagType
- SubjectFlag types.FlagType
- Pending bool
- Focused bool
- }{
- {[]types.FlagType{}, noneFlag, false, false},
- {[]types.FlagType{}, focusedFlag, false, true},
- {[]types.FlagType{}, pendingFlag, true, false},
- {[]types.FlagType{noneFlag}, noneFlag, false, false},
- {[]types.FlagType{focusedFlag}, noneFlag, false, true},
- {[]types.FlagType{pendingFlag}, noneFlag, true, false},
- {[]types.FlagType{noneFlag}, focusedFlag, false, true},
- {[]types.FlagType{focusedFlag}, focusedFlag, false, true},
- {[]types.FlagType{pendingFlag}, focusedFlag, true, true},
- {[]types.FlagType{noneFlag}, pendingFlag, true, false},
- {[]types.FlagType{focusedFlag}, pendingFlag, true, true},
- {[]types.FlagType{pendingFlag}, pendingFlag, true, false},
- {[]types.FlagType{focusedFlag, noneFlag}, noneFlag, false, true},
- {[]types.FlagType{noneFlag, focusedFlag}, noneFlag, false, true},
- {[]types.FlagType{pendingFlag, noneFlag}, noneFlag, true, false},
- {[]types.FlagType{noneFlag, pendingFlag}, noneFlag, true, false},
- {[]types.FlagType{focusedFlag, pendingFlag}, noneFlag, true, true},
- }
-
- for i, c := range cases {
- subject := newIt("it node", c.SubjectFlag, false)
- containers := []*containernode.ContainerNode{}
- for _, flag := range c.ContainerFlags {
- containers = append(containers, newContainer("container", flag))
- }
-
- spec := New(subject, containers, false)
- Ω(spec.Pending()).Should(Equal(c.Pending), "Case %d: %#v", i, c)
- Ω(spec.Focused()).Should(Equal(c.Focused), "Case %d: %#v", i, c)
-
- if c.Pending {
- Ω(spec.Summary("").State).Should(Equal(types.SpecStatePending))
- }
- }
- })
- })
-
- Describe("Skip", func() {
- It("should be skipped", func() {
- spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false)
- Ω(spec.Skipped()).Should(BeFalse())
- spec.Skip()
- Ω(spec.Skipped()).Should(BeTrue())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStateSkipped))
- })
- })
-
- Describe("IsMeasurement", func() {
- It("should be true if the subject is a measurement node", func() {
- spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false)
- Ω(spec.IsMeasurement()).Should(BeFalse())
- Ω(spec.Summary("").IsMeasurement).Should(BeFalse())
- Ω(spec.Summary("").NumberOfSamples).Should(Equal(1))
-
- spec = New(newMeasure("measure node", noneFlag, false, 10), containers(newContainer("container", noneFlag)), false)
- Ω(spec.IsMeasurement()).Should(BeTrue())
- Ω(spec.Summary("").IsMeasurement).Should(BeTrue())
- Ω(spec.Summary("").NumberOfSamples).Should(Equal(10))
- })
- })
-
- Describe("Passed", func() {
- It("should pass when the subject passed", func() {
- spec := New(newIt("it node", noneFlag, false), containers(), false)
- spec.Run(buffer)
-
- Ω(spec.Passed()).Should(BeTrue())
- Ω(spec.Failed()).Should(BeFalse())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed))
- Ω(spec.Summary("").Failure).Should(BeZero())
- })
- })
-
- Describe("Flaked", func() {
- It("should work if Run is called twice and gets different results", func() {
- i := 0
- spec := New(newItWithBody("flaky it", func() {
- i++
- if i == 1 {
- failer.Fail("oops", codeLocation)
- }
- }), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(spec.Flaked()).Should(BeFalse())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed))
- Ω(spec.Summary("").Failure.Message).Should(Equal("oops"))
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeTrue())
- Ω(spec.Failed()).Should(BeFalse())
- Ω(spec.Flaked()).Should(BeTrue())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed))
- })
- })
-
- Describe("Failed", func() {
- It("should be failed if the failure was panic", func() {
- spec := New(newItWithBody("panicky it", func() {
- panic("bam")
- }), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStatePanicked))
- Ω(spec.Summary("").Failure.Message).Should(Equal("Test Panicked"))
- Ω(spec.Summary("").Failure.ForwardedPanic).Should(Equal("bam"))
- })
-
- It("should be failed if the failure was a timeout", func() {
- spec := New(newItWithBody("sleepy it", func(done Done) {}), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStateTimedOut))
- Ω(spec.Summary("").Failure.Message).Should(Equal("Timed out"))
- })
-
- It("should be failed if the failure was... a failure", func() {
- spec := New(newItWithBody("failing it", func() {
- failer.Fail("bam", codeLocation)
- }), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed))
- Ω(spec.Summary("").Failure.Message).Should(Equal("bam"))
- })
- })
-
- Describe("Concatenated string", func() {
- It("should concatenate the texts of the containers and the subject", func() {
- spec := New(
- newIt("it node", noneFlag, false),
- containers(
- newContainer("outer container", noneFlag),
- newContainer("inner container", noneFlag),
- ),
- false,
- )
-
- Ω(spec.ConcatenatedString()).Should(Equal("outer container inner container it node"))
- })
- })
-
- Describe("running it specs", func() {
- Context("with just an it", func() {
- Context("that succeeds", func() {
- It("should run the it and report on its success", func() {
- spec := New(newIt("it node", noneFlag, false), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeTrue())
- Ω(spec.Failed()).Should(BeFalse())
- Ω(nodesThatRan).Should(Equal([]string{"it node"}))
- })
- })
-
- Context("that fails", func() {
-				It("should run the it and report on its failure", func() {
- spec := New(newIt("it node", noneFlag, true), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
- Ω(nodesThatRan).Should(Equal([]string{"it node"}))
- })
- })
- })
-
- Context("with a full set of setup nodes", func() {
- var failingNodes map[string]bool
-
- BeforeEach(func() {
- failingNodes = map[string]bool{}
- })
-
- JustBeforeEach(func() {
- spec = New(
- newIt("it node", noneFlag, failingNodes["it node"]),
- containers(
- newContainer("outer container", noneFlag,
- newBef("outer bef A", failingNodes["outer bef A"]),
- newBef("outer bef B", failingNodes["outer bef B"]),
- newJusBef("outer jusbef A", failingNodes["outer jusbef A"]),
- newJusBef("outer jusbef B", failingNodes["outer jusbef B"]),
- newJusAft("outer jusaft A", failingNodes["outer jusaft A"]),
- newJusAft("outer jusaft B", failingNodes["outer jusaft B"]),
- newAft("outer aft A", failingNodes["outer aft A"]),
- newAft("outer aft B", failingNodes["outer aft B"]),
- ),
- newContainer("inner container", noneFlag,
- newBef("inner bef A", failingNodes["inner bef A"]),
- newBef("inner bef B", failingNodes["inner bef B"]),
- newJusBef("inner jusbef A", failingNodes["inner jusbef A"]),
- newJusBef("inner jusbef B", failingNodes["inner jusbef B"]),
- newJusAft("inner jusaft A", failingNodes["inner jusaft A"]),
- newJusAft("inner jusaft B", failingNodes["inner jusaft B"]),
- newAft("inner aft A", failingNodes["inner aft A"]),
- newAft("inner aft B", failingNodes["inner aft B"]),
- ),
- ),
- false,
- )
- spec.Run(buffer)
- })
-
- Context("that all pass", func() {
- It("should walk through the nodes in the correct order", func() {
- Ω(spec.Passed()).Should(BeTrue())
- Ω(spec.Failed()).Should(BeFalse())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusbef A",
- "inner jusbef B",
- "it node",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- })
- })
-
- Context("when the subject fails", func() {
- BeforeEach(func() {
- failingNodes["it node"] = true
- })
-
- It("should run the afters", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusbef A",
- "inner jusbef B",
- "it node",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
- })
- })
-
- Context("when an inner before fails", func() {
- BeforeEach(func() {
- failingNodes["inner bef A"] = true
- })
-
- It("should not run any other befores, but it should run the subsequent afters", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("inner bef A"))
- })
- })
-
- Context("when an outer before fails", func() {
- BeforeEach(func() {
- failingNodes["outer bef B"] = true
- })
-
- It("should not run any other befores, but it should run the subsequent afters", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "outer jusaft A",
- "outer jusaft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("outer bef B"))
- })
- })
-
- Context("when an after fails", func() {
- BeforeEach(func() {
- failingNodes["inner aft B"] = true
- })
-
- It("should run all other afters, but mark the test as failed", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusbef A",
- "inner jusbef B",
- "it node",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("inner aft B"))
- })
- })
-
- Context("when a just before each fails", func() {
- BeforeEach(func() {
- failingNodes["outer jusbef B"] = true
- })
-
- It("should run the afters, but not the subject", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusbef B"))
- })
- })
-
- Context("when a just after each fails", func() {
- BeforeEach(func() {
- failingNodes["outer jusaft A"] = true
- })
-
- It("should run all other afters, but mark the test as failed", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusbef A",
- "inner jusbef B",
- "it node",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusaft A"))
- })
- })
-
- Context("when an after fails after an earlier node has failed", func() {
- BeforeEach(func() {
- failingNodes["it node"] = true
- failingNodes["inner aft B"] = true
- })
-
- It("should record the earlier failure", func() {
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "outer bef A",
- "outer bef B",
- "inner bef A",
- "inner bef B",
- "outer jusbef A",
- "outer jusbef B",
- "inner jusbef A",
- "inner jusbef B",
- "it node",
- "inner jusaft A",
- "inner jusaft B",
- "outer jusaft A",
- "outer jusaft B",
- "inner aft A",
- "inner aft B",
- "outer aft A",
- "outer aft B",
- }))
- Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
- })
- })
- })
- })
-
- Describe("running measurement specs", func() {
- Context("when the measurement succeeds", func() {
- It("should run N samples", func() {
- spec = New(
- newMeasure("measure node", noneFlag, false, 3),
- containers(
- newContainer("container", noneFlag,
- newBef("bef A", false),
- newJusBef("jusbef A", false),
- newJusAft("jusaft A", false),
- newAft("aft A", false),
- ),
- ),
- false,
- )
- spec.Run(buffer)
-
- Ω(spec.Passed()).Should(BeTrue())
- Ω(spec.Failed()).Should(BeFalse())
- Ω(nodesThatRan).Should(Equal([]string{
- "bef A",
- "jusbef A",
- "measure node",
- "jusaft A",
- "aft A",
- "bef A",
- "jusbef A",
- "measure node",
- "jusaft A",
- "aft A",
- "bef A",
- "jusbef A",
- "measure node",
- "jusaft A",
- "aft A",
- }))
- })
- })
-
- Context("when the measurement fails", func() {
- It("should bail after the failure occurs", func() {
- spec = New(
- newMeasure("measure node", noneFlag, true, 3),
- containers(
- newContainer("container", noneFlag,
- newBef("bef A", false),
- newJusBef("jusbef A", false),
- newJusAft("jusaft A", false),
- newAft("aft A", false),
- ),
- ),
- false,
- )
- spec.Run(buffer)
-
- Ω(spec.Passed()).Should(BeFalse())
- Ω(spec.Failed()).Should(BeTrue())
- Ω(nodesThatRan).Should(Equal([]string{
- "bef A",
- "jusbef A",
- "measure node",
- "jusaft A",
- "aft A",
- }))
- })
- })
- })
-
- Describe("Summary", func() {
- var (
- subjectCodeLocation types.CodeLocation
- outerContainerCodeLocation types.CodeLocation
- innerContainerCodeLocation types.CodeLocation
- summary *types.SpecSummary
- )
-
- BeforeEach(func() {
- subjectCodeLocation = codelocation.New(0)
- outerContainerCodeLocation = codelocation.New(0)
- innerContainerCodeLocation = codelocation.New(0)
-
- spec = New(
- leafnodes.NewItNode("it node", func() {
- time.Sleep(10 * time.Millisecond)
- }, noneFlag, subjectCodeLocation, 0, failer, 0),
- containers(
- containernode.New("outer container", noneFlag, outerContainerCodeLocation),
- containernode.New("inner container", noneFlag, innerContainerCodeLocation),
- ),
- false,
- )
-
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeTrue())
- summary = spec.Summary("suite id")
- })
-
- It("should have the suite id", func() {
- Ω(summary.SuiteID).Should(Equal("suite id"))
- })
-
- It("should have the component texts and code locations", func() {
- Ω(summary.ComponentTexts).Should(Equal([]string{"outer container", "inner container", "it node"}))
- Ω(summary.ComponentCodeLocations).Should(Equal([]types.CodeLocation{outerContainerCodeLocation, innerContainerCodeLocation, subjectCodeLocation}))
- })
-
- It("should have a runtime", func() {
- Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
- })
-
- It("should have a runtime which remains consistent after spec run", func() {
- totalRunTime := summary.RunTime
- Ω(totalRunTime).Should(BeNumerically(">=", 10*time.Millisecond))
-
- Consistently(func() time.Duration { return spec.Summary("suite id").RunTime }).Should(Equal(totalRunTime))
- })
-
- It("should not be a measurement, or have a measurement summary", func() {
- Ω(summary.IsMeasurement).Should(BeFalse())
- Ω(summary.Measurements).Should(BeEmpty())
- })
- })
-
- Describe("Summaries for measurements", func() {
- var summary *types.SpecSummary
-
- BeforeEach(func() {
- spec = New(leafnodes.NewMeasureNode("measure node", func(b Benchmarker) {
- b.RecordValue("a value", 7, "some info")
- b.RecordValueWithPrecision("another value", 8, "ns", 5, "more info")
- }, noneFlag, codeLocation, 4, failer, 0), containers(), false)
- spec.Run(buffer)
- Ω(spec.Passed()).Should(BeTrue())
- summary = spec.Summary("suite id")
- })
-
- It("should include the number of samples", func() {
- Ω(summary.NumberOfSamples).Should(Equal(4))
- })
-
- It("should be a measurement", func() {
- Ω(summary.IsMeasurement).Should(BeTrue())
- })
-
- It("should have the measurements report", func() {
- Ω(summary.Measurements).Should(HaveKey("a value"))
- report := summary.Measurements["a value"]
- Ω(report.Name).Should(Equal("a value"))
- Ω(report.Info).Should(Equal("some info"))
- Ω(report.Results).Should(Equal([]float64{7, 7, 7, 7}))
-
- Ω(summary.Measurements).Should(HaveKey("another value"))
- report = summary.Measurements["another value"]
- Ω(report.Name).Should(Equal("another value"))
- Ω(report.Info).Should(Equal("more info"))
- Ω(report.Results).Should(Equal([]float64{8, 8, 8, 8}))
- Ω(report.Units).Should(Equal("ns"))
- Ω(report.Precision).Should(Equal(5))
- })
- })
-
- Describe("When told to emit progress", func() {
- It("should emit progress to the writer as it runs Befores, JustBefores, Afters, and Its", func() {
- spec = New(
- newIt("it node", noneFlag, false),
- containers(
- newContainer("outer container", noneFlag,
- newBef("outer bef A", false),
- newJusBef("outer jusbef A", false),
- newJusAft("outer jusaft A", false),
- newAft("outer aft A", false),
- ),
- newContainer("inner container", noneFlag,
- newBef("inner bef A", false),
- newJusBef("inner jusbef A", false),
- newJusAft("inner jusaft A", false),
- newAft("inner aft A", false),
- ),
- ),
- true,
- )
- spec.Run(buffer)
-
- Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] outer container`))
- Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] inner container`))
- Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] outer container`))
- Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] inner container`))
- Ω(buffer).Should(gbytes.Say(`\[It\] it node`))
- Ω(buffer).Should(gbytes.Say(`\[JustAfterEach\] inner container`))
- Ω(buffer).Should(gbytes.Say(`\[JustAfterEach\] outer container`))
- Ω(buffer).Should(gbytes.Say(`\[AfterEach\] inner container`))
- Ω(buffer).Should(gbytes.Say(`\[AfterEach\] outer container`))
- })
-
- It("should emit progress to the writer as it runs Befores, JustBefores, JustAfters, Afters, and Measures", func() {
- spec = New(
- newMeasure("measure node", noneFlag, false, 2),
- containers(),
- true,
- )
- spec.Run(buffer)
-
- Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`))
- Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
index 006185ab5..27c0d1d6c 100644
--- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
@@ -7,14 +7,21 @@ import (
)
type Specs struct {
- specs []*Spec
+ specs []*Spec
+ names []string
+
hasProgrammaticFocus bool
RegexScansFilePath bool
}
func NewSpecs(specs []*Spec) *Specs {
+ names := make([]string, len(specs))
+ for i, spec := range specs {
+ names[i] = spec.ConcatenatedString()
+ }
return &Specs{
specs: specs,
+ names: names,
}
}
@@ -30,10 +37,13 @@ func (e *Specs) Shuffle(r *rand.Rand) {
sort.Sort(e)
permutation := r.Perm(len(e.specs))
shuffledSpecs := make([]*Spec, len(e.specs))
+ names := make([]string, len(e.specs))
for i, j := range permutation {
shuffledSpecs[i] = e.specs[j]
+ names[i] = e.names[j]
}
e.specs = shuffledSpecs
+ e.names = names
}
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
@@ -64,33 +74,43 @@ func (e *Specs) applyProgrammaticFocus() {
 // toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
 // this is the place to which we append.
-func (e *Specs) toMatch(description string, spec *Spec) []byte {
+func (e *Specs) toMatch(description string, i int) []byte {
+ if i > len(e.names) {
+ return nil
+ }
if e.RegexScansFilePath {
return []byte(
description + " " +
- spec.ConcatenatedString() + " " +
- spec.subject.CodeLocation().FileName)
+ e.names[i] + " " +
+ e.specs[i].subject.CodeLocation().FileName)
} else {
return []byte(
description + " " +
- spec.ConcatenatedString())
+ e.names[i])
}
}
func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
- for _, spec := range e.specs {
+ var focusFilter *regexp.Regexp
+ if focusString != "" {
+ focusFilter = regexp.MustCompile(focusString)
+ }
+ var skipFilter *regexp.Regexp
+ if skipString != "" {
+ skipFilter = regexp.MustCompile(skipString)
+ }
+
+ for i, spec := range e.specs {
matchesFocus := true
matchesSkip := false
- toMatch := e.toMatch(description, spec)
+ toMatch := e.toMatch(description, i)
- if focusString != "" {
- focusFilter := regexp.MustCompile(focusString)
+ if focusFilter != nil {
matchesFocus = focusFilter.Match([]byte(toMatch))
}
- if skipString != "" {
- skipFilter := regexp.MustCompile(skipString)
+ if skipFilter != nil {
matchesSkip = skipFilter.Match([]byte(toMatch))
}
@@ -115,9 +135,10 @@ func (e *Specs) Len() int {
}
func (e *Specs) Less(i, j int) bool {
- return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
+ return e.names[i] < e.names[j]
}
func (e *Specs) Swap(i, j int) {
+ e.names[i], e.names[j] = e.names[j], e.names[i]
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go
deleted file mode 100644
index 066fbbb3a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package spec_test
-
-import (
- "math/rand"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/spec"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Specs", func() {
- var specs *Specs
-
- newSpec := func(text string, flag types.FlagType) *Spec {
- subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
- return New(subject, []*containernode.ContainerNode{}, false)
- }
-
- newMeasureSpec := func(text string, flag types.FlagType) *Spec {
- subject := leafnodes.NewMeasureNode(text, func(Benchmarker) {}, flag, codelocation.New(0), 0, nil, 0)
- return New(subject, []*containernode.ContainerNode{}, false)
- }
-
- newSpecs := func(args ...interface{}) *Specs {
- specs := []*Spec{}
- for index := 0; index < len(args)-1; index += 2 {
- specs = append(specs, newSpec(args[index].(string), args[index+1].(types.FlagType)))
- }
- return NewSpecs(specs)
- }
-
- specTexts := func(specs *Specs) []string {
- texts := []string{}
- for _, spec := range specs.Specs() {
- texts = append(texts, spec.ConcatenatedString())
- }
- return texts
- }
-
- willRunTexts := func(specs *Specs) []string {
- texts := []string{}
- for _, spec := range specs.Specs() {
- if !(spec.Skipped() || spec.Pending()) {
- texts = append(texts, spec.ConcatenatedString())
- }
- }
- return texts
- }
-
- skippedTexts := func(specs *Specs) []string {
- texts := []string{}
- for _, spec := range specs.Specs() {
- if spec.Skipped() {
- texts = append(texts, spec.ConcatenatedString())
- }
- }
- return texts
- }
-
- pendingTexts := func(specs *Specs) []string {
- texts := []string{}
- for _, spec := range specs.Specs() {
- if spec.Pending() {
- texts = append(texts, spec.ConcatenatedString())
- }
- }
- return texts
- }
-
- Describe("Shuffling specs", func() {
- It("should shuffle the specs using the passed in randomizer", func() {
- specs17 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
- specs17.Shuffle(rand.New(rand.NewSource(17)))
- texts17 := specTexts(specs17)
-
- specs17Again := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
- specs17Again.Shuffle(rand.New(rand.NewSource(17)))
- texts17Again := specTexts(specs17Again)
-
- specs15 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
- specs15.Shuffle(rand.New(rand.NewSource(15)))
- texts15 := specTexts(specs15)
-
- specsUnshuffled := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
- textsUnshuffled := specTexts(specsUnshuffled)
-
- Ω(textsUnshuffled).Should(Equal([]string{"C", "A", "B"}))
-
- Ω(texts17).Should(Equal(texts17Again))
- Ω(texts17).ShouldNot(Equal(texts15))
- Ω(texts17).ShouldNot(Equal(textsUnshuffled))
- Ω(texts15).ShouldNot(Equal(textsUnshuffled))
-
- Ω(texts17).Should(HaveLen(3))
- Ω(texts17).Should(ContainElement("A"))
- Ω(texts17).Should(ContainElement("B"))
- Ω(texts17).Should(ContainElement("C"))
-
- Ω(texts15).Should(HaveLen(3))
- Ω(texts15).Should(ContainElement("A"))
- Ω(texts15).Should(ContainElement("B"))
- Ω(texts15).Should(ContainElement("C"))
- })
- })
-
- Describe("with no programmatic focus", func() {
- BeforeEach(func() {
- specs = newSpecs("A1", noneFlag, "A2", noneFlag, "B1", noneFlag, "B2", pendingFlag)
- specs.ApplyFocus("", "", "")
- })
-
- It("should not report as having programmatic specs", func() {
- Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
- })
- })
-
- Describe("Applying focus/skip", func() {
- var description, focusString, skipString string
-
- BeforeEach(func() {
- description, focusString, skipString = "", "", ""
- })
-
- JustBeforeEach(func() {
- specs = newSpecs("A1", focusedFlag, "A2", noneFlag, "B1", focusedFlag, "B2", pendingFlag)
- specs.ApplyFocus(description, focusString, skipString)
- })
-
- Context("with neither a focus string nor a skip string", func() {
- It("should apply the programmatic focus", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "B1"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B2"}))
- Ω(pendingTexts(specs)).Should(BeEmpty())
- })
-
- It("should report as having programmatic specs", func() {
- Ω(specs.HasProgrammaticFocus()).Should(BeTrue())
- })
- })
-
- Context("with a focus regexp", func() {
- BeforeEach(func() {
- focusString = "A"
- })
-
- It("should override the programmatic focus", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"B1", "B2"}))
- Ω(pendingTexts(specs)).Should(BeEmpty())
- })
-
- It("should not report as having programmatic specs", func() {
- Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
- })
- })
-
- Context("with a focus regexp", func() {
- BeforeEach(func() {
- focusString = "B"
- })
-
- It("should not override any pendings", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"B1"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"}))
- Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
- })
- })
-
- Context("with a description", func() {
- BeforeEach(func() {
- description = "C"
- focusString = "C"
- })
-
- It("should include the description in the focus determination", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2", "B1"}))
- Ω(skippedTexts(specs)).Should(BeEmpty())
- Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
- })
- })
-
- Context("with a description", func() {
- BeforeEach(func() {
- description = "C"
- skipString = "C"
- })
-
-			It("should include the description in the skip determination", func() {
- Ω(willRunTexts(specs)).Should(BeEmpty())
- Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2", "B1", "B2"}))
- Ω(pendingTexts(specs)).Should(BeEmpty())
- })
- })
-
- Context("with a skip regexp", func() {
- BeforeEach(func() {
- skipString = "A"
- })
-
- It("should override the programmatic focus", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"B1"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"}))
- Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
- })
-
- It("should not report as having programmatic specs", func() {
- Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
- })
- })
-
- Context("with both a focus and a skip regexp", func() {
- BeforeEach(func() {
- focusString = "1"
- skipString = "B"
- })
-
- It("should AND the two", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A1"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B1", "B2"}))
- Ω(pendingTexts(specs)).Should(BeEmpty())
- })
-
- It("should not report as having programmatic specs", func() {
- Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
- })
- })
- })
-
- Describe("With a focused spec within a pending context and a pending spec within a focused context", func() {
- BeforeEach(func() {
- pendingInFocused := New(
- leafnodes.NewItNode("PendingInFocused", func() {}, pendingFlag, codelocation.New(0), 0, nil, 0),
- []*containernode.ContainerNode{
- containernode.New("", focusedFlag, codelocation.New(0)),
- }, false)
-
- focusedInPending := New(
- leafnodes.NewItNode("FocusedInPending", func() {}, focusedFlag, codelocation.New(0), 0, nil, 0),
- []*containernode.ContainerNode{
- containernode.New("", pendingFlag, codelocation.New(0)),
- }, false)
-
- specs = NewSpecs([]*Spec{
- newSpec("A", noneFlag),
- newSpec("B", noneFlag),
- pendingInFocused,
- focusedInPending,
- })
- specs.ApplyFocus("", "", "")
- })
-
- It("should not have a programmatic focus and should run all tests", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"}))
- Ω(skippedTexts(specs)).Should(BeEmpty())
- Ω(pendingTexts(specs)).Should(ConsistOf(ContainSubstring("PendingInFocused"), ContainSubstring("FocusedInPending")))
- })
- })
-
- Describe("skipping measurements", func() {
- BeforeEach(func() {
- specs = NewSpecs([]*Spec{
- newSpec("A", noneFlag),
- newSpec("B", noneFlag),
- newSpec("C", pendingFlag),
- newMeasureSpec("measurementA", noneFlag),
- newMeasureSpec("measurementB", pendingFlag),
- })
- })
-
- It("should skip measurements", func() {
- Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B", "measurementA"}))
- Ω(skippedTexts(specs)).Should(BeEmpty())
- Ω(pendingTexts(specs)).Should(Equal([]string{"C", "measurementB"}))
-
- specs.SkipMeasurements()
-
- Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"}))
- Ω(skippedTexts(specs)).Should(Equal([]string{"measurementA", "measurementB"}))
- Ω(pendingTexts(specs)).Should(Equal([]string{"C"}))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
deleted file mode 100644
index 65da9837c..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package spec_iterator_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/spec_iterator"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ParallelizedIndexRange", func() {
- var startIndex, count int
-
- It("should return the correct index range for 4 tests on 2 nodes", func() {
- startIndex, count = ParallelizedIndexRange(4, 2, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(4, 2, 2)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(2))
- })
-
- It("should return the correct index range for 5 tests on 2 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 2, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(3))
-
- startIndex, count = ParallelizedIndexRange(5, 2, 2)
- Ω(startIndex).Should(Equal(3))
- Ω(count).Should(Equal(2))
- })
-
- It("should return the correct index range for 5 tests on 3 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 3, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(5, 3, 2)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(5, 3, 3)
- Ω(startIndex).Should(Equal(4))
- Ω(count).Should(Equal(1))
- })
-
- It("should return the correct index range for 5 tests on 4 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 4, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(5, 4, 2)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 4, 3)
- Ω(startIndex).Should(Equal(3))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 4, 4)
- Ω(startIndex).Should(Equal(4))
- Ω(count).Should(Equal(1))
- })
-
- It("should return the correct index range for 5 tests on 5 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 5, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 5, 2)
- Ω(startIndex).Should(Equal(1))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 5, 3)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 5, 4)
- Ω(startIndex).Should(Equal(3))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 5, 5)
- Ω(startIndex).Should(Equal(4))
- Ω(count).Should(Equal(1))
- })
-
- It("should return the correct index range for 5 tests on 6 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 6, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 6, 2)
- Ω(startIndex).Should(Equal(1))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 6, 3)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 6, 4)
- Ω(startIndex).Should(Equal(3))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 6, 5)
- Ω(startIndex).Should(Equal(4))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(5, 6, 6)
- Ω(count).Should(Equal(0))
- })
-
- It("should return the correct index range for 5 tests on 7 nodes", func() {
- startIndex, count = ParallelizedIndexRange(5, 7, 6)
- Ω(count).Should(Equal(0))
-
- startIndex, count = ParallelizedIndexRange(5, 7, 7)
- Ω(count).Should(Equal(0))
- })
-
- It("should return the correct index range for 11 tests on 7 nodes", func() {
- startIndex, count = ParallelizedIndexRange(11, 7, 1)
- Ω(startIndex).Should(Equal(0))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 2)
- Ω(startIndex).Should(Equal(2))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 3)
- Ω(startIndex).Should(Equal(4))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 4)
- Ω(startIndex).Should(Equal(6))
- Ω(count).Should(Equal(2))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 5)
- Ω(startIndex).Should(Equal(8))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 6)
- Ω(startIndex).Should(Equal(9))
- Ω(count).Should(Equal(1))
-
- startIndex, count = ParallelizedIndexRange(11, 7, 7)
- Ω(startIndex).Should(Equal(10))
- Ω(count).Should(Equal(1))
- })
-
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
deleted file mode 100644
index c5a762fd5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package spec_iterator_test
-
-import (
- "net/http"
-
- . "github.com/onsi/ginkgo/internal/spec_iterator"
- "github.com/onsi/gomega/ghttp"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/types"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ParallelSpecIterator", func() {
- var specs []*spec.Spec
- var iterator *ParallelIterator
- var server *ghttp.Server
-
- newSpec := func(text string, flag types.FlagType) *spec.Spec {
- subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- BeforeEach(func() {
- specs = []*spec.Spec{
- newSpec("A", types.FlagTypePending),
- newSpec("B", types.FlagTypeNone),
- newSpec("C", types.FlagTypeNone),
- newSpec("D", types.FlagTypeNone),
- }
- specs[3].Skip()
-
- server = ghttp.NewServer()
-
- iterator = NewParallelIterator(specs, "http://"+server.Addr())
- })
-
- AfterEach(func() {
- server.Close()
- })
-
- It("should report the total number of specs", func() {
- Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
- })
-
- It("should not report the number to be processed", func() {
- n, known := iterator.NumberOfSpecsToProcessIfKnown()
- Ω(n).Should(Equal(-1))
- Ω(known).Should(BeFalse())
- })
-
- It("should not report the number that will be run", func() {
- n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
- Ω(n).Should(Equal(-1))
- Ω(known).Should(BeFalse())
- })
-
- Describe("iterating", func() {
- Describe("when the server returns well-formed responses", func() {
- BeforeEach(func() {
- server.AppendHandlers(
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 0}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 1}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 3}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 4}),
- )
- })
-
- It("should return the specs in question", func() {
- Ω(iterator.Next()).Should(Equal(specs[0]))
- Ω(iterator.Next()).Should(Equal(specs[1]))
- Ω(iterator.Next()).Should(Equal(specs[3]))
- spec, err := iterator.Next()
- Ω(spec).Should(BeNil())
- Ω(err).Should(MatchError(ErrClosed))
- })
- })
-
- Describe("when the server 404s", func() {
- BeforeEach(func() {
- server.AppendHandlers(
- ghttp.RespondWith(http.StatusNotFound, ""),
- )
- })
-
- It("should return an error", func() {
- spec, err := iterator.Next()
- Ω(spec).Should(BeNil())
- Ω(err).Should(MatchError("unexpected status code 404"))
- })
- })
-
- Describe("when the server returns gibberish", func() {
- BeforeEach(func() {
- server.AppendHandlers(
- ghttp.RespondWith(http.StatusOK, "ß"),
- )
- })
-
- It("should error", func() {
- spec, err := iterator.Next()
- Ω(spec).Should(BeNil())
- Ω(err).ShouldNot(BeNil())
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
deleted file mode 100644
index dde4a344e..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package spec_iterator_test
-
-import (
- . "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/types"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("SerialSpecIterator", func() {
- var specs []*spec.Spec
- var iterator *SerialIterator
-
- newSpec := func(text string, flag types.FlagType) *spec.Spec {
- subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- BeforeEach(func() {
- specs = []*spec.Spec{
- newSpec("A", types.FlagTypePending),
- newSpec("B", types.FlagTypeNone),
- newSpec("C", types.FlagTypeNone),
- newSpec("D", types.FlagTypeNone),
- }
- specs[3].Skip()
-
- iterator = NewSerialIterator(specs)
- })
-
- It("should report the total number of specs", func() {
- Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
- })
-
- It("should report the number to be processed", func() {
- n, known := iterator.NumberOfSpecsToProcessIfKnown()
- Ω(n).Should(Equal(4))
- Ω(known).Should(BeTrue())
- })
-
- It("should report the number that will be run", func() {
- n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
- Ω(n).Should(Equal(2))
- Ω(known).Should(BeTrue())
- })
-
- Describe("iterating", func() {
- It("should return the specs in order", func() {
- Ω(iterator.Next()).Should(Equal(specs[0]))
- Ω(iterator.Next()).Should(Equal(specs[1]))
- Ω(iterator.Next()).Should(Equal(specs[2]))
- Ω(iterator.Next()).Should(Equal(specs[3]))
- spec, err := iterator.Next()
- Ω(spec).Should(BeNil())
- Ω(err).Should(MatchError(ErrClosed))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
deleted file mode 100644
index c3786e03a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package spec_iterator_test
-
-import (
- . "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/types"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ShardedParallelSpecIterator", func() {
- var specs []*spec.Spec
- var iterator *ShardedParallelIterator
-
- newSpec := func(text string, flag types.FlagType) *spec.Spec {
- subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- BeforeEach(func() {
- specs = []*spec.Spec{
- newSpec("A", types.FlagTypePending),
- newSpec("B", types.FlagTypeNone),
- newSpec("C", types.FlagTypeNone),
- newSpec("D", types.FlagTypeNone),
- }
- specs[3].Skip()
-
- iterator = NewShardedParallelIterator(specs, 2, 1)
- })
-
- It("should report the total number of specs", func() {
- Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
- })
-
- It("should report the number to be processed", func() {
- n, known := iterator.NumberOfSpecsToProcessIfKnown()
- Ω(n).Should(Equal(2))
- Ω(known).Should(BeTrue())
- })
-
- It("should report the number that will be run", func() {
- n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
- Ω(n).Should(Equal(1))
- Ω(known).Should(BeTrue())
- })
-
- Describe("iterating", func() {
- It("should return the specs in order", func() {
- Ω(iterator.Next()).Should(Equal(specs[0]))
- Ω(iterator.Next()).Should(Equal(specs[1]))
- spec, err := iterator.Next()
- Ω(spec).Should(BeNil())
- Ω(err).Should(MatchError(ErrClosed))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
deleted file mode 100644
index 5c08a77e3..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package spec_iterator_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestSpecIterator(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "SpecIterator Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go
deleted file mode 100644
index c8388fb6f..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package specrunner_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestSpecRunner(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Spec Runner Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
deleted file mode 100644
index a41437922..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
+++ /dev/null
@@ -1,785 +0,0 @@
-package specrunner_test
-
-import (
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/internal/spec_iterator"
- . "github.com/onsi/ginkgo/internal/specrunner"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/containernode"
- Failer "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- Writer "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
-)
-
-var noneFlag = types.FlagTypeNone
-var pendingFlag = types.FlagTypePending
-
-var _ = Describe("Spec Runner", func() {
- var (
- reporter1 *reporters.FakeReporter
- reporter2 *reporters.FakeReporter
- failer *Failer.Failer
- writer *Writer.FakeGinkgoWriter
-
- thingsThatRan []string
-
- runner *SpecRunner
- )
-
- newBefSuite := func(text string, fail bool) leafnodes.SuiteNode {
- return leafnodes.NewBeforeSuiteNode(func() {
- writer.AddEvent(text)
- thingsThatRan = append(thingsThatRan, text)
- if fail {
- failer.Fail(text, codelocation.New(0))
- }
- }, codelocation.New(0), 0, failer)
- }
-
- newAftSuite := func(text string, fail bool) leafnodes.SuiteNode {
- return leafnodes.NewAfterSuiteNode(func() {
- writer.AddEvent(text)
- thingsThatRan = append(thingsThatRan, text)
- if fail {
- failer.Fail(text, codelocation.New(0))
- }
- }, codelocation.New(0), 0, failer)
- }
-
- newSpec := func(text string, flag types.FlagType, fail bool) *spec.Spec {
- subject := leafnodes.NewItNode(text, func() {
- writer.AddEvent(text)
- thingsThatRan = append(thingsThatRan, text)
- if fail {
- failer.Fail(text, codelocation.New(0))
- }
- }, flag, codelocation.New(0), 0, failer, 0)
-
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- newFlakySpec := func(text string, flag types.FlagType, failures int) *spec.Spec {
- runs := 0
- subject := leafnodes.NewItNode(text, func() {
- writer.AddEvent(text)
- thingsThatRan = append(thingsThatRan, text)
- runs++
- if runs < failures {
- failer.Fail(text, codelocation.New(0))
- }
- }, flag, codelocation.New(0), 0, failer, 0)
-
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- newSpecWithBody := func(text string, body interface{}) *spec.Spec {
- subject := leafnodes.NewItNode(text, body, noneFlag, codelocation.New(0), 0, failer, 0)
-
- return spec.New(subject, []*containernode.ContainerNode{}, false)
- }
-
- newRunner := func(config config.GinkgoConfigType, beforeSuiteNode leafnodes.SuiteNode, afterSuiteNode leafnodes.SuiteNode, specs ...*spec.Spec) *SpecRunner {
- iterator := spec_iterator.NewSerialIterator(specs)
- return New("description", beforeSuiteNode, iterator, afterSuiteNode, []reporters.Reporter{reporter1, reporter2}, writer, config)
- }
-
- BeforeEach(func() {
- reporter1 = reporters.NewFakeReporter()
- reporter2 = reporters.NewFakeReporter()
- writer = Writer.NewFake()
- failer = Failer.New()
-
- thingsThatRan = []string{}
- })
-
- Describe("Running and Reporting", func() {
- var specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec *spec.Spec
- var willRunCalls, didCompleteCalls []string
- var conf config.GinkgoConfigType
-
- JustBeforeEach(func() {
- willRunCalls = []string{}
- didCompleteCalls = []string{}
- specA = newSpec("spec A", noneFlag, false)
- pendingSpec = newSpec("pending spec", pendingFlag, false)
- anotherPendingSpec = newSpec("another pending spec", pendingFlag, false)
- failedSpec = newSpec("failed spec", noneFlag, true)
- specB = newSpec("spec B", noneFlag, false)
- skippedSpec = newSpec("skipped spec", noneFlag, false)
- skippedSpec.Skip()
-
- reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- willRunCalls = append(willRunCalls, "Reporter1")
- }
- reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- willRunCalls = append(willRunCalls, "Reporter2")
- }
-
- reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- didCompleteCalls = append(didCompleteCalls, "Reporter1")
- }
- reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- didCompleteCalls = append(didCompleteCalls, "Reporter2")
- }
-
- runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec)
- runner.Run()
- })
-
- BeforeEach(func() {
- conf = config.GinkgoConfigType{RandomSeed: 17}
- })
-
- It("should skip skipped/pending tests", func() {
- Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "failed spec", "spec B", "AftSuite"}))
- })
-
- It("should report to any attached reporters", func() {
- Ω(reporter1.Config).Should(Equal(reporter2.Config))
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
- Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
- Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
- Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
- Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
- Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
- })
-
- It("should report that a spec did end in reverse order", func() {
- Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"}))
- Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"}))
- })
-
- It("should report the passed in config", func() {
- Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17))
- })
-
- It("should report the beginning of the suite", func() {
- Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
- Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1))
- Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1))
- })
-
- It("should report the end of the suite", func() {
- Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
- Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2))
- Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(2))
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
- })
-
- Context("when told to perform a dry run", func() {
- BeforeEach(func() {
- conf.DryRun = true
- })
-
- It("should report to the reporters", func() {
- Ω(reporter1.Config).Should(Equal(reporter2.Config))
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
- Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
- Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
- Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
- Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
- Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
- })
-
- It("should not actually run anything", func() {
- Ω(thingsThatRan).Should(BeEmpty())
- })
-
-			It("should report before and after suites as passed", func() {
- Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed))
- Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed))
- })
-
- It("should report specs as passed", func() {
- summaries := reporter1.SpecSummaries
- Ω(summaries).Should(HaveLen(6))
- Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A"))
- Ω(summaries[0].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec"))
- Ω(summaries[1].State).Should(Equal(types.SpecStatePending))
- Ω(summaries[2].ComponentTexts).Should(ContainElement("another pending spec"))
- Ω(summaries[2].State).Should(Equal(types.SpecStatePending))
- Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec"))
- Ω(summaries[3].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B"))
- Ω(summaries[4].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec"))
- Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped))
- })
-
- It("should report the end of the suite", func() {
- Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
- Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
- Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2))
- Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0))
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
- })
-
- It("should not report a slow test", func() {
- summaries := reporter1.SpecSummaries
- for _, s := range summaries {
- Expect(s.RunTime).To(BeZero())
- }
- })
- })
- })
-
- Describe("reporting on specs", func() {
- var proceed chan bool
- var ready chan bool
- var finished chan bool
- BeforeEach(func() {
- ready = make(chan bool)
- proceed = make(chan bool)
- finished = make(chan bool)
- skippedSpec := newSpec("SKIP", noneFlag, false)
- skippedSpec.Skip()
-
- runner = newRunner(
- config.GinkgoConfigType{},
- newBefSuite("BefSuite", false),
- newAftSuite("AftSuite", false),
- skippedSpec,
- newSpec("PENDING", pendingFlag, false),
- newSpecWithBody("RUN", func() {
- close(ready)
- <-proceed
- }),
- )
- go func() {
- runner.Run()
- close(finished)
- }()
- })
-
- It("should report about pending/skipped specs", func() {
- <-ready
- Ω(reporter1.SpecWillRunSummaries).Should(HaveLen(3))
-
- Ω(reporter1.SpecWillRunSummaries[0].ComponentTexts[0]).Should(Equal("SKIP"))
- Ω(reporter1.SpecWillRunSummaries[1].ComponentTexts[0]).Should(Equal("PENDING"))
- Ω(reporter1.SpecWillRunSummaries[2].ComponentTexts[0]).Should(Equal("RUN"))
-
- Ω(reporter1.SpecSummaries[0].ComponentTexts[0]).Should(Equal("SKIP"))
- Ω(reporter1.SpecSummaries[1].ComponentTexts[0]).Should(Equal("PENDING"))
- Ω(reporter1.SpecSummaries).Should(HaveLen(2))
-
- close(proceed)
- <-finished
-
- Ω(reporter1.SpecSummaries).Should(HaveLen(3))
- Ω(reporter1.SpecSummaries[2].ComponentTexts[0]).Should(Equal("RUN"))
- })
- })
-
- Describe("Running and Reporting when there's flakes", func() {
- var specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec *spec.Spec
- var willRunCalls, didCompleteCalls []string
- var conf config.GinkgoConfigType
- var failedSpecFlag = noneFlag
-
- JustBeforeEach(func() {
- willRunCalls = []string{}
- didCompleteCalls = []string{}
- specA = newSpec("spec A", noneFlag, false)
- pendingSpec = newSpec("pending spec", pendingFlag, false)
- flakySpec = newFlakySpec("flaky spec", noneFlag, 3)
- failedSpec = newSpec("failed spec", failedSpecFlag, true)
- specB = newSpec("spec B", noneFlag, false)
- skippedSpec = newSpec("skipped spec", noneFlag, false)
- skippedSpec.Skip()
-
- reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- willRunCalls = append(willRunCalls, "Reporter1")
- }
- reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- willRunCalls = append(willRunCalls, "Reporter2")
- }
-
- reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- didCompleteCalls = append(didCompleteCalls, "Reporter1")
- }
- reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- didCompleteCalls = append(didCompleteCalls, "Reporter2")
- }
-
- runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec)
- runner.Run()
- })
-
- BeforeEach(func() {
- failedSpecFlag = noneFlag
- conf = config.GinkgoConfigType{
- RandomSeed: 17,
- FlakeAttempts: 5,
- }
- })
-
- It("should skip skipped/pending tests", func() {
- Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "flaky spec", "flaky spec", "flaky spec", "failed spec", "failed spec", "failed spec", "failed spec", "failed spec", "spec B", "AftSuite"}))
- })
-
- It("should report to any attached reporters", func() {
- Ω(reporter1.Config).Should(Equal(reporter2.Config))
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
- Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
- Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
- Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
- Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
- Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
- })
-
- It("should report that a spec did end in reverse order", func() {
- Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"}))
- Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"}))
- })
-
- It("should report the passed in config", func() {
- Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17))
- })
-
- It("should report the beginning of the suite", func() {
- Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
- Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1))
- Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1))
- })
-
- It("should report the end of the suite", func() {
- Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
- Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(3))
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1))
- })
-
- Context("when nothing fails", func() {
- BeforeEach(func() {
- failedSpecFlag = pendingFlag
- })
-
- It("the suite should pass even with flakes", func() {
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
- Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1))
- })
- })
-
- Context("when told to perform a dry run", func() {
- BeforeEach(func() {
- conf.DryRun = true
- })
-
- It("should report to the reporters", func() {
- Ω(reporter1.Config).Should(Equal(reporter2.Config))
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
- Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
- Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
- Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
- Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
- Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
- })
-
- It("should not actually run anything", func() {
- Ω(thingsThatRan).Should(BeEmpty())
- })
-
-			It("should report before and after suites as passed", func() {
- Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed))
- Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed))
- })
-
- It("should report specs as passed", func() {
- summaries := reporter1.SpecSummaries
- Ω(summaries).Should(HaveLen(6))
- Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A"))
- Ω(summaries[0].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec"))
- Ω(summaries[1].State).Should(Equal(types.SpecStatePending))
- Ω(summaries[2].ComponentTexts).Should(ContainElement("flaky spec"))
- Ω(summaries[2].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec"))
- Ω(summaries[3].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B"))
- Ω(summaries[4].State).Should(Equal(types.SpecStatePassed))
- Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec"))
- Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped))
- })
-
- It("should report the end of the suite", func() {
- Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
- Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
- Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
- Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0))
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
- })
- })
- })
-
- Describe("Running BeforeSuite & AfterSuite", func() {
- var success bool
- var befSuite leafnodes.SuiteNode
- var aftSuite leafnodes.SuiteNode
- Context("with a nil BeforeSuite & AfterSuite", func() {
- BeforeEach(func() {
- runner = newRunner(
- config.GinkgoConfigType{},
- nil,
- nil,
- newSpec("A", noneFlag, false),
- newSpec("B", noneFlag, false),
- )
- success = runner.Run()
- })
-
- It("should not report about the BeforeSuite", func() {
- Ω(reporter1.BeforeSuiteSummary).Should(BeNil())
- })
-
- It("should not report about the AfterSuite", func() {
- Ω(reporter1.AfterSuiteSummary).Should(BeNil())
- })
-
- It("should run the specs", func() {
- Ω(thingsThatRan).Should(Equal([]string{"A", "B"}))
- })
- })
-
- Context("when the BeforeSuite & AfterSuite pass", func() {
- BeforeEach(func() {
- befSuite = newBefSuite("BefSuite", false)
- aftSuite = newBefSuite("AftSuite", false)
- runner = newRunner(
- config.GinkgoConfigType{},
- befSuite,
- aftSuite,
- newSpec("A", noneFlag, false),
- newSpec("B", noneFlag, false),
- )
- success = runner.Run()
- })
-
- It("should run the BeforeSuite, the AfterSuite and the specs", func() {
- Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"}))
- })
-
- It("should report about the BeforeSuite", func() {
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
- })
-
- It("should report about the AfterSuite", func() {
- Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
- })
-
- It("should report success", func() {
- Ω(success).Should(BeTrue())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
- })
-
- It("should not dump the writer", func() {
- Ω(writer.EventStream).ShouldNot(ContainElement("DUMP"))
- })
- })
-
- Context("when the BeforeSuite fails", func() {
- BeforeEach(func() {
- befSuite = newBefSuite("BefSuite", true)
- aftSuite = newBefSuite("AftSuite", false)
-
- skipped := newSpec("Skipped", noneFlag, false)
- skipped.Skip()
-
- runner = newRunner(
- config.GinkgoConfigType{},
- befSuite,
- aftSuite,
- newSpec("A", noneFlag, false),
- newSpec("B", noneFlag, false),
- newSpec("Pending", pendingFlag, false),
- skipped,
- )
- success = runner.Run()
- })
-
- It("should not run the specs, but it should run the AfterSuite", func() {
- Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "AftSuite"}))
- })
-
- It("should report about the BeforeSuite", func() {
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
- })
-
- It("should report about the AfterSuite", func() {
- Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
- })
-
- It("should report failure", func() {
- Ω(success).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(2))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(2))
- })
-
- It("should dump the writer", func() {
- Ω(writer.EventStream).Should(ContainElement("DUMP"))
- })
- })
-
- Context("when some other test fails", func() {
- BeforeEach(func() {
- aftSuite = newBefSuite("AftSuite", false)
-
- runner = newRunner(
- config.GinkgoConfigType{},
- nil,
- aftSuite,
- newSpec("A", noneFlag, true),
- )
- success = runner.Run()
- })
-
- It("should still run the AfterSuite", func() {
- Ω(thingsThatRan).Should(Equal([]string{"A", "AftSuite"}))
- })
-
- It("should report about the AfterSuite", func() {
- Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
- })
-
- It("should report failure", func() {
- Ω(success).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
- Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(1))
- })
- })
-
- Context("when the AfterSuite fails", func() {
- BeforeEach(func() {
- befSuite = newBefSuite("BefSuite", false)
- aftSuite = newBefSuite("AftSuite", true)
- runner = newRunner(
- config.GinkgoConfigType{},
- befSuite,
- aftSuite,
- newSpec("A", noneFlag, false),
- newSpec("B", noneFlag, false),
- )
- success = runner.Run()
- })
-
- It("should run everything", func() {
- Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"}))
- })
-
- It("should report about the BeforeSuite", func() {
- Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
- })
-
- It("should report about the AfterSuite", func() {
- Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
- })
-
- It("should report failure", func() {
- Ω(success).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
- })
-
- It("should dump the writer", func() {
- Ω(writer.EventStream).Should(ContainElement("DUMP"))
- })
- })
- })
-
- Describe("When instructed to fail fast", func() {
- BeforeEach(func() {
- conf := config.GinkgoConfigType{
- FailFast: true,
- }
- runner = newRunner(conf, nil, newAftSuite("after-suite", false), newSpec("passing", noneFlag, false), newSpec("failing", noneFlag, true), newSpec("dont-see", noneFlag, true), newSpec("dont-see", noneFlag, true))
- })
-
- It("should return false, report failure, and not run anything past the failing test", func() {
- Ω(runner.Run()).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- Ω(thingsThatRan).Should(Equal([]string{"passing", "failing", "after-suite"}))
- })
-
- It("should announce the subsequent specs as skipped", func() {
- runner.Run()
- Ω(reporter1.SpecSummaries).Should(HaveLen(4))
- Ω(reporter1.SpecSummaries[2].State).Should(Equal(types.SpecStateSkipped))
- Ω(reporter1.SpecSummaries[3].State).Should(Equal(types.SpecStateSkipped))
- })
-
- It("should mark all subsequent specs as skipped", func() {
- runner.Run()
- Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(2))
- })
- })
-
- Describe("Marking failure and success", func() {
- Context("when all tests pass", func() {
- BeforeEach(func() {
- runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false))
- })
-
- It("should return true and report success", func() {
- Ω(runner.Run()).Should(BeTrue())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
- })
- })
-
- Context("when a test fails", func() {
- BeforeEach(func() {
- runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("failing", noneFlag, true), newSpec("pending", pendingFlag, false))
- })
-
- It("should return false and report failure", func() {
- Ω(runner.Run()).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- })
- })
-
- Context("when there is a pending test, but pendings count as failures", func() {
- BeforeEach(func() {
- runner = newRunner(config.GinkgoConfigType{FailOnPending: true}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false))
- })
-
- It("should return false and report failure", func() {
- Ω(runner.Run()).Should(BeFalse())
- Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
- })
- })
- })
-
- Describe("Managing the writer", func() {
- BeforeEach(func() {
- runner = newRunner(
- config.GinkgoConfigType{},
- nil,
- nil,
- newSpec("A", noneFlag, false),
- newSpec("B", noneFlag, true),
- newSpec("C", noneFlag, false),
- )
- reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- writer.AddEvent("R1.WillRun")
- }
- reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
- writer.AddEvent("R2.WillRun")
- }
- reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- writer.AddEvent("R1.DidComplete")
- }
- reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
- writer.AddEvent("R2.DidComplete")
- }
- runner.Run()
- })
-
- It("should truncate between tests, but only dump if a test fails", func() {
- Ω(writer.EventStream).Should(Equal([]string{
- "TRUNCATE",
- "R1.WillRun",
- "R2.WillRun",
- "A",
- "R2.DidComplete",
- "R1.DidComplete",
- "TRUNCATE",
- "R1.WillRun",
- "R2.WillRun",
- "B",
- "BYTES",
- "R2.DidComplete",
- "DUMP",
- "R1.DidComplete",
- "TRUNCATE",
- "R1.WillRun",
- "R2.WillRun",
- "C",
- "R2.DidComplete",
- "R1.DidComplete",
- }))
- })
- })
-
- Describe("CurrentSpecSummary", func() {
- It("should return the spec summary for the currently running spec", func() {
- var summary *types.SpecSummary
- runner = newRunner(
- config.GinkgoConfigType{},
- nil,
- nil,
- newSpec("A", noneFlag, false),
- newSpecWithBody("B", func() {
- var ok bool
- summary, ok = runner.CurrentSpecSummary()
- Ω(ok).Should(BeTrue())
- }),
- newSpec("C", noneFlag, false),
- )
- runner.Run()
-
- Ω(summary.ComponentTexts).Should(Equal([]string{"B"}))
-
- summary, ok := runner.CurrentSpecSummary()
- Ω(summary).Should(BeNil())
- Ω(ok).Should(BeFalse())
- })
- })
-
- Describe("generating a suite id", func() {
- It("should generate an id randomly", func() {
- runnerA := newRunner(config.GinkgoConfigType{}, nil, nil)
- runnerA.Run()
- IDA := reporter1.BeginSummary.SuiteID
-
- runnerB := newRunner(config.GinkgoConfigType{}, nil, nil)
- runnerB.Run()
- IDB := reporter1.BeginSummary.SuiteID
-
- IDRegexp := "[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"
- Ω(IDA).Should(MatchRegexp(IDRegexp))
- Ω(IDB).Should(MatchRegexp(IDRegexp))
-
- Ω(IDA).ShouldNot(Equal(IDB))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go
deleted file mode 100644
index 06fe1d12a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package suite_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func Test(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Suite")
-}
-
-var numBeforeSuiteRuns = 0
-var numAfterSuiteRuns = 0
-
-var _ = BeforeSuite(func() {
- numBeforeSuiteRuns++
-})
-
-var _ = AfterSuite(func() {
- numAfterSuiteRuns++
- Ω(numBeforeSuiteRuns).Should(Equal(1))
- Ω(numAfterSuiteRuns).Should(Equal(1))
-})
-
-//Fakes
-type fakeTestingT struct {
- didFail bool
-}
-
-func (fakeT *fakeTestingT) Fail() {
- fakeT.didFail = true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go
deleted file mode 100644
index fd2d11dc3..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go
+++ /dev/null
@@ -1,385 +0,0 @@
-package suite_test
-
-import (
- "bytes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/suite"
- . "github.com/onsi/gomega"
-
- "math/rand"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- Failer "github.com/onsi/ginkgo/internal/failer"
- Writer "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-var _ = Describe("Suite", func() {
- var (
- specSuite *Suite
- fakeT *fakeTestingT
- fakeR *reporters.FakeReporter
- writer *Writer.FakeGinkgoWriter
- failer *Failer.Failer
- )
-
- BeforeEach(func() {
- writer = Writer.NewFake()
- fakeT = &fakeTestingT{}
- fakeR = reporters.NewFakeReporter()
- failer = Failer.New()
- specSuite = New(failer)
- })
-
- Describe("running a suite", func() {
- var (
- runOrder []string
- randomizeAllSpecs bool
- randomSeed int64
- focusString string
- parallelNode int
- parallelTotal int
- runResult bool
- hasProgrammaticFocus bool
- )
-
- var f = func(runText string) func() {
- return func() {
- runOrder = append(runOrder, runText)
- }
- }
-
- BeforeEach(func() {
- randomizeAllSpecs = false
- randomSeed = 11
- parallelNode = 1
- parallelTotal = 1
- focusString = ""
-
- runOrder = make([]string, 0)
- specSuite.SetBeforeSuiteNode(f("BeforeSuite"), codelocation.New(0), 0)
- specSuite.PushBeforeEachNode(f("top BE"), codelocation.New(0), 0)
- specSuite.PushJustBeforeEachNode(f("top JBE"), codelocation.New(0), 0)
- specSuite.PushAfterEachNode(f("top AE"), codelocation.New(0), 0)
-
- specSuite.PushContainerNode("container", func() {
- specSuite.PushBeforeEachNode(f("BE"), codelocation.New(0), 0)
- specSuite.PushJustBeforeEachNode(f("JBE"), codelocation.New(0), 0)
- specSuite.PushAfterEachNode(f("AE"), codelocation.New(0), 0)
- specSuite.PushItNode("it", f("IT"), types.FlagTypeNone, codelocation.New(0), 0)
-
- specSuite.PushContainerNode("inner container", func() {
- specSuite.PushItNode("inner it", f("inner IT"), types.FlagTypeNone, codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0))
- }, types.FlagTypeNone, codelocation.New(0))
-
- specSuite.PushContainerNode("container 2", func() {
- specSuite.PushBeforeEachNode(f("BE 2"), codelocation.New(0), 0)
- specSuite.PushItNode("it 2", f("IT 2"), types.FlagTypeNone, codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0))
-
- specSuite.PushItNode("top level it", f("top IT"), types.FlagTypeNone, codelocation.New(0), 0)
-
- specSuite.SetAfterSuiteNode(f("AfterSuite"), codelocation.New(0), 0)
- })
-
- JustBeforeEach(func() {
- runResult, hasProgrammaticFocus = specSuite.Run(fakeT, "suite description", []reporters.Reporter{fakeR}, writer, config.GinkgoConfigType{
- RandomSeed: randomSeed,
- RandomizeAllSpecs: randomizeAllSpecs,
- FocusString: focusString,
- ParallelNode: parallelNode,
- ParallelTotal: parallelTotal,
- })
- })
-
- It("provides the config and suite description to the reporter", func() {
- Ω(fakeR.Config.RandomSeed).Should(Equal(int64(randomSeed)))
- Ω(fakeR.Config.RandomizeAllSpecs).Should(Equal(randomizeAllSpecs))
- Ω(fakeR.BeginSummary.SuiteDescription).Should(Equal("suite description"))
- })
-
- It("reports that the BeforeSuite node ran", func() {
- Ω(fakeR.BeforeSuiteSummary).ShouldNot(BeNil())
- })
-
- It("reports that the AfterSuite node ran", func() {
- Ω(fakeR.AfterSuiteSummary).ShouldNot(BeNil())
- })
-
- It("provides information about the current test", func() {
- description := CurrentGinkgoTestDescription()
- Ω(description.ComponentTexts).Should(Equal([]string{"Suite", "running a suite", "provides information about the current test"}))
- Ω(description.FullTestText).Should(Equal("Suite running a suite provides information about the current test"))
- Ω(description.TestText).Should(Equal("provides information about the current test"))
- Ω(description.IsMeasurement).Should(BeFalse())
- Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
- Ω(description.LineNumber).Should(BeNumerically(">", 50))
- Ω(description.LineNumber).Should(BeNumerically("<", 150))
- Ω(description.Failed).Should(BeFalse())
- Ω(description.Duration).Should(BeNumerically(">", 0))
- })
-
- Measure("should run measurements", func(b Benchmarker) {
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
-
- runtime := b.Time("sleeping", func() {
- sleepTime := time.Duration(r.Float64() * 0.01 * float64(time.Second))
- time.Sleep(sleepTime)
- })
- Ω(runtime.Seconds()).Should(BeNumerically("<=", 1))
- Ω(runtime.Seconds()).Should(BeNumerically(">=", 0))
-
- randomValue := r.Float64() * 10.0
- b.RecordValue("random value", randomValue)
- Ω(randomValue).Should(BeNumerically("<=", 10.0))
- Ω(randomValue).Should(BeNumerically(">=", 0.0))
-
- b.RecordValueWithPrecision("specific value", 123.4567, "ms", 2)
- b.RecordValueWithPrecision("specific value", 234.5678, "ms", 2)
- }, 10)
-
- It("creates a node hierarchy, converts it to a spec collection, and runs it", func() {
- Ω(runOrder).Should(Equal([]string{
- "BeforeSuite",
- "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE",
- "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
- "top BE", "BE 2", "top JBE", "IT 2", "top AE",
- "top BE", "top JBE", "top IT", "top AE",
- "AfterSuite",
- }))
- })
- Context("when in an AfterEach block", func() {
- AfterEach(func() {
- description := CurrentGinkgoTestDescription()
- Ω(description.IsMeasurement).Should(BeFalse())
- Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
- Ω(description.Failed).Should(BeFalse())
- Ω(description.Duration).Should(BeNumerically(">", 0))
- })
-
- It("still provides information about the current test", func() {
- Ω(true).To(BeTrue())
- })
- })
-
- Context("when told to randomize all specs", func() {
- BeforeEach(func() {
- randomizeAllSpecs = true
- })
-
- It("does", func() {
- Ω(runOrder).Should(Equal([]string{
- "BeforeSuite",
- "top BE", "top JBE", "top IT", "top AE",
- "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
- "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE",
- "top BE", "BE 2", "top JBE", "IT 2", "top AE",
- "AfterSuite",
- }))
- })
- })
-
- Context("when provided with a filter", func() {
- BeforeEach(func() {
- focusString = `inner|\d`
- })
-
- It("converts the filter to a regular expression and uses it to filter the running specs", func() {
- Ω(runOrder).Should(Equal([]string{
- "BeforeSuite",
- "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
- "top BE", "BE 2", "top JBE", "IT 2", "top AE",
- "AfterSuite",
- }))
- })
-
- It("should not report a programmatic focus", func() {
- Ω(hasProgrammaticFocus).Should(BeFalse())
- })
- })
-
-		Context("with a programmatically focused spec", func() {
- BeforeEach(func() {
- specSuite.PushItNode("focused it", f("focused it"), types.FlagTypeFocused, codelocation.New(0), 0)
-
- specSuite.PushContainerNode("focused container", func() {
- specSuite.PushItNode("inner focused it", f("inner focused it"), types.FlagTypeFocused, codelocation.New(0), 0)
- specSuite.PushItNode("inner unfocused it", f("inner unfocused it"), types.FlagTypeNone, codelocation.New(0), 0)
- }, types.FlagTypeFocused, codelocation.New(0))
-
- })
-
- It("should only run the focused test, applying backpropagation to favor most deeply focused leaf nodes", func() {
- Ω(runOrder).Should(Equal([]string{
- "BeforeSuite",
- "top BE", "top JBE", "focused it", "top AE",
- "top BE", "top JBE", "inner focused it", "top AE",
- "AfterSuite",
- }))
- })
-
- It("should report a programmatic focus", func() {
- Ω(hasProgrammaticFocus).Should(BeTrue())
- })
- })
-
- Context("when the specs pass", func() {
- It("doesn't report a failure", func() {
- Ω(fakeT.didFail).Should(BeFalse())
- })
-
- It("should return true", func() {
- Ω(runResult).Should(BeTrue())
- })
- })
-
- Context("when a spec fails", func() {
- var location types.CodeLocation
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- location = codelocation.New(0)
- failer.Fail("oops!", location)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should return false", func() {
- Ω(runResult).Should(BeFalse())
- })
-
- It("reports a failure", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
-
- It("generates the correct failure data", func() {
- Ω(fakeR.SpecSummaries[0].Failure.Message).Should(Equal("oops!"))
- Ω(fakeR.SpecSummaries[0].Failure.Location).Should(Equal(location))
- })
- })
-
- Context("when runnable nodes are nested within other runnable nodes", func() {
- Context("when an It is nested", func() {
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- specSuite.PushItNode("nested it", f("oops"), types.FlagTypeNone, codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should fail", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
- })
-
- Context("when a Measure is nested", func() {
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- specSuite.PushMeasureNode("nested measure", func(Benchmarker) {}, types.FlagTypeNone, codelocation.New(0), 10)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should fail", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
- })
-
- Context("when a BeforeEach is nested", func() {
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- specSuite.PushBeforeEachNode(f("nested bef"), codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should fail", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
- })
-
- Context("when a JustBeforeEach is nested", func() {
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- specSuite.PushJustBeforeEachNode(f("nested jbef"), codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should fail", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
- })
-
-			Context("when an AfterEach is nested", func() {
- BeforeEach(func() {
- specSuite.PushItNode("top level it", func() {
- specSuite.PushAfterEachNode(f("nested aft"), codelocation.New(0), 0)
- }, types.FlagTypeNone, codelocation.New(0), 0)
- })
-
- It("should fail", func() {
- Ω(fakeT.didFail).Should(BeTrue())
- })
- })
- })
- })
-
- Describe("BeforeSuite", func() {
- Context("when setting BeforeSuite more than once", func() {
- It("should panic", func() {
- specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0)
-
- Ω(func() {
- specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0)
- }).Should(Panic())
-
- })
- })
- })
-
- Describe("AfterSuite", func() {
- Context("when setting AfterSuite more than once", func() {
- It("should panic", func() {
- specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0)
-
- Ω(func() {
- specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0)
- }).Should(Panic())
- })
- })
- })
-
- Describe("By", func() {
- It("writes to the GinkgoWriter", func() {
- originalGinkgoWriter := GinkgoWriter
- buffer := &bytes.Buffer{}
-
- GinkgoWriter = buffer
- By("Saying Hello GinkgoWriter")
- GinkgoWriter = originalGinkgoWriter
-
- Ω(buffer.String()).Should(ContainSubstring("STEP"))
- Ω(buffer.String()).Should(ContainSubstring(": Saying Hello GinkgoWriter\n"))
- })
-
- It("calls the passed-in callback if present", func() {
- a := 0
- By("calling the callback", func() {
- a = 1
- })
- Ω(a).Should(Equal(1))
- })
-
- It("panics if there is more than one callback", func() {
- Ω(func() {
- By("registering more than one callback", func() {}, func() {})
- }).Should(Panic())
- })
- })
-
- Describe("GinkgoRandomSeed", func() {
- It("returns the current config's random seed", func() {
- Ω(GinkgoRandomSeed()).Should(Equal(config.GinkgoConfig.RandomSeed))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go
deleted file mode 100644
index e20657791..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package writer_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestWriter(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Writer Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go
deleted file mode 100644
index 3e1d17c6d..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package writer_test
-
-import (
- "github.com/onsi/gomega/gbytes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/ginkgo/internal/writer"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Writer", func() {
- var writer *Writer
- var out *gbytes.Buffer
-
- BeforeEach(func() {
- out = gbytes.NewBuffer()
- writer = New(out)
- })
-
- It("should stream directly to the outbuffer by default", func() {
- writer.Write([]byte("foo"))
- Ω(out).Should(gbytes.Say("foo"))
- })
-
-	It("should not emit the header when asked to DumpOutWithHeader", func() {
- writer.Write([]byte("foo"))
- writer.DumpOutWithHeader("my header")
- Ω(out).ShouldNot(gbytes.Say("my header"))
- Ω(out).Should(gbytes.Say("foo"))
- })
-
- Context("when told not to stream", func() {
- BeforeEach(func() {
- writer.SetStream(false)
- })
-
- It("should only write to the buffer when told to DumpOut", func() {
- writer.Write([]byte("foo"))
- Ω(out).ShouldNot(gbytes.Say("foo"))
- writer.DumpOut()
- Ω(out).Should(gbytes.Say("foo"))
- })
-
- It("should truncate the internal buffer when told to truncate", func() {
- writer.Write([]byte("foo"))
- writer.Truncate()
- writer.DumpOut()
- Ω(out).ShouldNot(gbytes.Say("foo"))
-
- writer.Write([]byte("bar"))
- writer.DumpOut()
- Ω(out).Should(gbytes.Say("bar"))
- })
-
- Describe("emitting a header", func() {
- Context("when the buffer has content", func() {
- It("should emit the header followed by the content", func() {
- writer.Write([]byte("foo"))
- writer.DumpOutWithHeader("my header")
-
- Ω(out).Should(gbytes.Say("my header"))
- Ω(out).Should(gbytes.Say("foo"))
- })
- })
-
- Context("when the buffer has no content", func() {
- It("should not emit the header", func() {
- writer.DumpOutWithHeader("my header")
-
- Ω(out).ShouldNot(gbytes.Say("my header"))
- })
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go
deleted file mode 100644
index 2dcf276d3..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package reporters_test
-
-import (
- "time"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters"
- st "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("DefaultReporter", func() {
- var (
- reporter *reporters.DefaultReporter
- reporterConfig config.DefaultReporterConfigType
- stenographer *st.FakeStenographer
-
- ginkgoConfig config.GinkgoConfigType
- suite *types.SuiteSummary
- spec *types.SpecSummary
- )
-
- BeforeEach(func() {
- stenographer = st.NewFakeStenographer()
- reporterConfig = config.DefaultReporterConfigType{
- NoColor: false,
- SlowSpecThreshold: 0.1,
- NoisyPendings: false,
- NoisySkippings: false,
- Verbose: true,
- FullTrace: true,
- }
-
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- })
-
- call := func(method string, args ...interface{}) st.FakeStenographerCall {
- return st.NewFakeStenographerCall(method, args...)
- }
-
- Describe("SpecSuiteWillBegin", func() {
- BeforeEach(func() {
- suite = &types.SuiteSummary{
- SuiteDescription: "A Sweet Suite",
- NumberOfTotalSpecs: 10,
- NumberOfSpecsThatWillBeRun: 8,
- }
-
- ginkgoConfig = config.GinkgoConfigType{
- RandomSeed: 1138,
- RandomizeAllSpecs: true,
- }
- })
-
- Context("when a serial (non-parallel) suite begins", func() {
- BeforeEach(func() {
- ginkgoConfig.ParallelTotal = 1
-
- reporter.SpecSuiteWillBegin(ginkgoConfig, suite)
- })
-
- It("should announce the suite, then announce the number of specs", func() {
- Ω(stenographer.Calls()).Should(HaveLen(2))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false)))
- Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceNumberOfSpecs", 8, 10, false)))
- })
- })
-
- Context("when a parallel suite begins", func() {
- BeforeEach(func() {
- ginkgoConfig.ParallelTotal = 2
- ginkgoConfig.ParallelNode = 1
- suite.NumberOfSpecsBeforeParallelization = 20
-
- reporter.SpecSuiteWillBegin(ginkgoConfig, suite)
- })
-
- It("should announce the suite, announce that it's a parallel run, then announce the number of specs", func() {
- Ω(stenographer.Calls()).Should(HaveLen(2))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false)))
- Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceParallelRun", 1, 2, false)))
- })
- })
- })
-
- Describe("BeforeSuiteDidRun", func() {
- Context("when the BeforeSuite passes", func() {
- It("should announce nothing", func() {
- reporter.BeforeSuiteDidRun(&types.SetupSummary{
- State: types.SpecStatePassed,
- })
-
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
-
- Context("when the BeforeSuite fails", func() {
- It("should announce the failure", func() {
- summary := &types.SetupSummary{
- State: types.SpecStateFailed,
- }
- reporter.BeforeSuiteDidRun(summary)
-
- Ω(stenographer.Calls()).Should(HaveLen(1))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceBeforeSuiteFailure", summary, false, true)))
- })
- })
- })
-
- Describe("AfterSuiteDidRun", func() {
- Context("when the AfterSuite passes", func() {
- It("should announce nothing", func() {
- reporter.AfterSuiteDidRun(&types.SetupSummary{
- State: types.SpecStatePassed,
- })
-
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
-
- Context("when the AfterSuite fails", func() {
- It("should announce the failure", func() {
- summary := &types.SetupSummary{
- State: types.SpecStateFailed,
- }
- reporter.AfterSuiteDidRun(summary)
-
- Ω(stenographer.Calls()).Should(HaveLen(1))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceAfterSuiteFailure", summary, false, true)))
- })
- })
- })
-
- Describe("SpecWillRun", func() {
- Context("When running in verbose mode", func() {
- Context("and the spec will run", func() {
- BeforeEach(func() {
- spec = &types.SpecSummary{}
- reporter.SpecWillRun(spec)
- })
-
- It("should announce that the spec will run", func() {
- Ω(stenographer.Calls()).Should(HaveLen(1))
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecWillRun", spec)))
- })
- })
-
- Context("and the spec will not run", func() {
- Context("because it is pending", func() {
- BeforeEach(func() {
- spec = &types.SpecSummary{
- State: types.SpecStatePending,
- }
- reporter.SpecWillRun(spec)
- })
-
- It("should announce nothing", func() {
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
-
- Context("because it is skipped", func() {
- BeforeEach(func() {
- spec = &types.SpecSummary{
- State: types.SpecStateSkipped,
- }
- reporter.SpecWillRun(spec)
- })
-
- It("should announce nothing", func() {
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
- })
- })
-
- Context("When running in verbose & succinct mode", func() {
- BeforeEach(func() {
- reporterConfig.Succinct = true
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- spec = &types.SpecSummary{}
- reporter.SpecWillRun(spec)
- })
-
- It("should announce nothing", func() {
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
-
- Context("When not running in verbose mode", func() {
- BeforeEach(func() {
- reporterConfig.Verbose = false
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- spec = &types.SpecSummary{}
- reporter.SpecWillRun(spec)
- })
-
- It("should announce nothing", func() {
- Ω(stenographer.Calls()).Should(BeEmpty())
- })
- })
- })
-
- Describe("SpecDidComplete", func() {
- JustBeforeEach(func() {
- reporter.SpecDidComplete(spec)
- })
-
- BeforeEach(func() {
- spec = &types.SpecSummary{}
- })
-
- Context("When the spec passed", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePassed
- })
-
- Context("When the spec was a measurement", func() {
- BeforeEach(func() {
- spec.IsMeasurement = true
- })
-
- It("should announce the measurement", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, false)))
- })
- })
-
- Context("When the spec is slow", func() {
- BeforeEach(func() {
- spec.RunTime = time.Second
- })
-
- It("should announce that it was slow", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, false)))
- })
- })
-
- Context("Otherwise", func() {
-				It("should announce the successful spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec)))
- })
- })
- })
-
- Context("When the spec is pending", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePending
- })
-
- It("should announce the pending spec, succinctly", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false)))
- })
- })
-
- Context("When the spec is skipped", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateSkipped
- })
-
- It("should announce the skipped spec, succinctly", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, true, true)))
- })
- })
-
- Context("When the spec timed out", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateTimedOut
- })
-
- It("should announce the timedout spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, false, true)))
- })
- })
-
- Context("When the spec panicked", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePanicked
- })
-
- It("should announce the panicked spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, false, true)))
- })
- })
-
- Context("When the spec failed", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateFailed
- })
-
- It("should announce the failed spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, false, true)))
- })
- })
-
- Context("in noisy pendings mode", func() {
- BeforeEach(func() {
- reporterConfig.Succinct = false
- reporterConfig.NoisyPendings = true
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- })
-
- Context("When the spec is pending", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePending
- })
-
- It("should announce the pending spec, noisily", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, true)))
- })
- })
- })
-
- Context("in noisy skippings mode", func() {
- BeforeEach(func() {
- reporterConfig.Succinct = false
- reporterConfig.NoisySkippings = true
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- })
-
- Context("When the spec is skipped", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateSkipped
- })
-
- It("should announce the skipped spec, noisily", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, false, true)))
- })
- })
- })
-
- Context("in succinct mode", func() {
- BeforeEach(func() {
- reporterConfig.Succinct = true
- reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
- })
-
- Context("When the spec passed", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePassed
- })
-
- Context("When the spec was a measurement", func() {
- BeforeEach(func() {
- spec.IsMeasurement = true
- })
-
- It("should announce the measurement", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, true)))
- })
- })
-
- Context("When the spec is slow", func() {
- BeforeEach(func() {
- spec.RunTime = time.Second
- })
-
- It("should announce that it was slow", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, true)))
- })
- })
-
- Context("Otherwise", func() {
-					It("should announce the successful spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec)))
- })
- })
- })
-
- Context("When the spec is pending", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePending
- })
-
- It("should announce the pending spec, succinctly", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false)))
- })
- })
-
- Context("When the spec is skipped", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateSkipped
- })
-
- It("should announce the skipped spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, true, true)))
- })
- })
-
- Context("When the spec timed out", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateTimedOut
- })
-
- It("should announce the timedout spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, true, true)))
- })
- })
-
- Context("When the spec panicked", func() {
- BeforeEach(func() {
- spec.State = types.SpecStatePanicked
- })
-
- It("should announce the panicked spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, true, true)))
- })
- })
-
- Context("When the spec failed", func() {
- BeforeEach(func() {
- spec.State = types.SpecStateFailed
- })
-
- It("should announce the failed spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, true, true)))
- })
- })
- })
- })
-
- Describe("SpecSuiteDidEnd", func() {
- BeforeEach(func() {
- suite = &types.SuiteSummary{}
- reporter.SpecSuiteDidEnd(suite)
- })
-
- It("should announce the spec run's completion", func() {
- Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecRunCompletion", suite, false)))
- })
- })
-})
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go
deleted file mode 100644
index 9b75dc006..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package reporters_test
-
-import (
- "encoding/xml"
- "io/ioutil"
- "os"
- "time"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("JUnit Reporter", func() {
- var (
- outputFile string
- reporter Reporter
- )
- testSuiteTime := 12456999 * time.Microsecond
- reportedSuiteTime := 12.456
-
- readOutputFile := func() reporters.JUnitTestSuite {
- bytes, err := ioutil.ReadFile(outputFile)
- Ω(err).ShouldNot(HaveOccurred())
- var suite reporters.JUnitTestSuite
- err = xml.Unmarshal(bytes, &suite)
- Ω(err).ShouldNot(HaveOccurred())
- return suite
- }
-
- BeforeEach(func() {
- f, err := ioutil.TempFile("", "output")
- Ω(err).ShouldNot(HaveOccurred())
- f.Close()
- outputFile = f.Name()
-
- reporter = reporters.NewJUnitReporter(outputFile)
-
- reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{
- SuiteDescription: "My test suite",
- NumberOfSpecsThatWillBeRun: 1,
- })
- })
-
- AfterEach(func() {
- os.RemoveAll(outputFile)
- })
-
- Describe("a passing test", func() {
- BeforeEach(func() {
- beforeSuite := &types.SetupSummary{
- State: types.SpecStatePassed,
- }
- reporter.BeforeSuiteDidRun(beforeSuite)
-
- afterSuite := &types.SetupSummary{
- State: types.SpecStatePassed,
- }
- reporter.AfterSuiteDidRun(afterSuite)
-
- spec := &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: types.SpecStatePassed,
- RunTime: 5 * time.Second,
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 0,
- RunTime: testSuiteTime,
- })
- })
-
- It("should record the test as passing", func() {
- output := readOutputFile()
- Ω(output.Name).Should(Equal("My test suite"))
- Ω(output.Tests).Should(Equal(1))
- Ω(output.Failures).Should(Equal(0))
- Ω(output.Time).Should(Equal(reportedSuiteTime))
- Ω(output.Errors).Should(Equal(0))
- Ω(output.TestCases).Should(HaveLen(1))
- Ω(output.TestCases[0].Name).Should(Equal("A B C"))
- Ω(output.TestCases[0].ClassName).Should(Equal("My test suite"))
- Ω(output.TestCases[0].FailureMessage).Should(BeNil())
- Ω(output.TestCases[0].Skipped).Should(BeNil())
- Ω(output.TestCases[0].Time).Should(Equal(5.0))
- })
- })
-
- Describe("when the BeforeSuite fails", func() {
- var beforeSuite *types.SetupSummary
-
- BeforeEach(func() {
- beforeSuite = &types.SetupSummary{
- State: types.SpecStateFailed,
- RunTime: 3 * time.Second,
- Failure: types.SpecFailure{
- Message: "failed to setup",
- ComponentCodeLocation: codelocation.New(0),
- Location: codelocation.New(2),
- },
- }
- reporter.BeforeSuiteDidRun(beforeSuite)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: testSuiteTime,
- })
- })
-
- It("should record the test as having failed", func() {
- output := readOutputFile()
- Ω(output.Name).Should(Equal("My test suite"))
- Ω(output.Tests).Should(Equal(1))
- Ω(output.Failures).Should(Equal(1))
- Ω(output.Time).Should(Equal(reportedSuiteTime))
- Ω(output.Errors).Should(Equal(0))
- Ω(output.TestCases[0].Name).Should(Equal("BeforeSuite"))
- Ω(output.TestCases[0].Time).Should(Equal(3.0))
- Ω(output.TestCases[0].ClassName).Should(Equal("My test suite"))
- Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure"))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup"))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(beforeSuite.Failure.ComponentCodeLocation.String()))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(beforeSuite.Failure.Location.String()))
- Ω(output.TestCases[0].Skipped).Should(BeNil())
- })
- })
-
- Describe("when the AfterSuite fails", func() {
- var afterSuite *types.SetupSummary
-
- BeforeEach(func() {
- afterSuite = &types.SetupSummary{
- State: types.SpecStateFailed,
- RunTime: 3 * time.Second,
- Failure: types.SpecFailure{
- Message: "failed to setup",
- ComponentCodeLocation: codelocation.New(0),
- Location: codelocation.New(2),
- },
- }
- reporter.AfterSuiteDidRun(afterSuite)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: testSuiteTime,
- })
- })
-
- It("should record the test as having failed", func() {
- output := readOutputFile()
- Ω(output.Name).Should(Equal("My test suite"))
- Ω(output.Tests).Should(Equal(1))
- Ω(output.Failures).Should(Equal(1))
- Ω(output.Time).Should(Equal(reportedSuiteTime))
- Ω(output.Errors).Should(Equal(0))
- Ω(output.TestCases[0].Name).Should(Equal("AfterSuite"))
- Ω(output.TestCases[0].Time).Should(Equal(3.0))
- Ω(output.TestCases[0].ClassName).Should(Equal("My test suite"))
- Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure"))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup"))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(afterSuite.Failure.ComponentCodeLocation.String()))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(afterSuite.Failure.Location.String()))
- Ω(output.TestCases[0].Skipped).Should(BeNil())
- })
- })
-
- specStateCases := []struct {
- state types.SpecState
- message string
- }{
- {types.SpecStateFailed, "Failure"},
- {types.SpecStateTimedOut, "Timeout"},
- {types.SpecStatePanicked, "Panic"},
- }
-
- for _, specStateCase := range specStateCases {
- specStateCase := specStateCase
- Describe("a failing test", func() {
- var spec *types.SpecSummary
- BeforeEach(func() {
- spec = &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: specStateCase.state,
- RunTime: 5 * time.Second,
- Failure: types.SpecFailure{
- ComponentCodeLocation: codelocation.New(0),
- Location: codelocation.New(2),
- Message: "I failed",
- },
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: testSuiteTime,
- })
- })
-
-			It("should record the test as failing", func() {
- output := readOutputFile()
- Ω(output.Name).Should(Equal("My test suite"))
- Ω(output.Tests).Should(Equal(1))
- Ω(output.Failures).Should(Equal(1))
- Ω(output.Time).Should(Equal(reportedSuiteTime))
- Ω(output.Errors).Should(Equal(0))
- Ω(output.TestCases[0].Name).Should(Equal("A B C"))
- Ω(output.TestCases[0].ClassName).Should(Equal("My test suite"))
- Ω(output.TestCases[0].FailureMessage.Type).Should(Equal(specStateCase.message))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("I failed"))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(spec.Failure.ComponentCodeLocation.String()))
- Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(spec.Failure.Location.String()))
- Ω(output.TestCases[0].Skipped).Should(BeNil())
- })
- })
- }
-
- for _, specStateCase := range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} {
- specStateCase := specStateCase
- Describe("a skipped test", func() {
- var spec *types.SpecSummary
- BeforeEach(func() {
- spec = &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: specStateCase,
- RunTime: 5 * time.Second,
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 0,
- RunTime: testSuiteTime,
- })
- })
-
-			It("should record the test as skipped", func() {
- output := readOutputFile()
- Ω(output.Tests).Should(Equal(1))
- Ω(output.Failures).Should(Equal(0))
- Ω(output.Time).Should(Equal(reportedSuiteTime))
- Ω(output.Errors).Should(Equal(0))
- Ω(output.TestCases[0].Name).Should(Equal("A B C"))
- Ω(output.TestCases[0].Skipped).ShouldNot(BeNil())
- })
- })
- }
-})
diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go b/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go
deleted file mode 100644
index cec5a4dbf..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package reporters_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestReporters(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Reporters Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/README.md
deleted file mode 100644
index 37de454f4..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-## Colorize Windows
-
-These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
-
- * go-colorable: <https://github.com/mattn/go-colorable>
- * go-isatty: <https://github.com/mattn/go-isatty>
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go
deleted file mode 100644
index b45d5db01..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package reporters_test
-
-import (
- "bytes"
- "fmt"
- "time"
-
- . "github.com/onsi/ginkgo"
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("TeamCity Reporter", func() {
- var (
- buffer bytes.Buffer
- reporter Reporter
- )
-
- BeforeEach(func() {
- buffer.Truncate(0)
- reporter = reporters.NewTeamCityReporter(&buffer)
- reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{
- SuiteDescription: "Foo's test suite",
- NumberOfSpecsThatWillBeRun: 1,
- })
- })
-
- Describe("a passing test", func() {
- BeforeEach(func() {
- beforeSuite := &types.SetupSummary{
- State: types.SpecStatePassed,
- }
- reporter.BeforeSuiteDidRun(beforeSuite)
-
- afterSuite := &types.SetupSummary{
- State: types.SpecStatePassed,
- }
- reporter.AfterSuiteDidRun(afterSuite)
-
- spec := &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: types.SpecStatePassed,
- RunTime: 5 * time.Second,
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 0,
- RunTime: 10 * time.Second,
- })
- })
-
- It("should record the test as passing", func() {
- actual := buffer.String()
- expected :=
- "##teamcity[testSuiteStarted name='Foo|'s test suite']" +
- "##teamcity[testStarted name='A B C']" +
- "##teamcity[testFinished name='A B C' duration='5000']" +
- "##teamcity[testSuiteFinished name='Foo|'s test suite']"
- Ω(actual).Should(Equal(expected))
- })
- })
-
- Describe("when the BeforeSuite fails", func() {
- var beforeSuite *types.SetupSummary
-
- BeforeEach(func() {
- beforeSuite = &types.SetupSummary{
- State: types.SpecStateFailed,
- RunTime: 3 * time.Second,
- Failure: types.SpecFailure{
- Message: "failed to setup\n",
- ComponentCodeLocation: codelocation.New(0),
- },
- }
- reporter.BeforeSuiteDidRun(beforeSuite)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: 10 * time.Second,
- })
- })
-
- It("should record the test as having failed", func() {
- actual := buffer.String()
- expected := fmt.Sprintf(
- "##teamcity[testSuiteStarted name='Foo|'s test suite']"+
- "##teamcity[testStarted name='BeforeSuite']"+
- "##teamcity[testFailed name='BeforeSuite' message='%s' details='failed to setup|n']"+
- "##teamcity[testFinished name='BeforeSuite' duration='3000']"+
- "##teamcity[testSuiteFinished name='Foo|'s test suite']", beforeSuite.Failure.ComponentCodeLocation.String(),
- )
- Ω(actual).Should(Equal(expected))
- })
- })
-
- Describe("when the AfterSuite fails", func() {
- var afterSuite *types.SetupSummary
-
- BeforeEach(func() {
- afterSuite = &types.SetupSummary{
- State: types.SpecStateFailed,
- RunTime: 3 * time.Second,
- Failure: types.SpecFailure{
- Message: "failed to setup\n",
- ComponentCodeLocation: codelocation.New(0),
- },
- }
- reporter.AfterSuiteDidRun(afterSuite)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: 10 * time.Second,
- })
- })
-
- It("should record the test as having failed", func() {
- actual := buffer.String()
- expected := fmt.Sprintf(
- "##teamcity[testSuiteStarted name='Foo|'s test suite']"+
- "##teamcity[testStarted name='AfterSuite']"+
- "##teamcity[testFailed name='AfterSuite' message='%s' details='failed to setup|n']"+
- "##teamcity[testFinished name='AfterSuite' duration='3000']"+
- "##teamcity[testSuiteFinished name='Foo|'s test suite']", afterSuite.Failure.ComponentCodeLocation.String(),
- )
- Ω(actual).Should(Equal(expected))
- })
- })
- specStateCases := []struct {
- state types.SpecState
- message string
- }{
- {types.SpecStateFailed, "Failure"},
- {types.SpecStateTimedOut, "Timeout"},
- {types.SpecStatePanicked, "Panic"},
- }
-
- for _, specStateCase := range specStateCases {
- specStateCase := specStateCase
- Describe("a failing test", func() {
- var spec *types.SpecSummary
- BeforeEach(func() {
- spec = &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: specStateCase.state,
- RunTime: 5 * time.Second,
- Failure: types.SpecFailure{
- ComponentCodeLocation: codelocation.New(0),
- Message: "I failed",
- },
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 1,
- RunTime: 10 * time.Second,
- })
- })
-
- It("should record test as failing", func() {
- actual := buffer.String()
- expected :=
- fmt.Sprintf("##teamcity[testSuiteStarted name='Foo|'s test suite']"+
- "##teamcity[testStarted name='A B C']"+
- "##teamcity[testFailed name='A B C' message='%s' details='I failed']"+
- "##teamcity[testFinished name='A B C' duration='5000']"+
- "##teamcity[testSuiteFinished name='Foo|'s test suite']", spec.Failure.ComponentCodeLocation.String())
- Ω(actual).Should(Equal(expected))
- })
- })
- }
-
- for _, specStateCase := range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} {
- specStateCase := specStateCase
- Describe("a skipped test", func() {
- var spec *types.SpecSummary
- BeforeEach(func() {
- spec = &types.SpecSummary{
- ComponentTexts: []string{"[Top Level]", "A", "B", "C"},
- State: specStateCase,
- RunTime: 5 * time.Second,
- }
- reporter.SpecWillRun(spec)
- reporter.SpecDidComplete(spec)
-
- reporter.SpecSuiteDidEnd(&types.SuiteSummary{
- NumberOfSpecsThatWillBeRun: 1,
- NumberOfFailedSpecs: 0,
- RunTime: 10 * time.Second,
- })
- })
-
- It("should record test as ignored", func() {
- actual := buffer.String()
- expected :=
- "##teamcity[testSuiteStarted name='Foo|'s test suite']" +
- "##teamcity[testStarted name='A B C']" +
- "##teamcity[testIgnored name='A B C']" +
- "##teamcity[testFinished name='A B C' duration='5000']" +
- "##teamcity[testSuiteFinished name='Foo|'s test suite']"
- Ω(actual).Should(Equal(expected))
- })
- })
- }
-})
diff --git a/vendor/github.com/onsi/ginkgo/types/types_suite_test.go b/vendor/github.com/onsi/ginkgo/types/types_suite_test.go
deleted file mode 100644
index b026169c1..000000000
--- a/vendor/github.com/onsi/ginkgo/types/types_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package types_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestTypes(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Types Suite")
-}
diff --git a/vendor/github.com/onsi/ginkgo/types/types_test.go b/vendor/github.com/onsi/ginkgo/types/types_test.go
deleted file mode 100644
index a0e161c88..000000000
--- a/vendor/github.com/onsi/ginkgo/types/types_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package types_test
-
-import (
- . "github.com/onsi/ginkgo/types"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var specStates = []SpecState{
- SpecStatePassed,
- SpecStateTimedOut,
- SpecStatePanicked,
- SpecStateFailed,
- SpecStatePending,
- SpecStateSkipped,
-}
-
-func verifySpecSummary(caller func(SpecSummary) bool, trueStates ...SpecState) {
- summary := SpecSummary{}
- trueStateLookup := map[SpecState]bool{}
- for _, state := range trueStates {
- trueStateLookup[state] = true
- summary.State = state
- Ω(caller(summary)).Should(BeTrue())
- }
-
- for _, state := range specStates {
- if trueStateLookup[state] {
- continue
- }
- summary.State = state
- Ω(caller(summary)).Should(BeFalse())
- }
-}
-
-var _ = Describe("Types", func() {
- Describe("IsFailureState", func() {
- It("knows when it is in a failure-like state", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.State.IsFailure()
- }, SpecStateTimedOut, SpecStatePanicked, SpecStateFailed)
- })
- })
-
- Describe("SpecSummary", func() {
- It("knows when it is in a failure-like state", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.HasFailureState()
- }, SpecStateTimedOut, SpecStatePanicked, SpecStateFailed)
- })
-
- It("knows when it passed", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.Passed()
- }, SpecStatePassed)
- })
-
- It("knows when it has failed", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.Failed()
- }, SpecStateFailed)
- })
-
- It("knows when it has panicked", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.Panicked()
- }, SpecStatePanicked)
- })
-
- It("knows when it has timed out", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.TimedOut()
- }, SpecStateTimedOut)
- })
-
- It("knows when it is pending", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.Pending()
- }, SpecStatePending)
- })
-
- It("knows when it is skipped", func() {
- verifySpecSummary(func(summary SpecSummary) bool {
- return summary.Skipped()
- }, SpecStateSkipped)
- })
- })
-
- Describe("SpecMeasurement", func() {
- It("knows how to format values when the precision is 0", func() {
- Ω(SpecMeasurement{}.PrecisionFmt()).Should(Equal("%f"))
- })
-
- It("knows how to format the values when the precision is 3", func() {
- Ω(SpecMeasurement{Precision: 3}.PrecisionFmt()).Should(Equal("%.3f"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
index 4d71367f6..2420a5d07 100644
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ b/vendor/github.com/onsi/gomega/.travis.yml
@@ -1,12 +1,9 @@
language: go
go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- 1.10.x
- 1.11.x
+ - 1.12.x
env:
- GO111MODULE=on
@@ -17,7 +14,4 @@ install:
- go get github.com/onsi/ginkgo
- go install github.com/onsi/ginkgo/ginkgo
-script: |
- $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race &&
- go vet &&
- [ -z "`gofmt -l -e -s -w .`" ]
+script: make test
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 9153294f7..5d1eda837 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,14 @@
+## 1.5.0
+
+### Features
+
+- Added MatchKeys matchers [8b909fc]
+
+### Fixes and Minor Improvements
+
+- Add type aliases to remove stuttering [03b0461]
+- Don't run session_test.go on windows (#324) [5533ce8]
+
## 1.4.3
### Fixes:
diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile
new file mode 100644
index 000000000..c92cd56e3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/Makefile
@@ -0,0 +1,6 @@
+test:
+ [ -z "`gofmt -s -w -l -e .`" ]
+ go vet
+ ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
+
+.PHONY: test
diff --git a/vendor/github.com/onsi/gomega/format/format_suite_test.go b/vendor/github.com/onsi/gomega/format/format_suite_test.go
deleted file mode 100644
index 8e65a9529..000000000
--- a/vendor/github.com/onsi/gomega/format/format_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package format_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestFormat(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Format Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/format/format_test.go b/vendor/github.com/onsi/gomega/format/format_test.go
deleted file mode 100644
index 9ea781379..000000000
--- a/vendor/github.com/onsi/gomega/format/format_test.go
+++ /dev/null
@@ -1,627 +0,0 @@
-package format_test
-
-import (
- "fmt"
- "strings"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/format"
- "github.com/onsi/gomega/types"
-)
-
-//recursive struct
-
-type StringAlias string
-type ByteAlias []byte
-type IntAlias int
-
-type AStruct struct {
- Exported string
-}
-
-type SimpleStruct struct {
- Name string
- Enumeration int
- Veritas bool
- Data []byte
- secret uint32
-}
-
-type ComplexStruct struct {
- Strings []string
- SimpleThings []*SimpleStruct
- DataMaps map[int]ByteAlias
-}
-
-type SecretiveStruct struct {
- boolValue bool
- intValue int
- uintValue uint
- uintptrValue uintptr
- floatValue float32
- complexValue complex64
- chanValue chan bool
- funcValue func()
- pointerValue *int
- sliceValue []string
- byteSliceValue []byte
- stringValue string
- arrValue [3]int
- byteArrValue [3]byte
- mapValue map[string]int
- structValue AStruct
- interfaceValue interface{}
-}
-
-type GoStringer struct {
-}
-
-func (g GoStringer) GoString() string {
- return "go-string"
-}
-
-func (g GoStringer) String() string {
- return "string"
-}
-
-type Stringer struct {
-}
-
-func (g Stringer) String() string {
- return "string"
-}
-
-type ctx struct {
-}
-
-func (c *ctx) Deadline() (deadline time.Time, ok bool) {
- return time.Time{}, false
-}
-
-func (c *ctx) Done() <-chan struct{} {
- return nil
-}
-
-func (c *ctx) Err() error {
- return nil
-}
-
-func (c *ctx) Value(key interface{}) interface{} {
- return nil
-}
-
-var _ = Describe("Format", func() {
- match := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher {
- if len(args) > 0 {
- valueRepresentation = fmt.Sprintf(valueRepresentation, args...)
- }
- return Equal(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation))
- }
-
- matchRegexp := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher {
- if len(args) > 0 {
- valueRepresentation = fmt.Sprintf(valueRepresentation, args...)
- }
- return MatchRegexp(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation))
- }
-
- hashMatchingRegexp := func(entries ...string) string {
- entriesSwitch := "(" + strings.Join(entries, "|") + ")"
- arr := make([]string, len(entries))
- for i := range arr {
- arr[i] = entriesSwitch
- }
- return "{" + strings.Join(arr, ", ") + "}"
- }
-
- Describe("Message", func() {
- Context("with only an actual value", func() {
- It("should print out an indented formatted representation of the value and the message", func() {
- Expect(Message(3, "to be three.")).Should(Equal("Expected\n <int>: 3\nto be three."))
- })
- })
-
- Context("with an actual and an expected value", func() {
- It("should print out an indented formatted representatino of both values, and the message", func() {
- Expect(Message(3, "to equal", 4)).Should(Equal("Expected\n <int>: 3\nto equal\n <int>: 4"))
- })
- })
- })
-
- Describe("MessageWithDiff", func() {
- It("shows the exact point where two long strings differ", func() {
- stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
- Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedLongStringFailureMessage))
- })
-
- It("truncates the start of long strings that differ only at their end", func() {
- stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
- stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz"
-
- Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedStartStringFailureMessage))
- })
-
- It("truncates the start of long strings that differ only in length", func() {
- smallString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- largeString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
- Expect(MessageWithDiff(largeString, "to equal", smallString)).Should(Equal(expectedTruncatedStartSizeFailureMessage))
- Expect(MessageWithDiff(smallString, "to equal", largeString)).Should(Equal(expectedTruncatedStartSizeSwappedFailureMessage))
- })
-
- It("truncates the end of long strings that differ only at their start", func() {
- stringWithB := "baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- stringWithZ := "zaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
- Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedEndStringFailureMessage))
- })
-
- It("handles multi-byte sequences correctly", func() {
- stringA := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz1"
- stringB := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
-
- Expect(MessageWithDiff(stringA, "to equal", stringB)).Should(Equal(expectedTruncatedMultiByteFailureMessage))
- })
-
- Context("With truncated diff disabled", func() {
- BeforeEach(func() {
- TruncatedDiff = false
- })
-
- AfterEach(func() {
- TruncatedDiff = true
- })
-
- It("should show the full diff", func() {
- stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
- Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedFullFailureDiff))
- })
- })
- })
-
- Describe("IndentString", func() {
- It("should indent the string", func() {
- Expect(IndentString("foo\n bar\nbaz", 2)).Should(Equal(" foo\n bar\n baz"))
- })
- })
-
- Describe("Object", func() {
- Describe("formatting boolean values", func() {
- It("should give the type and format values correctly", func() {
- Expect(Object(true, 1)).Should(match("bool", "true"))
- Expect(Object(false, 1)).Should(match("bool", "false"))
- })
- })
-
- Describe("formatting numbers", func() {
- It("should give the type and format values correctly", func() {
- Expect(Object(int(3), 1)).Should(match("int", "3"))
- Expect(Object(int8(3), 1)).Should(match("int8", "3"))
- Expect(Object(int16(3), 1)).Should(match("int16", "3"))
- Expect(Object(int32(3), 1)).Should(match("int32", "3"))
- Expect(Object(int64(3), 1)).Should(match("int64", "3"))
-
- Expect(Object(uint(3), 1)).Should(match("uint", "3"))
- Expect(Object(uint8(3), 1)).Should(match("uint8", "3"))
- Expect(Object(uint16(3), 1)).Should(match("uint16", "3"))
- Expect(Object(uint32(3), 1)).Should(match("uint32", "3"))
- Expect(Object(uint64(3), 1)).Should(match("uint64", "3"))
- })
-
- It("should handle uintptr differently", func() {
- Expect(Object(uintptr(3), 1)).Should(match("uintptr", "0x3"))
- })
- })
-
- Describe("formatting channels", func() {
- It("should give the type and format values correctly", func() {
- c := make(chan<- bool, 3)
- c <- true
- c <- false
- Expect(Object(c, 1)).Should(match("chan<- bool | len:2, cap:3", "%v", c))
- })
- })
-
- Describe("formatting strings", func() {
- It("should give the type and format values correctly", func() {
- s := "a\nb\nc"
- Expect(Object(s, 1)).Should(match("string", `a
- b
- c`))
- })
- })
-
- Describe("formatting []byte slices", func() {
- Context("when the slice is made of printable bytes", func() {
- It("should present it as string", func() {
- b := []byte("a b c")
- Expect(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:5, cap:\d+`, `a b c`))
- })
- })
- Context("when the slice contains non-printable bytes", func() {
- It("should present it as slice", func() {
- b := []byte("a b c\n\x01\x02\x03\xff\x1bH")
- Expect(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:12, cap:\d+`, `\[97, 32, 98, 32, 99, 10, 1, 2, 3, 255, 27, 72\]`))
- })
- })
- })
-
- Describe("formatting functions", func() {
- It("should give the type and format values correctly", func() {
- f := func(a string, b []int) ([]byte, error) {
- return []byte("abc"), nil
- }
- Expect(Object(f, 1)).Should(match("func(string, []int) ([]uint8, error)", "%v", f))
- })
- })
-
- Describe("formatting pointers", func() {
- It("should give the type and dereference the value to format it correctly", func() {
- a := 3
- Expect(Object(&a, 1)).Should(match(fmt.Sprintf("*int | %p", &a), "3"))
- })
-
- Context("when there are pointers to pointers...", func() {
- It("should recursively deference the pointer until it gets to a value", func() {
- a := 3
- var b *int
- var c **int
- var d ***int
- b = &a
- c = &b
- d = &c
-
- Expect(Object(d, 1)).Should(match(fmt.Sprintf("***int | %p", d), "3"))
- })
- })
-
- Context("when the pointer points to nil", func() {
- It("should say nil and not explode", func() {
- var a *AStruct
- Expect(Object(a, 1)).Should(match("*format_test.AStruct | 0x0", "nil"))
- })
- })
- })
-
- Describe("formatting arrays", func() {
- It("should give the type and format values correctly", func() {
- w := [3]string{"Jed Bartlet", "Toby Ziegler", "CJ Cregg"}
- Expect(Object(w, 1)).Should(match("[3]string", `["Jed Bartlet", "Toby Ziegler", "CJ Cregg"]`))
- })
-
- Context("with byte arrays", func() {
- It("should give the type and format values correctly", func() {
- w := [3]byte{17, 28, 19}
- Expect(Object(w, 1)).Should(match("[3]uint8", `[17, 28, 19]`))
- })
- })
- })
-
- Describe("formatting slices", func() {
- It("should include the length and capacity in the type information", func() {
- s := make([]bool, 3, 4)
- Expect(Object(s, 1)).Should(match("[]bool | len:3, cap:4", "[false, false, false]"))
- })
-
- Context("when the slice contains long entries", func() {
- It("should format the entries with newlines", func() {
- w := []string{"Josiah Edward Bartlet", "Toby Ziegler", "CJ Cregg"}
- expected := `[
- "Josiah Edward Bartlet",
- "Toby Ziegler",
- "CJ Cregg",
- ]`
- Expect(Object(w, 1)).Should(match("[]string | len:3, cap:3", expected))
- })
- })
- })
-
- Describe("formatting maps", func() {
- It("should include the length in the type information", func() {
- m := make(map[int]bool, 5)
- m[3] = true
- m[4] = false
- Expect(Object(m, 1)).Should(matchRegexp(`map\[int\]bool \| len:2`, hashMatchingRegexp("3: true", "4: false")))
- })
-
- Context("when the slice contains long entries", func() {
- It("should format the entries with newlines", func() {
- m := map[string][]byte{}
- m["Josiah Edward Bartlet"] = []byte("Martin Sheen")
- m["Toby Ziegler"] = []byte("Richard Schiff")
- m["CJ Cregg"] = []byte("Allison Janney")
- expected := `{
- ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"),
- ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"),
- ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"),
- }`
- Expect(Object(m, 1)).Should(matchRegexp(`map\[string\]\[\]uint8 \| len:3`, expected))
- })
- })
- })
-
- Describe("formatting structs", func() {
- It("should include the struct name and the field names", func() {
- s := SimpleStruct{
- Name: "Oswald",
- Enumeration: 17,
- Veritas: true,
- Data: []byte("datum"),
- secret: 1983,
- }
-
- Expect(Object(s, 1)).Should(match("format_test.SimpleStruct", `{Name: "Oswald", Enumeration: 17, Veritas: true, Data: "datum", secret: 1983}`))
- })
-
- Context("when the struct contains long entries", func() {
- It("should format the entries with new lines", func() {
- s := &SimpleStruct{
- Name: "Mithrandir Gandalf Greyhame",
- Enumeration: 2021,
- Veritas: true,
- Data: []byte("wizard"),
- secret: 3,
- }
-
- Expect(Object(s, 1)).Should(match(fmt.Sprintf("*format_test.SimpleStruct | %p", s), `{
- Name: "Mithrandir Gandalf Greyhame",
- Enumeration: 2021,
- Veritas: true,
- Data: "wizard",
- secret: 3,
- }`))
- })
- })
- })
-
- Describe("formatting nil values", func() {
- It("should print out nil", func() {
- Expect(Object(nil, 1)).Should(match("nil", "nil"))
- var typedNil *AStruct
- Expect(Object(typedNil, 1)).Should(match("*format_test.AStruct | 0x0", "nil"))
- var c chan<- bool
- Expect(Object(c, 1)).Should(match("chan<- bool | len:0, cap:0", "nil"))
- var s []string
- Expect(Object(s, 1)).Should(match("[]string | len:0, cap:0", "nil"))
- var m map[string]bool
- Expect(Object(m, 1)).Should(match("map[string]bool | len:0", "nil"))
- })
- })
-
- Describe("formatting aliased types", func() {
- It("should print out the correct alias type", func() {
- Expect(Object(StringAlias("alias"), 1)).Should(match("format_test.StringAlias", `alias`))
- Expect(Object(ByteAlias("alias"), 1)).Should(matchRegexp(`format_test\.ByteAlias \| len:5, cap:\d+`, `alias`))
- Expect(Object(IntAlias(3), 1)).Should(match("format_test.IntAlias", "3"))
- })
- })
-
- Describe("handling nested things", func() {
- It("should produce a correctly nested representation", func() {
- s := ComplexStruct{
- Strings: []string{"lots", "of", "short", "strings"},
- SimpleThings: []*SimpleStruct{
- {"short", 7, true, []byte("succinct"), 17},
- {"something longer", 427, true, []byte("designed to wrap around nicely"), 30},
- },
- DataMaps: map[int]ByteAlias{
- 17: ByteAlias("some substantially longer chunks of data"),
- 1138: ByteAlias("that should make things wrap"),
- },
- }
- expected := `{
- Strings: \["lots", "of", "short", "strings"\],
- SimpleThings: \[
- {Name: "short", Enumeration: 7, Veritas: true, Data: "succinct", secret: 17},
- {
- Name: "something longer",
- Enumeration: 427,
- Veritas: true,
- Data: "designed to wrap around nicely",
- secret: 30,
- },
- \],
- DataMaps: {
- (17: "some substantially longer chunks of data"|1138: "that should make things wrap"),
- (17: "some substantially longer chunks of data"|1138: "that should make things wrap"),
- },
- }`
- Expect(Object(s, 1)).Should(matchRegexp(`format_test\.ComplexStruct`, expected))
- })
- })
-
- Describe("formatting times", func() {
- It("should format time as RFC3339", func() {
- t := time.Date(2016, 10, 31, 9, 57, 23, 12345, time.UTC)
- Expect(Object(t, 1)).Should(match("time.Time", `2016-10-31T09:57:23.000012345Z`))
- })
- })
- })
-
- Describe("Handling unexported fields in structs", func() {
- It("should handle all the various types correctly", func() {
- a := int(5)
- s := SecretiveStruct{
- boolValue: true,
- intValue: 3,
- uintValue: 4,
- uintptrValue: 5,
- floatValue: 6.0,
- complexValue: complex(5.0, 3.0),
- chanValue: make(chan bool, 2),
- funcValue: func() {},
- pointerValue: &a,
- sliceValue: []string{"string", "slice"},
- byteSliceValue: []byte("bytes"),
- stringValue: "a string",
- arrValue: [3]int{11, 12, 13},
- byteArrValue: [3]byte{17, 20, 32},
- mapValue: map[string]int{"a key": 20, "b key": 30},
- structValue: AStruct{"exported"},
- interfaceValue: map[string]int{"a key": 17},
- }
-
- expected := fmt.Sprintf(`{
- boolValue: true,
- intValue: 3,
- uintValue: 4,
- uintptrValue: 0x5,
- floatValue: 6,
- complexValue: \(5\+3i\),
- chanValue: %p,
- funcValue: %p,
- pointerValue: 5,
- sliceValue: \["string", "slice"\],
- byteSliceValue: "bytes",
- stringValue: "a string",
- arrValue: \[11, 12, 13\],
- byteArrValue: \[17, 20, 32\],
- mapValue: %s,
- structValue: {Exported: "exported"},
- interfaceValue: {"a key": 17},
- }`, s.chanValue, s.funcValue, hashMatchingRegexp(`"a key": 20`, `"b key": 30`))
-
- Expect(Object(s, 1)).Should(matchRegexp(`format_test\.SecretiveStruct`, expected))
- })
- })
-
- Describe("Handling interfaces", func() {
- It("should unpack the interface", func() {
- outerHash := map[string]interface{}{}
- innerHash := map[string]int{}
-
- innerHash["inner"] = 3
- outerHash["integer"] = 2
- outerHash["map"] = innerHash
-
- expected := hashMatchingRegexp(`"integer": 2`, `"map": {"inner": 3}`)
- Expect(Object(outerHash, 1)).Should(matchRegexp(`map\[string\]interface {} \| len:2`, expected))
- })
- })
-
- Describe("Handling recursive things", func() {
- It("should not go crazy...", func() {
- m := map[string]interface{}{}
- m["integer"] = 2
- m["map"] = m
- Expect(Object(m, 1)).Should(ContainSubstring("..."))
- })
-
- It("really should not go crazy...", func() {
- type complexKey struct {
- Value map[interface{}]int
- }
-
- complexObject := complexKey{}
- complexObject.Value = make(map[interface{}]int)
-
- complexObject.Value[&complexObject] = 2
- Expect(Object(complexObject, 1)).Should(ContainSubstring("..."))
- })
- })
-
- Describe("When instructed to use the Stringer representation", func() {
- BeforeEach(func() {
- UseStringerRepresentation = true
- })
-
- AfterEach(func() {
- UseStringerRepresentation = false
- })
-
- Context("when passed a GoStringer", func() {
- It("should use what GoString() returns", func() {
- Expect(Object(GoStringer{}, 1)).Should(ContainSubstring("<format_test.GoStringer>: go-string"))
- })
- })
-
- Context("when passed a stringer", func() {
- It("should use what String() returns", func() {
- Expect(Object(Stringer{}, 1)).Should(ContainSubstring("<format_test.Stringer>: string"))
- })
- })
- })
-
- Describe("Printing a context.Context field", func() {
-
- type structWithContext struct {
- Context Ctx
- Value string
- }
-
- context := ctx{}
- objWithContext := structWithContext{Value: "some-value", Context: &context}
-
- It("Suppresses the content by default", func() {
- Expect(Object(objWithContext, 1)).Should(ContainSubstring("<suppressed context>"))
- })
-
- It("Doesn't supress the context if it's the object being printed", func() {
- Expect(Object(context, 1)).ShouldNot(MatchRegexp("^.*<suppressed context>$"))
- })
-
- Context("PrintContextObjects is set", func() {
- BeforeEach(func() {
- PrintContextObjects = true
- })
-
- AfterEach(func() {
- PrintContextObjects = false
- })
-
- It("Prints the context", func() {
- Expect(Object(objWithContext, 1)).ShouldNot(ContainSubstring("<suppressed context>"))
- })
- })
- })
-})
-
-var expectedLongStringFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...aaaaabaaaaa..."
-to equal |
- <string>: "...aaaaazaaaaa..."
-`)
-var expectedTruncatedEndStringFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "baaaaa..."
-to equal |
- <string>: "zaaaaa..."
-`)
-var expectedTruncatedStartStringFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...aaaaab"
-to equal |
- <string>: "...aaaaaz"
-`)
-var expectedTruncatedStartSizeFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...aaaaaa"
-to equal |
- <string>: "...aaaaa"
-`)
-var expectedTruncatedStartSizeSwappedFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...aaaa"
-to equal |
- <string>: "...aaaaa"
-`)
-var expectedTruncatedMultiByteFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...tuvwxyz1"
-to equal |
- <string>: "...tuvwxyz"
-`)
-
-var expectedFullFailureDiff = strings.TrimSpace(`
-Expected
- <string>: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-to equal
- <string>: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-`)
diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer_test.go b/vendor/github.com/onsi/gomega/gbytes/buffer_test.go
deleted file mode 100644
index 9d4e8279d..000000000
--- a/vendor/github.com/onsi/gomega/gbytes/buffer_test.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package gbytes_test
-
-import (
- "io"
- "time"
-
- . "github.com/onsi/gomega/gbytes"
-
- "bytes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type SlowReader struct {
- R io.Reader
- D time.Duration
-}
-
-func (s SlowReader) Read(p []byte) (int, error) {
- time.Sleep(s.D)
- return s.R.Read(p)
-}
-
-var _ = Describe("Buffer", func() {
- var buffer *Buffer
-
- BeforeEach(func() {
- buffer = NewBuffer()
- })
-
- Describe("dumping the entire contents of the buffer", func() {
- It("should return everything that's been written", func() {
- buffer.Write([]byte("abc"))
- buffer.Write([]byte("def"))
- Expect(buffer.Contents()).Should(Equal([]byte("abcdef")))
-
- Expect(buffer).Should(Say("bcd"))
- Expect(buffer.Contents()).Should(Equal([]byte("abcdef")))
- })
- })
-
- Describe("creating a buffer with bytes", func() {
- It("should create the buffer with the cursor set to the beginning", func() {
- buffer := BufferWithBytes([]byte("abcdef"))
- Expect(buffer.Contents()).Should(Equal([]byte("abcdef")))
- Expect(buffer).Should(Say("abc"))
- Expect(buffer).ShouldNot(Say("abc"))
- Expect(buffer).Should(Say("def"))
- })
- })
-
- Describe("creating a buffer that wraps a reader", func() {
- Context("for a well-behaved reader", func() {
- It("should buffer the contents of the reader", func() {
- reader := bytes.NewBuffer([]byte("abcdef"))
- buffer := BufferReader(reader)
- Eventually(buffer).Should(Say("abc"))
- Expect(buffer).ShouldNot(Say("abc"))
- Eventually(buffer).Should(Say("def"))
- Eventually(buffer.Closed).Should(BeTrue())
- })
- })
-
- Context("for a slow reader", func() {
- It("should allow Eventually to time out", func() {
- slowReader := SlowReader{
- R: bytes.NewBuffer([]byte("abcdef")),
- D: time.Second,
- }
- buffer := BufferReader(slowReader)
- failures := InterceptGomegaFailures(func() {
- Eventually(buffer, 100*time.Millisecond).Should(Say("abc"))
- })
- Expect(failures).ShouldNot(BeEmpty())
-
- fastReader := SlowReader{
- R: bytes.NewBuffer([]byte("abcdef")),
- D: time.Millisecond,
- }
- buffer = BufferReader(fastReader)
- Eventually(buffer, 100*time.Millisecond).Should(Say("abc"))
- Eventually(buffer.Closed).Should(BeTrue())
- })
- })
- })
-
- Describe("reading from a buffer", func() {
- It("should read the current contents of the buffer", func() {
- buffer := BufferWithBytes([]byte("abcde"))
-
- dest := make([]byte, 3)
- n, err := buffer.Read(dest)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(n).Should(Equal(3))
- Expect(string(dest)).Should(Equal("abc"))
-
- dest = make([]byte, 3)
- n, err = buffer.Read(dest)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(n).Should(Equal(2))
- Expect(string(dest[:n])).Should(Equal("de"))
-
- n, err = buffer.Read(dest)
- Expect(err).Should(Equal(io.EOF))
- Expect(n).Should(Equal(0))
- })
-
- Context("after the buffer has been closed", func() {
- It("returns an error", func() {
- buffer := BufferWithBytes([]byte("abcde"))
-
- buffer.Close()
-
- dest := make([]byte, 3)
- n, err := buffer.Read(dest)
- Expect(err).Should(HaveOccurred())
- Expect(n).Should(Equal(0))
- })
- })
- })
-
- Describe("detecting regular expressions", func() {
- It("should fire the appropriate channel when the passed in pattern matches, then close it", func(done Done) {
- go func() {
- time.Sleep(10 * time.Millisecond)
- buffer.Write([]byte("abcde"))
- }()
-
- A := buffer.Detect("%s", "a.c")
- B := buffer.Detect("def")
-
- var gotIt bool
- select {
- case gotIt = <-A:
- case <-B:
- Fail("should not have gotten here")
- }
-
- Expect(gotIt).Should(BeTrue())
- Eventually(A).Should(BeClosed())
-
- buffer.Write([]byte("f"))
- Eventually(B).Should(Receive())
- Eventually(B).Should(BeClosed())
-
- close(done)
- })
-
- It("should fast-forward the buffer upon detection", func(done Done) {
- buffer.Write([]byte("abcde"))
- <-buffer.Detect("abc")
- Expect(buffer).ShouldNot(Say("abc"))
- Expect(buffer).Should(Say("de"))
- close(done)
- })
-
- It("should only fast-forward the buffer when the channel is read, and only if doing so would not rewind it", func(done Done) {
- buffer.Write([]byte("abcde"))
- A := buffer.Detect("abc")
- time.Sleep(20 * time.Millisecond) //give the goroutine a chance to detect and write to the channel
- Expect(buffer).Should(Say("abcd"))
- <-A
- Expect(buffer).ShouldNot(Say("d"))
- Expect(buffer).Should(Say("e"))
- Eventually(A).Should(BeClosed())
- close(done)
- })
-
- It("should be possible to cancel a detection", func(done Done) {
- A := buffer.Detect("abc")
- B := buffer.Detect("def")
- buffer.CancelDetects()
- buffer.Write([]byte("abcdef"))
- Eventually(A).Should(BeClosed())
- Eventually(B).Should(BeClosed())
-
- Expect(buffer).Should(Say("bcde"))
- <-buffer.Detect("f")
- close(done)
- })
- })
-
- Describe("closing the buffer", func() {
- It("should error when further write attempts are made", func() {
- _, err := buffer.Write([]byte("abc"))
- Expect(err).ShouldNot(HaveOccurred())
-
- buffer.Close()
-
- _, err = buffer.Write([]byte("def"))
- Expect(err).Should(HaveOccurred())
-
- Expect(buffer.Contents()).Should(Equal([]byte("abc")))
- })
-
- It("should be closed", func() {
- Expect(buffer.Closed()).Should(BeFalse())
-
- buffer.Close()
-
- Expect(buffer.Closed()).Should(BeTrue())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go b/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go
deleted file mode 100644
index 3a7dc0612..000000000
--- a/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package gbytes_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestGbytes(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Gbytes Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go
deleted file mode 100644
index 3da973498..000000000
--- a/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package gbytes_test
-
-import (
- "fmt"
- "io"
- "time"
-
- . "github.com/onsi/gomega/gbytes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type FakeCloser struct {
- err error
- duration time.Duration
-}
-
-func (f FakeCloser) Close() error {
- time.Sleep(f.duration)
- return f.err
-}
-
-type FakeReader struct {
- err error
- duration time.Duration
-}
-
-func (f FakeReader) Read(p []byte) (int, error) {
- time.Sleep(f.duration)
- if f.err != nil {
- return 0, f.err
- }
-
- for i := 0; i < len(p); i++ {
- p[i] = 'a'
- }
-
- return len(p), nil
-}
-
-type FakeWriter struct {
- err error
- duration time.Duration
-}
-
-func (f FakeWriter) Write(p []byte) (int, error) {
- time.Sleep(f.duration)
- if f.err != nil {
- return 0, f.err
- }
-
- return len(p), nil
-}
-
-var _ = Describe("Io Wrappers", func() {
- Describe("TimeoutCloser", func() {
- var innerCloser io.Closer
- var timeoutCloser io.Closer
-
- JustBeforeEach(func() {
- timeoutCloser = TimeoutCloser(innerCloser, 20*time.Millisecond)
- })
-
- Context("when the underlying Closer closes with no error", func() {
- BeforeEach(func() {
- innerCloser = FakeCloser{}
- })
-
- It("returns with no error", func() {
- Expect(timeoutCloser.Close()).Should(Succeed())
- })
- })
-
- Context("when the underlying Closer closes with an error", func() {
- BeforeEach(func() {
- innerCloser = FakeCloser{err: fmt.Errorf("boom")}
- })
-
- It("returns the error", func() {
- Expect(timeoutCloser.Close()).Should(MatchError("boom"))
- })
- })
-
- Context("when the underlying Closer hangs", func() {
- BeforeEach(func() {
- innerCloser = FakeCloser{
- err: fmt.Errorf("boom"),
- duration: time.Hour,
- }
- })
-
- It("returns ErrTimeout", func() {
- Expect(timeoutCloser.Close()).Should(MatchError(ErrTimeout))
- })
- })
- })
-
- Describe("TimeoutReader", func() {
- var innerReader io.Reader
- var timeoutReader io.Reader
-
- JustBeforeEach(func() {
- timeoutReader = TimeoutReader(innerReader, 20*time.Millisecond)
- })
-
- Context("when the underlying Reader returns no error", func() {
- BeforeEach(func() {
- innerReader = FakeReader{}
- })
-
- It("returns with no error", func() {
- p := make([]byte, 5)
- n, err := timeoutReader.Read(p)
- Expect(n).Should(Equal(5))
- Expect(err).ShouldNot(HaveOccurred())
- Expect(p).Should(Equal([]byte("aaaaa")))
- })
- })
-
- Context("when the underlying Reader returns an error", func() {
- BeforeEach(func() {
- innerReader = FakeReader{err: fmt.Errorf("boom")}
- })
-
- It("returns the error", func() {
- p := make([]byte, 5)
- _, err := timeoutReader.Read(p)
- Expect(err).Should(MatchError("boom"))
- })
- })
-
- Context("when the underlying Reader hangs", func() {
- BeforeEach(func() {
- innerReader = FakeReader{err: fmt.Errorf("boom"), duration: time.Hour}
- })
-
- It("returns ErrTimeout", func() {
- p := make([]byte, 5)
- _, err := timeoutReader.Read(p)
- Expect(err).Should(MatchError(ErrTimeout))
- })
- })
- })
-
- Describe("TimeoutWriter", func() {
- var innerWriter io.Writer
- var timeoutWriter io.Writer
-
- JustBeforeEach(func() {
- timeoutWriter = TimeoutWriter(innerWriter, 20*time.Millisecond)
- })
-
- Context("when the underlying Writer returns no error", func() {
- BeforeEach(func() {
- innerWriter = FakeWriter{}
- })
-
- It("returns with no error", func() {
- n, err := timeoutWriter.Write([]byte("aaaaa"))
- Expect(n).Should(Equal(5))
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
-
- Context("when the underlying Writer returns an error", func() {
- BeforeEach(func() {
- innerWriter = FakeWriter{err: fmt.Errorf("boom")}
- })
-
- It("returns the error", func() {
- _, err := timeoutWriter.Write([]byte("aaaaa"))
- Expect(err).Should(MatchError("boom"))
- })
- })
-
- Context("when the underlying Writer hangs", func() {
- BeforeEach(func() {
- innerWriter = FakeWriter{err: fmt.Errorf("boom"), duration: time.Hour}
- })
-
- It("returns ErrTimeout", func() {
- _, err := timeoutWriter.Write([]byte("aaaaa"))
- Expect(err).Should(MatchError(ErrTimeout))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go
deleted file mode 100644
index 0055d4a1b..000000000
--- a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package gbytes_test
-
-import (
- "time"
-
- . "github.com/onsi/gomega/gbytes"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type speaker struct {
- buffer *Buffer
-}
-
-func (s *speaker) Buffer() *Buffer {
- return s.buffer
-}
-
-var _ = Describe("SayMatcher", func() {
- var buffer *Buffer
-
- BeforeEach(func() {
- buffer = NewBuffer()
- buffer.Write([]byte("abc"))
- })
-
- Context("when actual is not a gexec Buffer, or a BufferProvider", func() {
- It("should error", func() {
- failures := InterceptGomegaFailures(func() {
- Expect("foo").Should(Say("foo"))
- })
- Expect(failures[0]).Should(ContainSubstring("*gbytes.Buffer"))
- })
- })
-
- Context("when a match is found", func() {
- It("should succeed", func() {
- Expect(buffer).Should(Say("abc"))
- })
-
- It("should support printf-like formatting", func() {
- Expect(buffer).Should(Say("a%sc", "b"))
- })
-
- It("should match literal %", func() {
- buffer.Write([]byte("%"))
- Expect(buffer).Should(Say("abc%"))
- })
-
- It("should use a regular expression", func() {
- Expect(buffer).Should(Say("a.c"))
- })
-
- It("should fastforward the buffer", func() {
- buffer.Write([]byte("def"))
- Expect(buffer).Should(Say("abcd"))
- Expect(buffer).Should(Say("ef"))
- Expect(buffer).ShouldNot(Say("[a-z]"))
- })
- })
-
- Context("when no match is found", func() {
- It("should not error", func() {
- Expect(buffer).ShouldNot(Say("def"))
- })
-
- Context("when the buffer is closed", func() {
- BeforeEach(func() {
- buffer.Close()
- })
-
- It("should abort an eventually", func() {
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(buffer).Should(Say("def"))
- })
- Eventually(buffer).ShouldNot(Say("def"))
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- Expect(failures).Should(HaveLen(1))
-
- t = time.Now()
- Eventually(buffer).Should(Say("abc"))
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- })
-
- It("should abort a consistently", func() {
- t := time.Now()
- Consistently(buffer, 2.0).ShouldNot(Say("def"))
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- })
-
- It("should not error with a synchronous matcher", func() {
- Expect(buffer).ShouldNot(Say("def"))
- Expect(buffer).Should(Say("abc"))
- })
- })
- })
-
- Context("when a positive match fails", func() {
- It("should report where it got stuck", func() {
- Expect(buffer).Should(Say("abc"))
- buffer.Write([]byte("def"))
- failures := InterceptGomegaFailures(func() {
- Expect(buffer).Should(Say("abc"))
- })
- Expect(failures[0]).Should(ContainSubstring("Got stuck at:"))
- Expect(failures[0]).Should(ContainSubstring("def"))
- })
- })
-
- Context("when a negative match fails", func() {
- It("should report where it got stuck", func() {
- failures := InterceptGomegaFailures(func() {
- Expect(buffer).ShouldNot(Say("abc"))
- })
- Expect(failures[0]).Should(ContainSubstring("Saw:"))
- Expect(failures[0]).Should(ContainSubstring("Which matches the unexpected:"))
- Expect(failures[0]).Should(ContainSubstring("abc"))
- })
- })
-
- Context("when a match is not found", func() {
- It("should not fastforward the buffer", func() {
- Expect(buffer).ShouldNot(Say("def"))
- Expect(buffer).Should(Say("abc"))
- })
- })
-
- Context("a nice real-life example", func() {
- It("should behave well", func() {
- Expect(buffer).Should(Say("abc"))
- go func() {
- time.Sleep(10 * time.Millisecond)
- buffer.Write([]byte("def"))
- }()
- Expect(buffer).ShouldNot(Say("def"))
- Eventually(buffer).Should(Say("def"))
- })
- })
-
- Context("when actual is a BufferProvider", func() {
- It("should use actual's buffer", func() {
- s := &speaker{
- buffer: NewBuffer(),
- }
-
- Expect(s).ShouldNot(Say("abc"))
-
- s.Buffer().Write([]byte("abc"))
- Expect(s).Should(Say("abc"))
- })
-
- It("should abort an eventually", func() {
- s := &speaker{
- buffer: NewBuffer(),
- }
-
- s.buffer.Close()
-
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(s).Should(Say("def"))
- })
- Expect(failures).Should(HaveLen(1))
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go b/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go
deleted file mode 100644
index 16091c22b..000000000
--- a/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package main
-
-import (
- "fmt"
- "math/rand"
- "os"
- "strconv"
- "time"
-)
-
-var outQuote = "We've done the impossible, and that makes us mighty."
-var errQuote = "Ah, curse your sudden but inevitable betrayal!"
-
-var randomQuotes = []string{
- "Can we maybe vote on the whole murdering people issue?",
- "I swear by my pretty floral bonnet, I will end you.",
- "My work's illegal, but at least it's honest.",
-}
-
-func main() {
- fmt.Fprintln(os.Stdout, outQuote)
- fmt.Fprintln(os.Stderr, errQuote)
-
- randomIndex := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(randomQuotes))
-
- time.Sleep(100 * time.Millisecond)
-
- fmt.Fprintln(os.Stdout, randomQuotes[randomIndex])
-
- if len(os.Args) == 2 {
- exitCode, _ := strconv.Atoi(os.Args[1])
- os.Exit(exitCode)
- } else {
- os.Exit(randomIndex)
- }
-}
diff --git a/vendor/github.com/onsi/gomega/gexec/build_test.go b/vendor/github.com/onsi/gomega/gexec/build_test.go
deleted file mode 100644
index 295dac8bc..000000000
--- a/vendor/github.com/onsi/gomega/gexec/build_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package gexec_test
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-)
-
-var packagePath = "./_fixture/firefly"
-
-var _ = Describe(".Build", func() {
- Context("when there have been previous calls to Build", func() {
- BeforeEach(func() {
- _, err := gexec.Build(packagePath)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("compiles the specified package", func() {
- compiledPath, err := gexec.Build(packagePath)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(compiledPath).Should(BeAnExistingFile())
- })
-
- Context("and CleanupBuildArtifacts has been called", func() {
- BeforeEach(func() {
- gexec.CleanupBuildArtifacts()
- })
-
- It("compiles the specified package", func() {
- var err error
- fireflyPath, err = gexec.Build(packagePath)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(fireflyPath).Should(BeAnExistingFile())
- })
- })
- })
-})
-
-var _ = Describe(".BuildWithEnvironment", func() {
- var err error
- env := []string{
- "GOOS=linux",
- "GOARCH=amd64",
- }
-
- It("compiles the specified package with the specified env vars", func() {
- compiledPath, err := gexec.BuildWithEnvironment(packagePath, env)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(compiledPath).Should(BeAnExistingFile())
- })
-
- It("returns the environment to a good state", func() {
- _, err = gexec.BuildWithEnvironment(packagePath, env)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(os.Environ()).ShouldNot(ContainElement("GOOS=linux"))
- })
-})
-
-var _ = Describe(".BuildIn", func() {
- const (
- target = "github.com/onsi/gomega/gexec/_fixture/firefly/"
- )
-
- var (
- original string
- gopath string
- )
-
- BeforeEach(func() {
- var err error
- original = os.Getenv("GOPATH")
- gopath, err = ioutil.TempDir("", "")
- Expect(err).NotTo(HaveOccurred())
- copyFile(filepath.Join("_fixture", "firefly", "main.go"), filepath.Join(gopath, "src", target), "main.go")
- Expect(os.Setenv("GOPATH", filepath.Join(os.TempDir(), "emptyFakeGopath"))).To(Succeed())
- Expect(os.Environ()).To(ContainElement(fmt.Sprintf("GOPATH=%s", filepath.Join(os.TempDir(), "emptyFakeGopath"))))
- })
-
- AfterEach(func() {
- if original == "" {
- Expect(os.Unsetenv("GOPATH")).To(Succeed())
- } else {
- Expect(os.Setenv("GOPATH", original)).To(Succeed())
- }
- if gopath != "" {
- os.RemoveAll(gopath)
- }
- })
-
- It("appends the gopath env var", func() {
- _, err := gexec.BuildIn(gopath, target)
- Expect(err).NotTo(HaveOccurred())
- })
-
- It("resets GOPATH to its original value", func() {
- _, err := gexec.BuildIn(gopath, target)
- Expect(err).NotTo(HaveOccurred())
- Expect(os.Getenv("GOPATH")).To(Equal(filepath.Join(os.TempDir(), "emptyFakeGopath")))
- })
-})
-
-func copyFile(source, directory, basename string) {
- Expect(os.MkdirAll(directory, 0755)).To(Succeed())
- content, err := ioutil.ReadFile(source)
- Expect(err).NotTo(HaveOccurred())
- Expect(ioutil.WriteFile(filepath.Join(directory, basename), content, 0644)).To(Succeed())
-}
diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go
deleted file mode 100644
index 9abc3226f..000000000
--- a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package gexec_test
-
-import (
- "os/exec"
- "time"
-
- . "github.com/onsi/gomega/gexec"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type NeverExits struct{}
-
-func (e NeverExits) ExitCode() int {
- return -1
-}
-
-var _ = Describe("ExitMatcher", func() {
- var command *exec.Cmd
- var session *Session
-
- BeforeEach(func() {
- var err error
- command = exec.Command(fireflyPath, "0")
- session, err = Start(command, nil, nil)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- Describe("when passed something that is an Exiter", func() {
- It("should act normally", func() {
- failures := InterceptGomegaFailures(func() {
- Expect(NeverExits{}).Should(Exit())
- })
-
- Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not."))
- })
- })
-
- Describe("when passed something that is not an Exiter", func() {
- It("should error", func() {
- failures := InterceptGomegaFailures(func() {
- Expect("aardvark").Should(Exit())
- })
-
- Expect(failures[0]).Should(ContainSubstring("Exit must be passed a gexec.Exiter"))
- })
- })
-
- Context("with no exit code", func() {
- It("should say the right things when it fails", func() {
- Expect(session).ShouldNot(Exit())
-
- failures := InterceptGomegaFailures(func() {
- Expect(session).Should(Exit())
- })
-
- Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not."))
-
- Eventually(session).Should(Exit())
-
- Expect(session).Should(Exit())
-
- failures = InterceptGomegaFailures(func() {
- Expect(session).ShouldNot(Exit())
- })
-
- Expect(failures[0]).Should(ContainSubstring("Expected process not to exit. It did."))
- })
- })
-
- Context("with an exit code", func() {
- It("should say the right things when it fails", func() {
- Expect(session).ShouldNot(Exit(0))
- Expect(session).ShouldNot(Exit(1))
-
- failures := InterceptGomegaFailures(func() {
- Expect(session).Should(Exit(0))
- })
-
- Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not."))
-
- Eventually(session).Should(Exit(0))
-
- Expect(session).Should(Exit(0))
-
- failures = InterceptGomegaFailures(func() {
- Expect(session).Should(Exit(1))
- })
-
- Expect(failures[0]).Should(ContainSubstring("to match exit code:"))
-
- Expect(session).ShouldNot(Exit(1))
-
- failures = InterceptGomegaFailures(func() {
- Expect(session).ShouldNot(Exit(0))
- })
-
- Expect(failures[0]).Should(ContainSubstring("not to match exit code:"))
- })
- })
-
- Describe("bailing out early", func() {
- It("should bail out early once the process exits", func() {
- t := time.Now()
-
- failures := InterceptGomegaFailures(func() {
- Eventually(session).Should(Exit(1))
- })
- Expect(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond))
- Expect(failures).Should(HaveLen(1))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go b/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go
deleted file mode 100644
index dc8e1f40c..000000000
--- a/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package gexec_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/gexec"
-
- "testing"
-)
-
-var fireflyPath string
-
-func TestGexec(t *testing.T) {
- BeforeSuite(func() {
- var err error
- fireflyPath, err = gexec.Build("./_fixture/firefly")
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- AfterSuite(func() {
- gexec.CleanupBuildArtifacts()
- })
-
- RegisterFailHandler(Fail)
- RunSpecs(t, "Gexec Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go
deleted file mode 100644
index e847b1501..000000000
--- a/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package gexec_test
-
-import (
- "bytes"
-
- . "github.com/onsi/gomega/gexec"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("PrefixedWriter", func() {
- var buffer *bytes.Buffer
- var writer *PrefixedWriter
- BeforeEach(func() {
- buffer = &bytes.Buffer{}
- writer = NewPrefixedWriter("[p]", buffer)
- })
-
- It("should emit the prefix on newlines", func() {
- writer.Write([]byte("abc"))
- writer.Write([]byte("def\n"))
- writer.Write([]byte("hij\n"))
- writer.Write([]byte("\n\n"))
- writer.Write([]byte("klm\n\nnop"))
- writer.Write([]byte(""))
- writer.Write([]byte("qrs"))
- writer.Write([]byte("\ntuv\nwx"))
- writer.Write([]byte("yz\n\n"))
-
- Expect(buffer.String()).Should(Equal(`[p]abcdef
-[p]hij
-[p]
-[p]
-[p]klm
-[p]
-[p]nopqrs
-[p]tuv
-[p]wxyz
-[p]
-`))
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gexec/session_test.go b/vendor/github.com/onsi/gomega/gexec/session_test.go
deleted file mode 100644
index 6fdf22d21..000000000
--- a/vendor/github.com/onsi/gomega/gexec/session_test.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package gexec_test
-
-import (
- "io"
- "io/ioutil"
- "os/exec"
- "syscall"
- "time"
-
- . "github.com/onsi/gomega/gbytes"
- . "github.com/onsi/gomega/gexec"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Session", func() {
- var command *exec.Cmd
- var session *Session
-
- var outWriter, errWriter io.Writer
-
- BeforeEach(func() {
- outWriter = nil
- errWriter = nil
- })
-
- JustBeforeEach(func() {
- command = exec.Command(fireflyPath)
- var err error
- session, err = Start(command, outWriter, errWriter)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- Context("running a command", func() {
- It("should start the process", func() {
- Expect(command.Process).ShouldNot(BeNil())
- })
-
- It("should wrap the process's stdout and stderr with gbytes buffers", func(done Done) {
- Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty"))
- Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!"))
- defer session.Out.CancelDetects()
-
- select {
- case <-session.Out.Detect("Can we maybe vote on the whole murdering people issue"):
- Eventually(session).Should(Exit(0))
- case <-session.Out.Detect("I swear by my pretty floral bonnet, I will end you."):
- Eventually(session).Should(Exit(1))
- case <-session.Out.Detect("My work's illegal, but at least it's honest."):
- Eventually(session).Should(Exit(2))
- }
-
- close(done)
- })
-
- It("should satisfy the gbytes.BufferProvider interface, passing Stdout", func() {
- Eventually(session).Should(Say("We've done the impossible, and that makes us mighty"))
- Eventually(session).Should(Exit())
- })
- })
-
- Describe("providing the exit code", func() {
- It("should provide the app's exit code", func() {
- Expect(session.ExitCode()).Should(Equal(-1))
-
- Eventually(session).Should(Exit())
- Expect(session.ExitCode()).Should(BeNumerically(">=", 0))
- Expect(session.ExitCode()).Should(BeNumerically("<", 3))
- })
- })
-
- Describe("wait", func() {
- It("should wait till the command exits", func() {
- Expect(session.ExitCode()).Should(Equal(-1))
- Expect(session.Wait().ExitCode()).Should(BeNumerically(">=", 0))
- Expect(session.Wait().ExitCode()).Should(BeNumerically("<", 3))
- })
- })
-
- Describe("exited", func() {
- It("should close when the command exits", func() {
- Eventually(session.Exited).Should(BeClosed())
- Expect(session.ExitCode()).ShouldNot(Equal(-1))
- })
- })
-
- Describe("kill", func() {
- It("should kill the command", func() {
- session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session.Kill()
- Eventually(session).Should(Exit(128 + 9))
- })
- })
-
- Describe("interrupt", func() {
- It("should interrupt the command", func() {
- session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session.Interrupt()
- Eventually(session).Should(Exit(128 + 2))
- })
- })
-
- Describe("terminate", func() {
- It("should terminate the command", func() {
- session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session.Terminate()
- Eventually(session).Should(Exit(128 + 15))
- })
- })
-
- Describe("signal", func() {
- It("should send the signal to the command", func() {
- session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session.Signal(syscall.SIGABRT)
- Eventually(session).Should(Exit(128 + 6))
- })
-
- It("should ignore sending a signal if the command did not start", func() {
- session, err := Start(exec.Command("notexisting"), GinkgoWriter, GinkgoWriter)
- Expect(err).To(HaveOccurred())
-
- Expect(func() { session.Signal(syscall.SIGUSR1) }).NotTo(Panic())
- })
- })
-
- Context("tracking sessions", func() {
- BeforeEach(func() {
- KillAndWait()
- })
-
- Describe("kill", func() {
- It("should kill all the started sessions", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- Kill()
-
- Eventually(session1).Should(Exit(128 + 9))
- Eventually(session2).Should(Exit(128 + 9))
- Eventually(session3).Should(Exit(128 + 9))
- })
-
- It("should not track unstarted sessions", func() {
- _, err := Start(exec.Command("does not exist", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).Should(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- Kill()
-
- Eventually(session2).Should(Exit(128 + 9))
- Eventually(session3).Should(Exit(128 + 9))
- })
-
- })
-
- Describe("killAndWait", func() {
- It("should kill all the started sessions and wait for them to finish", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- KillAndWait()
- Expect(session1).Should(Exit(128+9), "Should have exited")
- Expect(session2).Should(Exit(128+9), "Should have exited")
- Expect(session3).Should(Exit(128+9), "Should have exited")
- })
- })
-
- Describe("terminate", func() {
- It("should terminate all the started sessions", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- Terminate()
-
- Eventually(session1).Should(Exit(128 + 15))
- Eventually(session2).Should(Exit(128 + 15))
- Eventually(session3).Should(Exit(128 + 15))
- })
- })
-
- Describe("terminateAndWait", func() {
- It("should terminate all the started sessions, and wait for them to exit", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- TerminateAndWait()
-
- Expect(session1).Should(Exit(128+15), "Should have exited")
- Expect(session2).Should(Exit(128+15), "Should have exited")
- Expect(session3).Should(Exit(128+15), "Should have exited")
- })
- })
-
- Describe("signal", func() {
- It("should signal all the started sessions", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- Signal(syscall.SIGABRT)
-
- Eventually(session1).Should(Exit(128 + 6))
- Eventually(session2).Should(Exit(128 + 6))
- Eventually(session3).Should(Exit(128 + 6))
- })
- })
-
- Describe("interrupt", func() {
- It("should interrupt all the started sessions, and not wait", func() {
- session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter)
- Expect(err).ShouldNot(HaveOccurred())
-
- Interrupt()
-
- Eventually(session1).Should(Exit(128 + 2))
- Eventually(session2).Should(Exit(128 + 2))
- Eventually(session3).Should(Exit(128 + 2))
- })
- })
- })
-
- Context("when the command exits", func() {
- It("should close the buffers", func() {
- Eventually(session).Should(Exit())
-
- Expect(session.Out.Closed()).Should(BeTrue())
- Expect(session.Err.Closed()).Should(BeTrue())
-
- Expect(session.Out).Should(Say("We've done the impossible, and that makes us mighty"))
- })
-
- var So = It
-
- So("this means that eventually should short circuit", func() {
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(session).Should(Say("blah blah blah blah blah"))
- })
- Expect(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond))
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Context("when wrapping out and err", func() {
- var (
- outWriterBuffer, errWriterBuffer *Buffer
- )
-
- BeforeEach(func() {
- outWriterBuffer = NewBuffer()
- outWriter = outWriterBuffer
- errWriterBuffer = NewBuffer()
- errWriter = errWriterBuffer
- })
-
- It("should route to both the provided writers and the gbytes buffers", func() {
- Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty"))
- Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!"))
-
- Expect(outWriterBuffer.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty"))
- Expect(errWriterBuffer.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!"))
-
- Eventually(session).Should(Exit())
-
- Expect(outWriterBuffer.Contents()).Should(Equal(session.Out.Contents()))
- Expect(errWriterBuffer.Contents()).Should(Equal(session.Err.Contents()))
- })
-
- Context("when discarding the output of the command", func() {
- BeforeEach(func() {
- outWriter = ioutil.Discard
- errWriter = ioutil.Discard
- })
-
-			It("executes successfully", func() {
- Eventually(session).Should(Exit())
- })
- })
- })
-
- Describe("when the command fails to start", func() {
- It("should return an error", func() {
- _, err := Start(exec.Command("agklsjdfas"), nil, nil)
- Expect(err).Should(HaveOccurred())
- })
- })
-})
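
The deleted session tests above encode the gexec conventions worth remembering: Start wraps the process's stdout and stderr in gbytes buffers, ExitCode() reports -1 while the process is still running, and signal-induced exits surface as 128 plus the signal number. A minimal sketch of that lifecycle inside a spec (illustrative only; the `sleep` command stands in for a real fixture binary and the package name is a placeholder):

	package gexec_example_test

	import (
		"os/exec"

		. "github.com/onsi/ginkgo"
		. "github.com/onsi/gomega"
		"github.com/onsi/gomega/gexec"
	)

	var _ = Describe("a long-running process", func() {
		It("can be observed and terminated", func() {
			// Start wires stdout/stderr into gbytes buffers on session.Out / session.Err.
			session, err := gexec.Start(exec.Command("sleep", "60"), GinkgoWriter, GinkgoWriter)
			Expect(err).ShouldNot(HaveOccurred())

			// -1 means "still running".
			Expect(session.ExitCode()).Should(Equal(-1))

			// A SIGTERM-induced exit is reported as 128 + 15.
			session.Terminate()
			Eventually(session).Should(gexec.Exit(128 + 15))
		})
	})
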
diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go
deleted file mode 100644
index 894eae6d4..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/handlers.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package ghttp
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strings"
-
- "github.com/golang/protobuf/proto"
- . "github.com/onsi/gomega"
- "github.com/onsi/gomega/types"
-)
-
-//CombineHandlers takes a variadic list of handlers and produces one handler
-//that calls each handler in order.
-func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- for _, handler := range handlers {
- handler(w, req)
- }
- }
-}
-
-//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path
-//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery`
-//
-//For path, you may pass in a string, in which case strict equality will be applied
-//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example)
-func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- Expect(req.Method).Should(Equal(method), "Method mismatch")
- switch p := path.(type) {
- case types.GomegaMatcher:
- Expect(req.URL.Path).Should(p, "Path mismatch")
- default:
- Expect(req.URL.Path).Should(Equal(path), "Path mismatch")
- }
- if len(rawQuery) > 0 {
- values, err := url.ParseQuery(rawQuery[0])
- Expect(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed")
-
- Expect(req.URL.Query()).Should(Equal(values), "RawQuery mismatch")
- }
- }
-}
-
-//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the
-//specified value
-func VerifyContentType(contentType string) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- Expect(req.Header.Get("Content-Type")).Should(Equal(contentType))
- }
-}
-
-//VerifyMimeType returns a handler that verifies that a request has a specified mime type set
-//in the Content-Type header
-func VerifyMimeType(mimeType string) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- Expect(strings.Split(req.Header.Get("Content-Type"), ";")[0]).Should(Equal(mimeType))
- }
-}
-
-//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header
-//matching the passed in username and password
-func VerifyBasicAuth(username string, password string) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- auth := req.Header.Get("Authorization")
- Expect(auth).ShouldNot(Equal(""), "Authorization header must be specified")
-
- decoded, err := base64.StdEncoding.DecodeString(auth[6:])
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch")
- }
-}
-
-//VerifyHeader returns a handler that verifies the request contains the passed in headers.
-//The passed in header keys are first canonicalized via http.CanonicalHeaderKey.
-//
-//The request must contain *all* the passed in headers, but it is allowed to have additional headers
-//beyond the passed in set.
-func VerifyHeader(header http.Header) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- for key, values := range header {
- key = http.CanonicalHeaderKey(key)
- Expect(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key)
- }
- }
-}
-
-//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values
-//(recall that a `http.Header` is a mapping from string (key) to []string (values))
-//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object.
-func VerifyHeaderKV(key string, values ...string) http.HandlerFunc {
- return VerifyHeader(http.Header{key: values})
-}
-
-//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array.
-//It does this using Equal().
-func VerifyBody(expectedBody []byte) http.HandlerFunc {
- return CombineHandlers(
- func(w http.ResponseWriter, req *http.Request) {
- body, err := ioutil.ReadAll(req.Body)
- req.Body.Close()
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(Equal(expectedBody), "Body Mismatch")
- },
- )
-}
-
-//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation
-//matching the passed in JSON string. It does this using Gomega's MatchJSON method
-//
-//VerifyJSON also verifies that the request's content type is application/json
-func VerifyJSON(expectedJSON string) http.HandlerFunc {
- return CombineHandlers(
- VerifyMimeType("application/json"),
- func(w http.ResponseWriter, req *http.Request) {
- body, err := ioutil.ReadAll(req.Body)
- req.Body.Close()
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(MatchJSON(expectedJSON), "JSON Mismatch")
- },
- )
-}
-
-//VerifyJSONRepresenting is similar to VerifyJSON. Instead of taking a JSON string, however, it
-//takes an arbitrary JSON-encodable object and verifies that the request's body is a JSON representation
-//that matches the object
-func VerifyJSONRepresenting(object interface{}) http.HandlerFunc {
- data, err := json.Marshal(object)
- Expect(err).ShouldNot(HaveOccurred())
- return CombineHandlers(
- VerifyContentType("application/json"),
- VerifyJSON(string(data)),
- )
-}
-
-//VerifyForm returns a handler that verifies a request contains the specified form values.
-//
-//The request must contain *all* of the specified values, but it is allowed to have additional
-//form values beyond the passed in set.
-func VerifyForm(values url.Values) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- err := r.ParseForm()
- Expect(err).ShouldNot(HaveOccurred())
- for key, vals := range values {
- Expect(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key)
- }
- }
-}
-
-//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values.
-//
-//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object.
-func VerifyFormKV(key string, values ...string) http.HandlerFunc {
- return VerifyForm(url.Values{key: values})
-}
-
-//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf
-//representation of the passed message.
-//
-//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf
-func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc {
- return CombineHandlers(
- VerifyContentType("application/x-protobuf"),
- func(w http.ResponseWriter, req *http.Request) {
- body, err := ioutil.ReadAll(req.Body)
- Expect(err).ShouldNot(HaveOccurred())
- req.Body.Close()
-
- expectedType := reflect.TypeOf(expected)
- actualValuePtr := reflect.New(expectedType.Elem())
-
- actual, ok := actualValuePtr.Interface().(proto.Message)
- Expect(ok).Should(BeTrue(), "Message value is not a proto.Message")
-
- err = proto.Unmarshal(body, actual)
- Expect(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf")
-
- Expect(actual).Should(Equal(expected), "ProtoBuf Mismatch")
- },
- )
-}
-
-func copyHeader(src http.Header, dst http.Header) {
- for key, value := range src {
- dst[key] = value
- }
-}
-
-/*
-RespondWith returns a handler that responds to a request with the specified status code and body
-
-Body may be a string or []byte
-
-Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers.
-*/
-func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- if len(optionalHeader) == 1 {
- copyHeader(optionalHeader[0], w.Header())
- }
- w.WriteHeader(statusCode)
- switch x := body.(type) {
- case string:
- w.Write([]byte(x))
- case []byte:
- w.Write(x)
- default:
- Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.")
- }
- }
-}
-
-/*
-RespondWithPtr returns a handler that responds to a request with the specified status code and body
-
-Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body, allowing different tests
-to share the same setup but specify different status codes and bodies.
-
-Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers.
-Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
-*/
-func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- if len(optionalHeader) == 1 {
- copyHeader(optionalHeader[0], w.Header())
- }
- w.WriteHeader(*statusCode)
- if body != nil {
- switch x := (body).(type) {
- case *string:
- w.Write([]byte(*x))
- case *[]byte:
- w.Write(*x)
- default:
- Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.")
- }
- }
- }
-}
-
-/*
-RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body
-containing the JSON-encoding of the passed in object
-
-Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers.
-*/
-func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
- data, err := json.Marshal(object)
- Expect(err).ShouldNot(HaveOccurred())
-
- var headers http.Header
- if len(optionalHeader) == 1 {
- headers = optionalHeader[0]
- } else {
- headers = make(http.Header)
- }
- if _, found := headers["Content-Type"]; !found {
- headers["Content-Type"] = []string{"application/json"}
- }
- return RespondWith(statusCode, string(data), headers)
-}
-
-/*
-RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer
-to a status code and object.
-
-This allows different tests to share the same setup but specify different status codes and JSON-encoded
-objects.
-
-Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers.
-Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
-*/
-func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- data, err := json.Marshal(object)
- Expect(err).ShouldNot(HaveOccurred())
- var headers http.Header
- if len(optionalHeader) == 1 {
- headers = optionalHeader[0]
- } else {
- headers = make(http.Header)
- }
- if _, found := headers["Content-Type"]; !found {
- headers["Content-Type"] = []string{"application/json"}
- }
- copyHeader(headers, w.Header())
- w.WriteHeader(*statusCode)
- w.Write(data)
- }
-}
-
-//RespondWithProto returns a handler that responds to a request with the specified status code and a body
-//containing the protobuf serialization of the provided message.
-//
-//Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers.
-func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- data, err := proto.Marshal(message)
- Expect(err).ShouldNot(HaveOccurred())
-
- var headers http.Header
- if len(optionalHeader) == 1 {
- headers = optionalHeader[0]
- } else {
- headers = make(http.Header)
- }
- if _, found := headers["Content-Type"]; !found {
- headers["Content-Type"] = []string{"application/x-protobuf"}
- }
- copyHeader(headers, w.Header())
-
- w.WriteHeader(statusCode)
- w.Write(data)
- }
-}
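
For orientation, the handlers deleted above are the building blocks ghttp composes per request: the Verify* handlers assert on the incoming request and the RespondWith* handlers write the canned reply. A minimal sketch of how they are typically combined on a server (the client_test package name, the /sprockets path, and the JSON payloads are illustrative placeholders):

	package client_test

	import (
		"net/http"
		"strings"

		. "github.com/onsi/ginkgo"
		. "github.com/onsi/gomega"
		"github.com/onsi/gomega/ghttp"
	)

	var _ = Describe("creating a sprocket", func() {
		var server *ghttp.Server

		BeforeEach(func() {
			server = ghttp.NewServer()
			// Each verification handler checks one aspect of the request;
			// RespondWithJSONEncoded then writes the scripted reply.
			server.AppendHandlers(ghttp.CombineHandlers(
				ghttp.VerifyRequest("POST", "/sprockets", "filter=FOOD"),
				ghttp.VerifyJSON(`{"name": "Alfalfa"}`),
				ghttp.RespondWithJSONEncoded(http.StatusCreated, map[string]string{"id": "42"}),
			))
		})

		AfterEach(func() {
			server.Close()
		})

		It("verifies the request and returns the canned response", func() {
			resp, err := http.Post(
				server.URL()+"/sprockets?filter=FOOD",
				"application/json",
				strings.NewReader(`{"name": "Alfalfa"}`),
			)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
		})
	})

Because appended handlers are consumed in order, each AppendHandlers call effectively scripts one expected request/response exchange.
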
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
deleted file mode 100644
index b2972bc9f..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package protobuf
-
-//go:generate protoc --go_out=. simple_message.proto
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
deleted file mode 100644
index c55a48448..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: simple_message.proto
-// DO NOT EDIT!
-
-/*
-Package protobuf is a generated protocol buffer package.
-
-It is generated from these files:
- simple_message.proto
-
-It has these top-level messages:
- SimpleMessage
-*/
-package protobuf
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type SimpleMessage struct {
- Description *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"`
- Id *int32 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Metadata *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SimpleMessage) Reset() { *m = SimpleMessage{} }
-func (m *SimpleMessage) String() string { return proto.CompactTextString(m) }
-func (*SimpleMessage) ProtoMessage() {}
-
-func (m *SimpleMessage) GetDescription() string {
- if m != nil && m.Description != nil {
- return *m.Description
- }
- return ""
-}
-
-func (m *SimpleMessage) GetId() int32 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *SimpleMessage) GetMetadata() string {
- if m != nil && m.Metadata != nil {
- return *m.Metadata
- }
- return ""
-}
diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
deleted file mode 100644
index 35b7145c2..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
+++ /dev/null
@@ -1,9 +0,0 @@
-syntax = "proto2";
-
-package protobuf;
-
-message SimpleMessage {
- required string description = 1;
- required int32 id = 2;
- optional string metadata = 3;
-}
diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server.go b/vendor/github.com/onsi/gomega/ghttp/test_server.go
deleted file mode 100644
index 77535f309..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/test_server.go
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
-Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports
-registering multiple handlers. Incoming requests are not routed between the different handlers
-- rather it is merely the order of the handlers that matters. The first request is handled by the first
-registered handler, the second request by the second handler, etc.
-
-The intent here is to have each handler *verify* that the incoming request is valid. To accomplish this, ghttp
-also provides a collection of bite-size handlers that each perform one aspect of request verification. These can
-be composed together and registered with a ghttp server. The result is an expressive language for describing
-the requests generated by the client under test.
-
-Here's a simple example; note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches.
-A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients
-
- var _ = Describe("A Sprockets Client", func() {
- var server *ghttp.Server
- var client *SprocketClient
- BeforeEach(func() {
- server = ghttp.NewServer()
- client = NewSprocketClient(server.URL(), "skywalker", "tk427")
- })
-
- AfterEach(func() {
- server.Close()
- })
-
- Describe("fetching sprockets", func() {
- var statusCode int
- var sprockets []Sprocket
- BeforeEach(func() {
- statusCode = http.StatusOK
- sprockets = []Sprocket{}
- server.AppendHandlers(ghttp.CombineHandlers(
- ghttp.VerifyRequest("GET", "/sprockets"),
- ghttp.VerifyBasicAuth("skywalker", "tk427"),
- ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets),
- ))
- })
-
- Context("when requesting all sprockets", func() {
-				Context("when the response is successful", func() {
- BeforeEach(func() {
- sprockets = []Sprocket{
- NewSprocket("Alfalfa"),
- NewSprocket("Banana"),
- }
- })
-
- It("should return the returned sprockets", func() {
- Expect(client.Sprockets()).Should(Equal(sprockets))
- })
- })
-
- Context("when the response is missing", func() {
- BeforeEach(func() {
- statusCode = http.StatusNotFound
- })
-
- It("should return an empty list of sprockets", func() {
- Expect(client.Sprockets()).Should(BeEmpty())
- })
- })
-
- Context("when the response fails to authenticate", func() {
- BeforeEach(func() {
- statusCode = http.StatusUnauthorized
- })
-
- It("should return an AuthenticationError error", func() {
- sprockets, err := client.Sprockets()
- Expect(sprockets).Should(BeEmpty())
- Expect(err).Should(MatchError(AuthenticationError))
- })
- })
-
- Context("when the response is a server failure", func() {
- BeforeEach(func() {
- statusCode = http.StatusInternalServerError
- })
-
- It("should return an InternalError error", func() {
- sprockets, err := client.Sprockets()
- Expect(sprockets).Should(BeEmpty())
- Expect(err).Should(MatchError(InternalError))
- })
- })
- })
-
- Context("when requesting some sprockets", func() {
- BeforeEach(func() {
- sprockets = []Sprocket{
- NewSprocket("Alfalfa"),
- NewSprocket("Banana"),
- }
-
- server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD"))
- })
-
- It("should make the request with a filter", func() {
- Expect(client.Sprockets("food")).Should(Equal(sprockets))
- })
- })
- })
- })
-*/
-package ghttp
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/http/httputil"
- "reflect"
- "regexp"
- "strings"
- "sync"
-
- . "github.com/onsi/gomega"
-)
-
-func new() *Server {
- return &Server{
- AllowUnhandledRequests: false,
- UnhandledRequestStatusCode: http.StatusInternalServerError,
- rwMutex: &sync.RWMutex{},
- }
-}
-
-type routedHandler struct {
- method string
- pathRegexp *regexp.Regexp
- path string
- handler http.HandlerFunc
-}
-
-// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically.
-func NewServer() *Server {
- s := new()
- s.HTTPTestServer = httptest.NewServer(s)
- return s
-}
-
-// NewUnstartedServer returns a new, unstarted `*ghttp.Server`. Useful for specifying a custom listener on `server.HTTPTestServer`.
-func NewUnstartedServer() *Server {
- s := new()
- s.HTTPTestServer = httptest.NewUnstartedServer(s)
- return s
-}
-
-// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server. The server is started automatically.
-func NewTLSServer() *Server {
- s := new()
- s.HTTPTestServer = httptest.NewTLSServer(s)
- return s
-}
-
-type Server struct {
- //The underlying httptest server
- HTTPTestServer *httptest.Server
-
- //Defaults to false. If set to true, the Server will allow more requests than there are registered handlers.
- //Direct use of this property is deprecated and is likely to be removed, use GetAllowUnhandledRequests and SetAllowUnhandledRequests instead.
- AllowUnhandledRequests bool
-
- //The status code returned when receiving an unhandled request.
- //Defaults to http.StatusInternalServerError.
- //Only applies if AllowUnhandledRequests is true
- //Direct use of this property is deprecated and is likely to be removed, use GetUnhandledRequestStatusCode and SetUnhandledRequestStatusCode instead.
- UnhandledRequestStatusCode int
-
- //If provided, ghttp will log about each request received to the provided io.Writer
- //Defaults to nil
- //If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures
- Writer io.Writer
-
- receivedRequests []*http.Request
- requestHandlers []http.HandlerFunc
- routedHandlers []routedHandler
-
- rwMutex *sync.RWMutex
- calls int
-}
-
-//Start() starts an unstarted ghttp server. It is a catastrophic error to call Start more than once (thanks, httptest).
-func (s *Server) Start() {
- s.HTTPTestServer.Start()
-}
-
-//URL() returns a url that will hit the server
-func (s *Server) URL() string {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
- return s.HTTPTestServer.URL
-}
-
-//Addr() returns the address on which the server is listening.
-func (s *Server) Addr() string {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
- return s.HTTPTestServer.Listener.Addr().String()
-}
-
-//Close() should be called at the end of each test. It spins down and cleans up the test server.
-func (s *Server) Close() {
- s.rwMutex.Lock()
- server := s.HTTPTestServer
- s.HTTPTestServer = nil
- s.rwMutex.Unlock()
-
- if server != nil {
- server.Close()
- }
-}
-
-//ServeHTTP() makes Server an http.Handler
-//When the server receives a request it handles the request in the following order:
-//
-//1. If the request matches a handler registered with RouteToHandler, that handler is called.
-//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order.
-//3. If all registered handlers have been called then:
-// a) If AllowUnhandledRequests is set to true, the request will be handled with response code of UnhandledRequestStatusCode
-// b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed.
-func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- s.rwMutex.Lock()
- defer func() {
- e := recover()
- if e != nil {
- w.WriteHeader(http.StatusInternalServerError)
- }
-
- //If the handler panics GHTTP will silently succeed. This is bad™.
- //To catch this case we need to fail the test if the handler has panicked.
- //However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed)
- //then we shouldn't double-report the error as this will confuse people.
-
- //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure
- eAsString, ok := e.(string)
- if ok && strings.Contains(eAsString, "defer GinkgoRecover()") {
- return
- }
-
- //If we're here, we have to do step 2: assert that the error is nil. This assertion will
- //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo).
- //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer!
- defer func() {
- recover()
- }()
- Expect(e).Should(BeNil(), "Handler Panicked")
- }()
-
- if s.Writer != nil {
- s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL)))
- }
-
- s.receivedRequests = append(s.receivedRequests, req)
- if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok {
- s.rwMutex.Unlock()
- routedHandler(w, req)
- } else if s.calls < len(s.requestHandlers) {
- h := s.requestHandlers[s.calls]
- s.calls++
- s.rwMutex.Unlock()
- h(w, req)
- } else {
- s.rwMutex.Unlock()
- if s.GetAllowUnhandledRequests() {
- ioutil.ReadAll(req.Body)
- req.Body.Close()
- w.WriteHeader(s.GetUnhandledRequestStatusCode())
- } else {
- formatted, err := httputil.DumpRequest(req, true)
- Expect(err).NotTo(HaveOccurred(), "Encountered error while dumping HTTP request")
- Expect(string(formatted)).Should(BeNil(), "Received Unhandled Request")
- }
- }
-}
-
-//ReceivedRequests returns all requests received by the server (both handled and unhandled requests)
-func (s *Server) ReceivedRequests() []*http.Request {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
-
- return s.receivedRequests
-}
-
-//RouteToHandler can be used to register handlers that will always handle requests that match
-//the passed in method and path.
-//
-//The path may be either a string object or a *regexp.Regexp.
-func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- rh := routedHandler{
- method: method,
- handler: handler,
- }
-
- switch p := path.(type) {
- case *regexp.Regexp:
- rh.pathRegexp = p
- case string:
- rh.path = p
- default:
- panic("path must be a string or a regular expression")
- }
-
- for i, existingRH := range s.routedHandlers {
- if existingRH.method == method &&
- reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) &&
- existingRH.path == rh.path {
- s.routedHandlers[i] = rh
- return
- }
- }
- s.routedHandlers = append(s.routedHandlers, rh)
-}
-
-func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) {
- for _, rh := range s.routedHandlers {
- if rh.method == method {
- if rh.pathRegexp != nil {
- if rh.pathRegexp.Match([]byte(path)) {
- return rh.handler, true
- }
- } else if rh.path == path {
- return rh.handler, true
- }
- }
- }
-
- return nil, false
-}
-
-//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc...
-func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.requestHandlers = append(s.requestHandlers, handlers...)
-}
-
-//SetHandler overrides the registered handler at the passed in index with the passed in handler
-//This is useful, for example, when a server has been set up in a shared context, but must be tweaked
-//for a particular test.
-func (s *Server) SetHandler(index int, handler http.HandlerFunc) {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.requestHandlers[index] = handler
-}
-
-//GetHandler returns the handler registered at the passed in index.
-func (s *Server) GetHandler(index int) http.HandlerFunc {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
-
- return s.requestHandlers[index]
-}
-
-func (s *Server) Reset() {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.HTTPTestServer.CloseClientConnections()
- s.calls = 0
- s.receivedRequests = nil
- s.requestHandlers = nil
- s.routedHandlers = nil
-}
-
-//WrapHandler combines the passed in handler with the handler registered at the passed in index.
-//This is useful, for example, when a server has been set up in a shared context but must be tweaked
-//for a particular test.
-//
-//If the currently registered handler is A, and the new passed in handler is B then
-//WrapHandler will generate a new handler that first calls A, then calls B, and assigns it to the index.
-func (s *Server) WrapHandler(index int, handler http.HandlerFunc) {
- existingHandler := s.GetHandler(index)
- s.SetHandler(index, CombineHandlers(existingHandler, handler))
-}
-
-func (s *Server) CloseClientConnections() {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.HTTPTestServer.CloseClientConnections()
-}
-
-//SetAllowUnhandledRequests sets whether the server accepts unhandled requests.
-func (s *Server) SetAllowUnhandledRequests(allowUnhandledRequests bool) {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.AllowUnhandledRequests = allowUnhandledRequests
-}
-
-//GetAllowUnhandledRequests returns true if the server accepts unhandled requests.
-func (s *Server) GetAllowUnhandledRequests() bool {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
-
- return s.AllowUnhandledRequests
-}
-
-//SetUnhandledRequestStatusCode sets the status code to be returned when the server receives unhandled requests
-func (s *Server) SetUnhandledRequestStatusCode(statusCode int) {
- s.rwMutex.Lock()
- defer s.rwMutex.Unlock()
-
- s.UnhandledRequestStatusCode = statusCode
-}
-
-//GetUnhandledRequestStatusCode returns the current status code being returned for unhandled requests
-func (s *Server) GetUnhandledRequestStatusCode() int {
- s.rwMutex.RLock()
- defer s.rwMutex.RUnlock()
-
- return s.UnhandledRequestStatusCode
-}
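
The ServeHTTP dispatch order documented in the deleted test_server.go (routed handlers first, then appended handlers in FIFO order, then the unhandled-request policy) is easiest to see in a small sketch; the fakeapi_test package name, paths, and handler bodies below are illustrative placeholders:

	package fakeapi_test

	import (
		"net/http"

		"github.com/onsi/gomega/ghttp"
	)

	// newFakeAPI illustrates the dispatch order: routed handlers win, appended
	// handlers are consumed one per request in FIFO order, and anything left
	// over falls through to the unhandled-request policy.
	func newFakeAPI() *ghttp.Server {
		server := ghttp.NewServer()

		// Always answered, regardless of how many requests have been seen.
		server.RouteToHandler("GET", "/health", ghttp.RespondWith(http.StatusOK, "ok"))

		// The first non-routed request gets "first", the second gets "second".
		server.AppendHandlers(
			ghttp.RespondWith(http.StatusOK, "first"),
			ghttp.RespondWith(http.StatusOK, "second"),
		)

		// Any further non-routed request is answered with 418 instead of
		// failing the test.
		server.SetAllowUnhandledRequests(true)
		server.SetUnhandledRequestStatusCode(http.StatusTeapot)

		return server
	}

In most specs the default (AllowUnhandledRequests false) is preferable, since an unexpected request then fails the test rather than being silently answered.
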
diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go
deleted file mode 100644
index 7c1236082..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package ghttp_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestGHTTP(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "GHTTP Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_test.go
deleted file mode 100644
index be1c58e82..000000000
--- a/vendor/github.com/onsi/gomega/ghttp/test_server_test.go
+++ /dev/null
@@ -1,1129 +0,0 @@
-package ghttp_test
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "regexp"
-
- "github.com/golang/protobuf/proto"
- "github.com/onsi/gomega/gbytes"
- "github.com/onsi/gomega/ghttp/protobuf"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/ghttp"
-)
-
-var _ = Describe("TestServer", func() {
- var (
- resp *http.Response
- err error
- s *Server
- )
-
- BeforeEach(func() {
- s = NewServer()
- })
-
- AfterEach(func() {
- s.Close()
- })
-
- Describe("Resetting the server", func() {
- BeforeEach(func() {
- s.RouteToHandler("GET", "/", func(w http.ResponseWriter, req *http.Request) {})
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {})
- http.Get(s.URL() + "/")
-
- Expect(s.ReceivedRequests()).Should(HaveLen(1))
- })
-
- It("clears all handlers and call counts", func() {
- s.Reset()
- Expect(s.ReceivedRequests()).Should(HaveLen(0))
- Expect(func() { s.GetHandler(0) }).Should(Panic())
- })
- })
-
- Describe("closing client connections", func() {
- It("closes", func() {
- s.RouteToHandler("GET", "/",
- func(w http.ResponseWriter, req *http.Request) {
- io.WriteString(w, req.RemoteAddr)
- },
- )
- client := http.Client{Transport: &http.Transport{DisableKeepAlives: true}}
- resp, err := client.Get(s.URL())
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(200))
-
- body, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- Expect(err).ShouldNot(HaveOccurred())
-
- s.CloseClientConnections()
-
- resp, err = client.Get(s.URL())
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(200))
-
- body2, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(body2).ShouldNot(Equal(body))
- })
- })
-
-	Describe("closing the server multiple times", func() {
- It("should not fail", func() {
- s.Close()
- Expect(s.Close).ShouldNot(Panic())
- })
- })
-
- Describe("allowing unhandled requests", func() {
- It("is not permitted by default", func() {
- Expect(s.GetAllowUnhandledRequests()).To(BeFalse())
- })
-
- Context("when true", func() {
- BeforeEach(func() {
- s.SetAllowUnhandledRequests(true)
- s.SetUnhandledRequestStatusCode(http.StatusForbidden)
- resp, err = http.Get(s.URL() + "/foo")
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should allow unhandled requests and respond with the passed in status code", func() {
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(http.StatusForbidden))
-
- data, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(data).Should(BeEmpty())
- })
-
- It("should record the requests", func() {
- Expect(s.ReceivedRequests()).Should(HaveLen(1))
- Expect(s.ReceivedRequests()[0].URL.Path).Should(Equal("/foo"))
- })
- })
-
- Context("when false", func() {
- It("should fail when attempting a request", func() {
- failures := InterceptGomegaFailures(func() {
- http.Get(s.URL() + "/foo")
- })
-
- Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request"))
- })
- })
- })
-
- Describe("Managing Handlers", func() {
- var called []string
- BeforeEach(func() {
- called = []string{}
- s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "r1")
- })
- s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "r2")
- })
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "A")
- }, func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "B")
- })
- })
-
- It("should prefer routed handlers if there is a match", func() {
- http.Get(s.URL() + "/routed")
- http.Post(s.URL()+"/routed7", "application/json", nil)
- http.Get(s.URL() + "/foo")
- http.Get(s.URL() + "/routed")
- http.Post(s.URL()+"/routed9", "application/json", nil)
- http.Get(s.URL() + "/bar")
-
- failures := InterceptGomegaFailures(func() {
- http.Get(s.URL() + "/foo")
- http.Get(s.URL() + "/routed/not/a/match")
- http.Get(s.URL() + "/routed7")
- http.Post(s.URL()+"/routed", "application/json", nil)
- })
-
- Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request"))
- Expect(failures).Should(HaveLen(4))
-
- http.Post(s.URL()+"/routed3", "application/json", nil)
-
- Expect(called).Should(Equal([]string{"r1", "r2", "A", "r1", "r2", "B", "r2"}))
- })
-
- It("should override routed handlers when reregistered", func() {
- s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "r3")
- })
- s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "r4")
- })
-
- http.Get(s.URL() + "/routed")
- http.Post(s.URL()+"/routed7", "application/json", nil)
-
- Expect(called).Should(Equal([]string{"r3", "r4"}))
- })
-
- It("should call the appended handlers, in order, as requests come in", func() {
- http.Get(s.URL() + "/foo")
- Expect(called).Should(Equal([]string{"A"}))
-
- http.Get(s.URL() + "/foo")
- Expect(called).Should(Equal([]string{"A", "B"}))
-
- failures := InterceptGomegaFailures(func() {
- http.Get(s.URL() + "/foo")
- })
-
- Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request"))
- })
-
- Describe("Overwriting an existing handler", func() {
- BeforeEach(func() {
- s.SetHandler(0, func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "C")
- })
- })
-
- It("should override the specified handler", func() {
- http.Get(s.URL() + "/foo")
- http.Get(s.URL() + "/foo")
- Expect(called).Should(Equal([]string{"C", "B"}))
- })
- })
-
- Describe("Getting an existing handler", func() {
- It("should return the handler func", func() {
- s.GetHandler(1)(nil, nil)
- Expect(called).Should(Equal([]string{"B"}))
- })
- })
-
- Describe("Wrapping an existing handler", func() {
- BeforeEach(func() {
- s.WrapHandler(0, func(w http.ResponseWriter, req *http.Request) {
- called = append(called, "C")
- })
- })
-
- It("should wrap the existing handler in a new handler", func() {
- http.Get(s.URL() + "/foo")
- http.Get(s.URL() + "/foo")
- Expect(called).Should(Equal([]string{"A", "C", "B"}))
- })
- })
- })
-
- Describe("When a handler fails", func() {
- BeforeEach(func() {
- s.SetUnhandledRequestStatusCode(http.StatusForbidden) //just to be clear that 500s aren't coming from unhandled requests
- })
-
- Context("because the handler has panicked", func() {
- BeforeEach(func() {
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {
- panic("bam")
- })
- })
-
- It("should respond with a 500 and make a failing assertion", func() {
- var resp *http.Response
- var err error
-
- failures := InterceptGomegaFailures(func() {
- resp, err = http.Get(s.URL())
- })
-
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(http.StatusInternalServerError))
- Expect(failures).Should(ConsistOf(ContainSubstring("Handler Panicked")))
- })
- })
-
- Context("because an assertion has failed", func() {
- BeforeEach(func() {
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {
- // Expect(true).Should(BeFalse()) <-- would be nice to do it this way, but the test just can't be written this way
-
- By("We're cheating a bit here -- we're throwing a GINKGO_PANIC which simulates a failed assertion")
- panic(GINKGO_PANIC)
- })
- })
-
- It("should respond with a 500 and *not* make a failing assertion, instead relying on Ginkgo to have already been notified of the error", func() {
- resp, err := http.Get(s.URL())
-
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(http.StatusInternalServerError))
- })
- })
- })
-
- Describe("Logging to the Writer", func() {
- var buf *gbytes.Buffer
- BeforeEach(func() {
- buf = gbytes.NewBuffer()
- s.Writer = buf
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {})
- s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {})
- })
-
- It("should write to the buffer when a request comes in", func() {
- http.Get(s.URL() + "/foo")
- Expect(buf).Should(gbytes.Say("GHTTP Received Request: GET - /foo\n"))
-
- http.Post(s.URL()+"/bar", "", nil)
- Expect(buf).Should(gbytes.Say("GHTTP Received Request: POST - /bar\n"))
- })
- })
-
- Describe("Request Handlers", func() {
- Describe("VerifyRequest", func() {
- BeforeEach(func() {
- s.AppendHandlers(VerifyRequest("GET", "/foo"))
- })
-
- It("should verify the method, path", func() {
- resp, err = http.Get(s.URL() + "/foo?baz=bar")
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the method, path", func() {
- failures := InterceptGomegaFailures(func() {
- http.Get(s.URL() + "/foo2")
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should verify the method, path", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "application/json", nil)
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- Context("when passed a rawQuery", func() {
- It("should also be possible to verify the rawQuery", func() {
- s.SetHandler(0, VerifyRequest("GET", "/foo", "baz=bar"))
- resp, err = http.Get(s.URL() + "/foo?baz=bar")
- Expect(err).ShouldNot(HaveOccurred())
- })
-
-				It("should match regardless of query parameter ordering", func() {
- s.SetHandler(0, VerifyRequest("GET", "/foo", "type=get&name=money"))
- u, _ := url.Parse(s.URL() + "/foo")
- u.RawQuery = url.Values{
- "type": []string{"get"},
- "name": []string{"money"},
- }.Encode()
-
- resp, err = http.Get(u.String())
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
-
- Context("when passed a matcher for path", func() {
- It("should apply the matcher", func() {
- s.SetHandler(0, VerifyRequest("GET", MatchRegexp(`/foo/[a-f]*/3`)))
- resp, err = http.Get(s.URL() + "/foo/abcdefa/3")
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
- })
-
- Describe("VerifyContentType", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyContentType("application/octet-stream"),
- ))
- })
-
- It("should verify the content type", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Set("Content-Type", "application/octet-stream")
-
- resp, err = http.DefaultClient.Do(req)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the content type", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Set("Content-Type", "application/json")
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should verify the content type", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Set("Content-Type", "application/octet-stream; charset=utf-8")
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("Verify BasicAuth", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyBasicAuth("bob", "password"),
- ))
- })
-
- It("should verify basic auth", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.SetBasicAuth("bob", "password")
-
- resp, err = http.DefaultClient.Do(req)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify basic auth", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.SetBasicAuth("bob", "bassword")
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should require basic auth header", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(ContainElement(ContainSubstring("Authorization header must be specified")))
- })
- })
-
- Describe("VerifyHeader", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyHeader(http.Header{
- "accept": []string{"jpeg", "png"},
- "cache-control": []string{"omicron"},
- "Return-Path": []string{"hobbiton"},
- }),
- ))
- })
-
- It("should verify the headers", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Add("Accept", "jpeg")
- req.Header.Add("Accept", "png")
- req.Header.Add("Cache-Control", "omicron")
- req.Header.Add("return-path", "hobbiton")
-
- resp, err = http.DefaultClient.Do(req)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the headers", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Add("Schmaccept", "jpeg")
- req.Header.Add("Schmaccept", "png")
- req.Header.Add("Cache-Control", "omicron")
- req.Header.Add("return-path", "hobbiton")
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("VerifyHeaderKV", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyHeaderKV("accept", "jpeg", "png"),
- VerifyHeaderKV("cache-control", "omicron"),
- VerifyHeaderKV("Return-Path", "hobbiton"),
- ))
- })
-
- It("should verify the headers", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Add("Accept", "jpeg")
- req.Header.Add("Accept", "png")
- req.Header.Add("Cache-Control", "omicron")
- req.Header.Add("return-path", "hobbiton")
-
- resp, err = http.DefaultClient.Do(req)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the headers", func() {
- req, err := http.NewRequest("GET", s.URL()+"/foo", nil)
- Expect(err).ShouldNot(HaveOccurred())
- req.Header.Add("Accept", "jpeg")
- req.Header.Add("Cache-Control", "omicron")
- req.Header.Add("return-path", "hobbiton")
-
- failures := InterceptGomegaFailures(func() {
- http.DefaultClient.Do(req)
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("VerifyBody", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- VerifyBody([]byte("some body")),
- ))
- })
-
- It("should verify the body", func() {
- resp, err = http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("some body")))
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the body", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("wrong body")))
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("VerifyMimeType", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyMimeType("application/json"),
- ))
- })
-
- It("should verify the mime type in content-type header", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json; charset=utf-8", bytes.NewReader([]byte(`{}`)))
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the mime type in content-type header", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "text/plain", bytes.NewReader([]byte(`{}`)))
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("VerifyJSON", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- VerifyJSON(`{"a":3, "b":2}`),
- ))
- })
-
- It("should verify the json body and the content type", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":3}`)))
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the json body and the content type", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":4}`)))
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should verify the json body and the content type", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "application/not-json", bytes.NewReader([]byte(`{"b":2, "a":3}`)))
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should verify the json body and the content type", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json; charset=utf-8", bytes.NewReader([]byte(`{"b":2, "a":3}`)))
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
-
- Describe("VerifyJSONRepresenting", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- VerifyJSONRepresenting([]int{1, 3, 5}),
- ))
- })
-
- It("should verify the json body and the content type", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3,5]`)))
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the json body and the content type", func() {
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3]`)))
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("VerifyForm", func() {
- var formValues url.Values
-
- BeforeEach(func() {
- formValues = make(url.Values)
- formValues.Add("users", "user1")
- formValues.Add("users", "user2")
- formValues.Add("group", "users")
- })
-
- Context("when encoded in the URL", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyForm(url.Values{
- "users": []string{"user1", "user2"},
- "group": []string{"users"},
- }),
- ))
- })
-
- It("should verify form values", func() {
- resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode())
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should ignore extra values", func() {
- formValues.Add("extra", "value")
- resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode())
- Expect(err).ShouldNot(HaveOccurred())
- })
-
-				It("should fail on missing values", func() {
- formValues.Del("group")
- failures := InterceptGomegaFailures(func() {
- resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode())
- })
- Expect(failures).Should(HaveLen(1))
- })
-
-				It("should fail on incorrect values", func() {
- formValues.Set("group", "wheel")
- failures := InterceptGomegaFailures(func() {
- resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode())
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Context("when present in the body", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- VerifyForm(url.Values{
- "users": []string{"user1", "user2"},
- "group": []string{"users"},
- }),
- ))
- })
-
- It("should verify form values", func() {
- resp, err = http.PostForm(s.URL()+"/foo", formValues)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should ignore extra values", func() {
- formValues.Add("extra", "value")
- resp, err = http.PostForm(s.URL()+"/foo", formValues)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("fail on missing values", func() {
- formValues.Del("group")
- failures := InterceptGomegaFailures(func() {
- resp, err = http.PostForm(s.URL()+"/foo", formValues)
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("fail on incorrect values", func() {
- formValues.Set("group", "wheel")
- failures := InterceptGomegaFailures(func() {
- resp, err = http.PostForm(s.URL()+"/foo", formValues)
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
- })
-
- Describe("VerifyFormKV", func() {
- Context("when encoded in the URL", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("GET", "/foo"),
- VerifyFormKV("users", "user1", "user2"),
- ))
- })
-
- It("verifies the form value", func() {
- resp, err = http.Get(s.URL() + "/foo?users=user1&users=user2")
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("verifies the form value", func() {
- failures := InterceptGomegaFailures(func() {
- resp, err = http.Get(s.URL() + "/foo?users=user1")
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Context("when present in the body", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- VerifyFormKV("users", "user1", "user2"),
- ))
- })
-
- It("verifies the form value", func() {
- resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1", "user2"}})
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("verifies the form value", func() {
- failures := InterceptGomegaFailures(func() {
- resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1"}})
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
- })
-
- Describe("VerifyProtoRepresenting", func() {
- var message *protobuf.SimpleMessage
-
- BeforeEach(func() {
- message = new(protobuf.SimpleMessage)
- message.Description = proto.String("A description")
- message.Id = proto.Int32(0)
-
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/proto"),
- VerifyProtoRepresenting(message),
- ))
- })
-
- It("verifies the proto body and the content type", func() {
- serialized, err := proto.Marshal(message)
- Expect(err).ShouldNot(HaveOccurred())
-
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized))
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should verify the proto body and the content type", func() {
- serialized, err := proto.Marshal(&protobuf.SimpleMessage{
- Description: proto.String("A description"),
- Id: proto.Int32(0),
- Metadata: proto.String("some metadata"),
- })
- Expect(err).ShouldNot(HaveOccurred())
-
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized))
- })
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should verify the proto body and the content type", func() {
- serialized, err := proto.Marshal(message)
- Expect(err).ShouldNot(HaveOccurred())
-
- failures := InterceptGomegaFailures(func() {
- http.Post(s.URL()+"/proto", "application/not-x-protobuf", bytes.NewReader(serialized))
- })
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Describe("RespondWith", func() {
- Context("without headers", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWith(http.StatusCreated, "sweet"),
- ), CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWith(http.StatusOK, []byte("sour")),
- ))
- })
-
- It("should return the response", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(Equal([]byte("sweet")))
-
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusOK))
-
- body, err = ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(Equal([]byte("sour")))
- })
- })
-
- Context("with headers", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWith(http.StatusCreated, "sweet", http.Header{"X-Custom-Header": []string{"my header"}}),
- ))
- })
-
- It("should return the headers too", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
- Expect(ioutil.ReadAll(resp.Body)).Should(Equal([]byte("sweet")))
- Expect(resp.Header.Get("X-Custom-Header")).Should(Equal("my header"))
- })
- })
- })
-
- Describe("RespondWithPtr", func() {
- var code int
- var byteBody []byte
- var stringBody string
- BeforeEach(func() {
- code = http.StatusOK
- byteBody = []byte("sweet")
- stringBody = "sour"
-
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithPtr(&code, &byteBody),
- ), CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithPtr(&code, &stringBody),
- ))
- })
-
- It("should return the response", func() {
- code = http.StatusCreated
- byteBody = []byte("tasty")
- stringBody = "treat"
-
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(Equal([]byte("tasty")))
-
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- body, err = ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(Equal([]byte("treat")))
- })
-
- Context("when passed a nil body", func() {
- BeforeEach(func() {
- s.SetHandler(0, CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithPtr(&code, nil),
- ))
- })
-
- It("should return an empty body and not explode", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
-
- Expect(err).ShouldNot(HaveOccurred())
- Expect(resp.StatusCode).Should(Equal(http.StatusOK))
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(BeEmpty())
-
- Expect(s.ReceivedRequests()).Should(HaveLen(1))
- })
- })
- })
-
- Describe("RespondWithJSON", func() {
- Context("when no optional headers are set", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}),
- ))
- })
-
- It("should return the response", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(MatchJSON("[1,2,3]"))
- })
-
- It("should set the Content-Type header to application/json", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"}))
- })
- })
-
- Context("when optional headers are set", func() {
- var headers http.Header
- BeforeEach(func() {
- headers = http.Header{"Stuff": []string{"things"}}
- })
-
- JustBeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}, headers),
- ))
- })
-
- It("should preserve those headers", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"}))
- })
-
- It("should set the Content-Type header to application/json", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"}))
- })
-
- Context("when setting the Content-Type explicitly", func() {
- BeforeEach(func() {
- headers["Content-Type"] = []string{"not-json"}
- })
-
- It("should use the Content-Type header that was explicitly set", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"}))
- })
- })
- })
- })
-
- Describe("RespondWithJSONPtr", func() {
- type testObject struct {
- Key string
- Value string
- }
-
- var code int
- var object testObject
-
- Context("when no optional headers are set", func() {
- BeforeEach(func() {
- code = http.StatusOK
- object = testObject{}
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithJSONEncodedPtr(&code, &object),
- ))
- })
-
- It("should return the response", func() {
- code = http.StatusCreated
- object = testObject{
- Key: "Jim",
- Value: "Codes",
- }
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(body).Should(MatchJSON(`{"Key": "Jim", "Value": "Codes"}`))
- })
-
- It("should set the Content-Type header to application/json", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"}))
- })
- })
-
- Context("when optional headers are set", func() {
- var headers http.Header
- BeforeEach(func() {
- headers = http.Header{"Stuff": []string{"things"}}
- })
-
- JustBeforeEach(func() {
- code = http.StatusOK
- object = testObject{}
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/foo"),
- RespondWithJSONEncodedPtr(&code, &object, headers),
- ))
- })
-
- It("should preserve those headers", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"}))
- })
-
- It("should set the Content-Type header to application/json", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"}))
- })
-
- Context("when setting the Content-Type explicitly", func() {
- BeforeEach(func() {
- headers["Content-Type"] = []string{"not-json"}
- })
-
- It("should use the Content-Type header that was explicitly set", func() {
- resp, err = http.Post(s.URL()+"/foo", "application/json", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"}))
- })
- })
- })
- })
-
- Describe("RespondWithProto", func() {
- var message *protobuf.SimpleMessage
-
- BeforeEach(func() {
- message = new(protobuf.SimpleMessage)
- message.Description = proto.String("A description")
- message.Id = proto.Int32(99)
- })
-
- Context("when no optional headers are set", func() {
- BeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/proto"),
- RespondWithProto(http.StatusCreated, message),
- ))
- })
-
- It("should return the response", func() {
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.StatusCode).Should(Equal(http.StatusCreated))
-
- var received protobuf.SimpleMessage
- body, err := ioutil.ReadAll(resp.Body)
- err = proto.Unmarshal(body, &received)
- Expect(err).ShouldNot(HaveOccurred())
- })
-
- It("should set the Content-Type header to application/x-protobuf", func() {
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"}))
- })
- })
-
- Context("when optional headers are set", func() {
- var headers http.Header
- BeforeEach(func() {
- headers = http.Header{"Stuff": []string{"things"}}
- })
-
- JustBeforeEach(func() {
- s.AppendHandlers(CombineHandlers(
- VerifyRequest("POST", "/proto"),
- RespondWithProto(http.StatusCreated, message, headers),
- ))
- })
-
- It("should preserve those headers", func() {
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"}))
- })
-
- It("should set the Content-Type header to application/x-protobuf", func() {
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"}))
- })
-
- Context("when setting the Content-Type explicitly", func() {
- BeforeEach(func() {
- headers["Content-Type"] = []string{"not-x-protobuf"}
- })
-
- It("should use the Content-Type header that was explicitly set", func() {
- resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil)
- Expect(err).ShouldNot(HaveOccurred())
-
- Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-x-protobuf"}))
- })
- })
- })
- })
- })
-})
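The spec removed above exercises ghttp's request verifiers (VerifyRequest, VerifyJSON, VerifyForm, VerifyProtoRepresenting) and responders (RespondWith, RespondWithJSONEncoded, RespondWithProto). A minimal sketch of the same pattern in a plain testing-based test is shown below; the package name, test name, and /foo endpoint are illustrative, and ghttp.NewServer and gomega.NewWithT come from the vendored packages rather than from this hunk.

package mypackage_test

import (
	"bytes"
	"net/http"
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/ghttp"
)

func TestFooEndpoint(t *testing.T) {
	g := gomega.NewWithT(t)

	// Start a test server and enqueue one combined handler: verify the
	// request line, check the JSON body, then respond with encoded JSON.
	s := ghttp.NewServer()
	defer s.Close()

	s.AppendHandlers(ghttp.CombineHandlers(
		ghttp.VerifyRequest("POST", "/foo"),
		ghttp.VerifyJSON(`{"a":3, "b":2}`),
		ghttp.RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}),
	))

	resp, err := http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":3}`)))
	g.Expect(err).ShouldNot(gomega.HaveOccurred())
	g.Expect(resp.StatusCode).Should(gomega.Equal(http.StatusCreated))
	g.Expect(s.ReceivedRequests()).Should(gomega.HaveLen(1))
}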
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 471f691a6..448d595da 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.4.3"
+const GOMEGA_VERSION = "1.5.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -39,20 +39,14 @@ var defaultEventuallyPollingInterval = 10 * time.Millisecond
var defaultConsistentlyDuration = 100 * time.Millisecond
var defaultConsistentlyPollingInterval = 10 * time.Millisecond
-//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
-//the fail handler passed into RegisterFailHandler is called.
+// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
+// the fail handler passed into RegisterFailHandler is called.
func RegisterFailHandler(handler types.GomegaFailHandler) {
- if handler == nil {
- globalFailWrapper = nil
- return
- }
-
- globalFailWrapper = &types.GomegaFailWrapper{
- Fail: handler,
- TWithHelper: testingtsupport.EmptyTWithHelper{},
- }
+ RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler)
}
+// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler
+// are used globally.
func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) {
if handler == nil {
globalFailWrapper = nil
@@ -65,12 +59,12 @@ func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandl
}
}
-//RegisterTestingT connects Gomega to Golang's XUnit style
-//Testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead.
+// RegisterTestingT connects Gomega to Golang's XUnit style
+// Testing.T tests. It is now deprecated and you should use NewWithT() instead.
//
-//Legacy Documentation:
+// Legacy Documentation:
//
-//You'll need to call this at the top of each XUnit style test:
+// You'll need to call this at the top of each XUnit style test:
//
// func TestFarmHasCow(t *testing.T) {
// RegisterTestingT(t)
@@ -83,7 +77,7 @@ func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandl
// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
// in parallel as the global fail handler cannot point to more than one testing.T at a time.
//
-// NewGomegaWithT() does not have this limitation
+// NewWithT() does not have this limitation
//
// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
func RegisterTestingT(t types.GomegaTestingT) {
@@ -95,15 +89,15 @@ func RegisterTestingT(t types.GomegaTestingT) {
RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
}
-//InterceptGomegaHandlers runs a given callback and returns an array of
-//failure messages generated by any Gomega assertions within the callback.
+// InterceptGomegaFailures runs a given callback and returns an array of
+// failure messages generated by any Gomega assertions within the callback.
//
-//This is accomplished by temporarily replacing the *global* fail handler
-//with a fail handler that simply annotates failures. The original fail handler
-//is reset when InterceptGomegaFailures returns.
+// This is accomplished by temporarily replacing the *global* fail handler
+// with a fail handler that simply annotates failures. The original fail handler
+// is reset when InterceptGomegaFailures returns.
//
-//This is most useful when testing custom matchers, but can also be used to check
-//on a value using a Gomega assertion without causing a test failure.
+// This is most useful when testing custom matchers, but can also be used to check
+// on a value using a Gomega assertion without causing a test failure.
func InterceptGomegaFailures(f func()) []string {
originalHandler := globalFailWrapper.Fail
failures := []string{}
@@ -115,108 +109,108 @@ func InterceptGomegaFailures(f func()) []string {
return failures
}
-//Ω wraps an actual value allowing assertions to be made on it:
+// Ω wraps an actual value allowing assertions to be made on it:
// Ω("foo").Should(Equal("foo"))
//
-//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
-//All subsequent arguments will be required to be nil/zero.
+// If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
//
-//This is convenient if you want to make an assertion on a method/function that returns
-//a value and an error - a common patter in Go.
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
//
-//For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+// For example, given a function with signature:
+// func MyAmazingThing() (int, error)
//
-//Then:
+// Then:
// Ω(MyAmazingThing()).Should(Equal(3))
-//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
-//Ω and Expect are identical
-func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
+// Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) Assertion {
return ExpectWithOffset(0, actual, extra...)
}
-//Expect wraps an actual value allowing assertions to be made on it:
+// Expect wraps an actual value allowing assertions to be made on it:
// Expect("foo").To(Equal("foo"))
//
-//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
-//All subsequent arguments will be required to be nil/zero.
+// If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
//
-//This is convenient if you want to make an assertion on a method/function that returns
-//a value and an error - a common patter in Go.
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
//
-//For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+// For example, given a function with signature:
+// func MyAmazingThing() (int, error)
//
-//Then:
+// Then:
// Expect(MyAmazingThing()).Should(Equal(3))
-//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
-//Expect and Ω are identical
-func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+// Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) Assertion {
return ExpectWithOffset(0, actual, extra...)
}
-//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+// ExpectWithOffset wraps an actual value allowing assertions to be made on it:
// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
-//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-//this is used to modify the call-stack offset when computing line numbers.
+// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument;
+// this is used to modify the call-stack offset when computing line numbers.
//
-//This is most useful in helper functions that make assertions. If you want Gomega's
-//error message to refer to the calling line in the test (as opposed to the line in the helper function)
-//set the first argument of `ExpectWithOffset` appropriately.
-func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
+// This is most useful in helper functions that make assertions. If you want Gomega's
+// error message to refer to the calling line in the test (as opposed to the line in the helper function)
+// set the first argument of `ExpectWithOffset` appropriately.
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
if globalFailWrapper == nil {
panic(nilFailHandlerPanic)
}
return assertion.New(actual, globalFailWrapper, offset, extra...)
}
-//Eventually wraps an actual value allowing assertions to be made on it.
-//The assertion is tried periodically until it passes or a timeout occurs.
+// Eventually wraps an actual value allowing assertions to be made on it.
+// The assertion is tried periodically until it passes or a timeout occurs.
//
-//Both the timeout and polling interval are configurable as optional arguments:
-//The first optional argument is the timeout
-//The second optional argument is the polling interval
+// Both the timeout and polling interval are configurable as optional arguments:
+// The first optional argument is the timeout
+// The second optional argument is the polling interval
//
-//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
-//last case they are interpreted as seconds.
+// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+// last case they are interpreted as seconds.
//
-//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
-//then Eventually will call the function periodically and try the matcher against the function's first return value.
+// If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
+// then Eventually will call the function periodically and try the matcher against the function's first return value.
//
-//Example:
+// Example:
//
// Eventually(func() int {
// return thingImPolling.Count()
// }).Should(BeNumerically(">=", 17))
//
-//Note that this example could be rewritten:
+// Note that this example could be rewritten:
//
// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
//
-//If the function returns more than one value, then Eventually will pass the first value to the matcher and
-//assert that all other values are nil/zero.
-//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+// If the function returns more than one value, then Eventually will pass the first value to the matcher and
+// assert that all other values are nil/zero.
+// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
//
-//For example, consider a method that returns a value and an error:
+// For example, consider a method that returns a value and an error:
// func FetchFromDB() (string, error)
//
-//Then
+// Then
// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
//
-//Will pass only if the the returned error is nil and the returned string passes the matcher.
+// Will pass only if the returned error is nil and the returned string passes the matcher.
//
-//Eventually's default timeout is 1 second, and its default polling interval is 10ms
-func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// Eventually's default timeout is 1 second, and its default polling interval is 10ms
+func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
return EventuallyWithOffset(0, actual, intervals...)
}
-//EventuallyWithOffset operates like Eventually but takes an additional
-//initial argument to indicate an offset in the call stack. This is useful when building helper
-//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// EventuallyWithOffset operates like Eventually but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
if globalFailWrapper == nil {
panic(nilFailHandlerPanic)
}
@@ -231,37 +225,37 @@ func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface
return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
}
-//Consistently wraps an actual value allowing assertions to be made on it.
-//The assertion is tried periodically and is required to pass for a period of time.
+// Consistently wraps an actual value allowing assertions to be made on it.
+// The assertion is tried periodically and is required to pass for a period of time.
//
-//Both the total time and polling interval are configurable as optional arguments:
-//The first optional argument is the duration that Consistently will run for
-//The second optional argument is the polling interval
+// Both the total time and polling interval are configurable as optional arguments:
+// The first optional argument is the duration that Consistently will run for
+// The second optional argument is the polling interval
//
-//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
-//last case they are interpreted as seconds.
+// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+// last case they are interpreted as seconds.
//
-//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
-//then Consistently will call the function periodically and try the matcher against the function's first return value.
+// If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
+// then Consistently will call the function periodically and try the matcher against the function's first return value.
//
-//If the function returns more than one value, then Consistently will pass the first value to the matcher and
-//assert that all other values are nil/zero.
-//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
+// If the function returns more than one value, then Consistently will pass the first value to the matcher and
+// assert that all other values are nil/zero.
+// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
//
-//Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem.
-//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
+// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
+// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
//
-// Consistently(channel).ShouldNot(Receive())
+// Consistently(channel).ShouldNot(Receive())
//
-//Consistently's default duration is 100ms, and its default polling interval is 10ms
-func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// Consistently's default duration is 100ms, and its default polling interval is 10ms
+func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
return ConsistentlyWithOffset(0, actual, intervals...)
}
-//ConsistentlyWithOffset operates like Consistnetly but takes an additional
-//initial argument to indicate an offset in the call stack. This is useful when building helper
-//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// ConsistentlyWithOffset operates like Consistently but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
if globalFailWrapper == nil {
panic(nilFailHandlerPanic)
}
@@ -276,59 +270,62 @@ func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interfa
return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
}
-//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
func SetDefaultEventuallyTimeout(t time.Duration) {
defaultEventuallyTimeout = t
}
-//Set the default polling interval for Eventually.
+// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually.
func SetDefaultEventuallyPollingInterval(t time.Duration) {
defaultEventuallyPollingInterval = t
}
-//Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long.
+// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
func SetDefaultConsistentlyDuration(t time.Duration) {
defaultConsistentlyDuration = t
}
-//Set the default polling interval for Consistently.
+// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently.
func SetDefaultConsistentlyPollingInterval(t time.Duration) {
defaultConsistentlyPollingInterval = t
}
-//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
-//the matcher passed to the Should and ShouldNot methods.
+// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+// the matcher passed to the Should and ShouldNot methods.
//
-//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
-//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
-//descriptive
+// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
+// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
+// descriptive.
//
-//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
//
-//Example:
+// Example:
//
-// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
-type GomegaAsyncAssertion interface {
+// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
+type AsyncAssertion interface {
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
}
-//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
-//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
+type GomegaAsyncAssertion = AsyncAssertion
+
+// Assertion is returned by Ω and Expect and compares the actual value to the matcher
+// passed to the Should/ShouldNot and To/ToNot/NotTo methods.
//
-//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
-//though this is not enforced.
+// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+// though this is not enforced.
//
-//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
-//and is used to annotate failure messages.
+// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
+// and is used to annotate failure messages.
//
-//All methods return a bool that is true if hte assertion passed and false if it failed.
+// All methods return a bool that is true if the assertion passed and false if it failed.
//
-//Example:
+// Example:
//
-// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
-type GomegaAssertion interface {
+// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type Assertion interface {
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
@@ -337,39 +334,50 @@ type GomegaAssertion interface {
NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
}
-//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
+// GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
+type GomegaAssertion = Assertion
+
+// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
type OmegaMatcher types.GomegaMatcher
-//GomegaWithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
-//Gomega's rich ecosystem of matchers in standard `testing` test suites.
+// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
//
-//Use `NewGomegaWithT` to instantiate a `GomegaWithT`
-type GomegaWithT struct {
+// Use `NewWithT` to instantiate a `WithT`
+type WithT struct {
t types.GomegaTestingT
}
-//NewGomegaWithT takes a *testing.T and returngs a `GomegaWithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
-//Gomega's rich ecosystem of matchers in standard `testing` test suits.
+// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
+type GomegaWithT = WithT
+
+// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
//
// func TestFarmHasCow(t *testing.T) {
-// g := GomegaWithT(t)
+// g := gomega.NewWithT(t)
//
// f := farm.New([]string{"Cow", "Horse"})
// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
// }
-func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
- return &GomegaWithT{
+func NewWithT(t types.GomegaTestingT) *WithT {
+ return &WithT{
t: t,
}
}
-//See documentation for Expect
-func (g *GomegaWithT) Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
+func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
+ return NewWithT(t)
+}
+
+// Expect is used to make assertions. See documentation for Expect.
+func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion {
return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...)
}
-//See documentation for Eventually
-func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// Eventually is used to make asynchronous assertions. See documentation for Eventually.
+func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
timeoutInterval := defaultEventuallyTimeout
pollingInterval := defaultEventuallyPollingInterval
if len(intervals) > 0 {
@@ -381,8 +389,8 @@ func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) G
return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
}
-//See documentation for Consistently
-func (g *GomegaWithT) Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+// Consistently is used to make asynchronous assertions. See documentation for Consistently.
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
timeoutInterval := defaultConsistentlyDuration
pollingInterval := defaultConsistentlyPollingInterval
if len(intervals) > 0 {
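The DSL changes above rename the returned interfaces (Assertion, AsyncAssertion, WithT) while keeping the old Gomega-prefixed names as type aliases, and they document NewWithT for XUnit-style suites. A short sketch under those assumptions; the channel and test names are illustrative.

package mypackage_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestChannelDelivery(t *testing.T) {
	g := gomega.NewWithT(t)

	delivered := make(chan string, 1)
	idle := make(chan string, 1)
	go func() { delivered <- "hasselhoff" }()

	// Eventually polls until the matcher passes or the default 1s timeout elapses.
	g.Eventually(delivered).Should(gomega.Receive())

	// Consistently requires the matcher to keep holding for the default 100ms.
	g.Consistently(idle).ShouldNot(gomega.Receive())

	// The deprecated names are aliases, so code written against them still compiles.
	var a gomega.Assertion = g.Expect("foo")
	var _ gomega.GomegaAssertion = a
}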
diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go
deleted file mode 100644
index 13bf5b895..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/elements.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package gstruct
-
-import (
- "errors"
- "fmt"
- "reflect"
- "runtime/debug"
-
- "github.com/onsi/gomega/format"
- errorsutil "github.com/onsi/gomega/gstruct/errors"
- "github.com/onsi/gomega/types"
-)
-
-//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to
-//through the id function, and every element matcher is matched.
-// idFn := func(element interface{}) string {
-// return fmt.Sprintf("%v", element)
-// }
-//
-// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{
-// "a": Equal("a"),
-// "b": Equal("b"),
-// }))
-func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher {
- return &ElementsMatcher{
- Identifier: identifier,
- Elements: elements,
- }
-}
-
-//MatchElements succeeds if each element of a slice matches the element matcher it maps to
-//through the id function. It can ignore extra elements and/or missing elements.
-// idFn := func(element interface{}) string {
-// return fmt.Sprintf("%v", element)
-// }
-//
-// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{
-// "a": Equal("a"),
-// "b": Equal("b"),
-// }))
-// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{
-// "a": Equal("a"),
-// "b": Equal("b"),
-// "c": Equal("c"),
-// "d": Equal("d"),
-// }))
-func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher {
- return &ElementsMatcher{
- Identifier: identifier,
- Elements: elements,
- IgnoreExtras: options&IgnoreExtras != 0,
- IgnoreMissing: options&IgnoreMissing != 0,
- AllowDuplicates: options&AllowDuplicates != 0,
- }
-}
-
-// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped
-// by the Identifier function.
-// TODO: Extend this to work with arrays & maps (map the key) as well.
-type ElementsMatcher struct {
- // Matchers for each element.
- Elements Elements
- // Function mapping an element to the string key identifying its matcher.
- Identifier Identifier
-
- // Whether to ignore extra elements or consider it an error.
- IgnoreExtras bool
- // Whether to ignore missing elements or consider it an error.
- IgnoreMissing bool
- // Whether to key duplicates when matching IDs.
- AllowDuplicates bool
-
- // State.
- failures []error
-}
-
-// Element ID to matcher.
-type Elements map[string]types.GomegaMatcher
-
-// Function for identifying (mapping) elements.
-type Identifier func(element interface{}) string
-
-func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) {
- if reflect.TypeOf(actual).Kind() != reflect.Slice {
- return false, fmt.Errorf("%v is type %T, expected slice", actual, actual)
- }
-
- m.failures = m.matchElements(actual)
- if len(m.failures) > 0 {
- return false, nil
- }
- return true, nil
-}
-
-func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) {
- // Provide more useful error messages in the case of a panic.
- defer func() {
- if err := recover(); err != nil {
- errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack()))
- }
- }()
-
- val := reflect.ValueOf(actual)
- elements := map[string]bool{}
- for i := 0; i < val.Len(); i++ {
- element := val.Index(i).Interface()
- id := m.Identifier(element)
- if elements[id] {
- if !m.AllowDuplicates {
- errs = append(errs, fmt.Errorf("found duplicate element ID %s", id))
- continue
- }
- }
- elements[id] = true
-
- matcher, expected := m.Elements[id]
- if !expected {
- if !m.IgnoreExtras {
- errs = append(errs, fmt.Errorf("unexpected element %s", id))
- }
- continue
- }
-
- match, err := matcher.Match(element)
- if match {
- continue
- }
-
- if err == nil {
- if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
- err = errorsutil.AggregateError(nesting.Failures())
- } else {
- err = errors.New(matcher.FailureMessage(element))
- }
- }
- errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err))
- }
-
- for id := range m.Elements {
- if !elements[id] && !m.IgnoreMissing {
- errs = append(errs, fmt.Errorf("missing expected element %s", id))
- }
- }
-
- return errs
-}
-
-func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) {
- failure := errorsutil.AggregateError(m.failures)
- return format.Message(actual, fmt.Sprintf("to match elements: %v", failure))
-}
-
-func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to match elements")
-}
-
-func (m *ElementsMatcher) Failures() []error {
- return m.failures
-}
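The deleted elements.go above documents MatchAllElements and MatchElements using dot imports inside Ginkgo specs. The same calls in qualified, testing-based form might look like this sketch; the identifier function and sample slices are illustrative.

package mypackage_test

import (
	"fmt"
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
)

func TestMatchElementsSketch(t *testing.T) {
	g := gomega.NewWithT(t)

	// Map each element to the key of the matcher that should check it.
	idFn := func(element interface{}) string {
		return fmt.Sprintf("%v", element)
	}

	// MatchAllElements: every element must be matched and every matcher used.
	g.Expect([]string{"a", "b"}).To(gstruct.MatchAllElements(idFn, gstruct.Elements{
		"a": gomega.Equal("a"),
		"b": gomega.Equal("b"),
	}))

	// IgnoreExtras tolerates elements that have no corresponding matcher.
	g.Expect([]string{"a", "b", "c"}).To(gstruct.MatchElements(idFn, gstruct.IgnoreExtras, gstruct.Elements{
		"a": gomega.Equal("a"),
		"b": gomega.Equal("b"),
	}))
}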
diff --git a/vendor/github.com/onsi/gomega/gstruct/elements_test.go b/vendor/github.com/onsi/gomega/gstruct/elements_test.go
deleted file mode 100644
index 355d463eb..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/elements_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package gstruct_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/gstruct"
-)
-
-var _ = Describe("Slice", func() {
- allElements := []string{"a", "b"}
- missingElements := []string{"a"}
- extraElements := []string{"a", "b", "c"}
- duplicateElements := []string{"a", "a", "b"}
- empty := []string{}
- var nils []string
-
- It("should strictly match all elements", func() {
- m := MatchAllElements(id, Elements{
- "b": Equal("b"),
- "a": Equal("a"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(missingElements).ShouldNot(m, "should fail with missing elements")
- Expect(extraElements).ShouldNot(m, "should fail with extra elements")
- Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements")
- Expect(nils).ShouldNot(m, "should fail with an uninitialized slice")
-
- m = MatchAllElements(id, Elements{
- "a": Equal("a"),
- "b": Equal("fail"),
- })
- Expect(allElements).ShouldNot(m, "should run nested matchers")
-
- m = MatchAllElements(id, Elements{})
- Expect(empty).Should(m, "should handle empty slices")
- Expect(allElements).ShouldNot(m, "should handle only empty slices")
- Expect(nils).Should(m, "should handle nil slices")
- })
-
- It("should ignore extra elements", func() {
- m := MatchElements(id, IgnoreExtras, Elements{
- "b": Equal("b"),
- "a": Equal("a"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(missingElements).ShouldNot(m, "should fail with missing elements")
- Expect(extraElements).Should(m, "should ignore extra elements")
- Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements")
- Expect(nils).ShouldNot(m, "should fail with an uninitialized slice")
- })
-
- It("should ignore missing elements", func() {
- m := MatchElements(id, IgnoreMissing, Elements{
- "a": Equal("a"),
- "b": Equal("b"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(missingElements).Should(m, "should ignore missing elements")
- Expect(extraElements).ShouldNot(m, "should fail with extra elements")
- Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements")
- Expect(nils).Should(m, "should ignore an uninitialized slice")
- })
-
- It("should ignore missing and extra elements", func() {
- m := MatchElements(id, IgnoreMissing|IgnoreExtras, Elements{
- "a": Equal("a"),
- "b": Equal("b"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(missingElements).Should(m, "should ignore missing elements")
- Expect(extraElements).Should(m, "should ignore extra elements")
- Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements")
- Expect(nils).Should(m, "should ignore an uninitialized slice")
-
- m = MatchElements(id, IgnoreExtras|IgnoreMissing, Elements{
- "a": Equal("a"),
- "b": Equal("fail"),
- })
- Expect(allElements).ShouldNot(m, "should run nested matchers")
- })
-
- Context("with elements that share a key", func() {
- nonUniqueID := func(element interface{}) string {
- return element.(string)[0:1]
- }
-
- allElements := []string{"a123", "a213", "b321"}
- includingBadElements := []string{"a123", "b123", "b5555"}
- extraElements := []string{"a123", "b1234", "c345"}
- missingElements := []string{"b123", "b1234", "b1345"}
-
- It("should strictly allow multiple matches", func() {
- m := MatchElements(nonUniqueID, AllowDuplicates, Elements{
- "a": ContainSubstring("1"),
- "b": ContainSubstring("1"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher")
- Expect(extraElements).ShouldNot(m, "should reject with extra keys")
- Expect(missingElements).ShouldNot(m, "should reject with missing keys")
- Expect(nils).ShouldNot(m, "should fail with an uninitialized slice")
- })
-
- It("should ignore missing", func() {
- m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreMissing, Elements{
- "a": ContainSubstring("1"),
- "b": ContainSubstring("1"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher")
- Expect(extraElements).ShouldNot(m, "should reject with extra keys")
- Expect(missingElements).Should(m, "should allow missing keys")
- Expect(nils).Should(m, "should allow an uninitialized slice")
- })
-
- It("should ignore extras", func() {
- m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras, Elements{
- "a": ContainSubstring("1"),
- "b": ContainSubstring("1"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher")
- Expect(extraElements).Should(m, "should allow extra keys")
- Expect(missingElements).ShouldNot(m, "should reject missing keys")
- Expect(nils).ShouldNot(m, "should reject an uninitialized slice")
- })
-
- It("should ignore missing and extras", func() {
- m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras|IgnoreMissing, Elements{
- "a": ContainSubstring("1"),
- "b": ContainSubstring("1"),
- })
- Expect(allElements).Should(m, "should match all elements")
- Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher")
- Expect(extraElements).Should(m, "should allow extra keys")
- Expect(missingElements).Should(m, "should allow missing keys")
- Expect(nils).Should(m, "should allow an uninitialized slice")
- })
- })
-})
-
-func id(element interface{}) string {
- return element.(string)
-}
diff --git a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go
deleted file mode 100644
index 188492b21..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package errors
-
-import (
- "fmt"
- "strings"
-
- "github.com/onsi/gomega/types"
-)
-
-// A stateful matcher that nests other matchers within it and preserves the error types of the
-// nested matcher failures.
-type NestingMatcher interface {
- types.GomegaMatcher
-
- // Returns the failures of nested matchers.
- Failures() []error
-}
-
-// An error type for labeling errors on deeply nested matchers.
-type NestedError struct {
- Path string
- Err error
-}
-
-func (e *NestedError) Error() string {
- // Indent Errors.
- indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1)
- return fmt.Sprintf("%s:\n\t%v", e.Path, indented)
-}
-
-// Create a NestedError with the given path.
-// If err is a NestedError, prepend the path to it.
-// If err is an AggregateError, recursively Nest each error.
-func Nest(path string, err error) error {
- if ag, ok := err.(AggregateError); ok {
- var errs AggregateError
- for _, e := range ag {
- errs = append(errs, Nest(path, e))
- }
- return errs
- }
- if ne, ok := err.(*NestedError); ok {
- return &NestedError{
- Path: path + ne.Path,
- Err: ne.Err,
- }
- }
- return &NestedError{
- Path: path,
- Err: err,
- }
-}
-
-// An error type for treating multiple errors as a single error.
-type AggregateError []error
-
-// Error is part of the error interface.
-func (err AggregateError) Error() string {
- if len(err) == 0 {
- // This should never happen, really.
- return ""
- }
- if len(err) == 1 {
- return err[0].Error()
- }
- result := fmt.Sprintf("[%s", err[0].Error())
- for i := 1; i < len(err); i++ {
- result += fmt.Sprintf(", %s", err[i].Error())
- }
- result += "]"
- return result
-}
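nested_types.go above is the error plumbing the gstruct matchers use to label nested failures. A small sketch of how Nest builds paths and how AggregateError flattens multiple failures; the paths and messages are hypothetical.

package main

import (
	"errors"
	"fmt"

	gstructerrors "github.com/onsi/gomega/gstruct/errors"
)

func main() {
	// Nest prefixes a path onto a plain error.
	e1 := gstructerrors.Nest(".Spec", errors.New("field mismatch"))

	// Nesting a NestedError prepends the path, yielding ".Spec.Replicas".
	e2 := gstructerrors.Nest(".Spec", gstructerrors.Nest(".Replicas", errors.New("expected 3")))

	// AggregateError renders several failures as one bracketed message.
	agg := gstructerrors.AggregateError{e1, e2}
	fmt.Println(agg.Error())
}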
diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go
deleted file mode 100644
index 2eb2d0887..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/fields.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package gstruct
-
-import (
- "errors"
- "fmt"
- "reflect"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/gomega/format"
- errorsutil "github.com/onsi/gomega/gstruct/errors"
- "github.com/onsi/gomega/types"
-)
-
-//MatchAllFields succeeds if every field of a struct matches the field matcher associated with
-//it, and every element matcher is matched.
-// actual := struct{
-// A int
-// B []bool
-// C string
-// }{
-// A: 5,
-// B: []bool{true, false},
-// C: "foo",
-// }
-//
-// Expect(actual).To(MatchAllFields(Fields{
-// "A": Equal(5),
-// "B": ConsistOf(true, false),
-// "C": Equal("foo"),
-// }))
-func MatchAllFields(fields Fields) types.GomegaMatcher {
- return &FieldsMatcher{
- Fields: fields,
- }
-}
-
-//MatchFields succeeds if each element of a struct matches the field matcher associated with
-//it. It can ignore extra fields and/or missing fields.
-// actual := struct{
-// A int
-// B []bool
-// C string
-// }{
-// A: 5,
-// B: []bool{true, false},
-// C: "foo",
-// }
-//
-// Expect(actual).To(MatchFields(IgnoreExtras, Fields{
-// "A": Equal(5),
-// "B": ConsistOf(true, false),
-// }))
-// Expect(actual).To(MatchFields(IgnoreMissing, Fields{
-// "A": Equal(5),
-// "B": ConsistOf(true, false),
-// "C": Equal("foo"),
-// "D": Equal("extra"),
-// }))
-func MatchFields(options Options, fields Fields) types.GomegaMatcher {
- return &FieldsMatcher{
- Fields: fields,
- IgnoreExtras: options&IgnoreExtras != 0,
- IgnoreMissing: options&IgnoreMissing != 0,
- }
-}
-
-type FieldsMatcher struct {
- // Matchers for each field.
- Fields Fields
-
- // Whether to ignore extra elements or consider it an error.
- IgnoreExtras bool
- // Whether to ignore missing elements or consider it an error.
- IgnoreMissing bool
-
- // State.
- failures []error
-}
-
-// Field name to matcher.
-type Fields map[string]types.GomegaMatcher
-
-func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) {
- if reflect.TypeOf(actual).Kind() != reflect.Struct {
- return false, fmt.Errorf("%v is type %T, expected struct", actual, actual)
- }
-
- m.failures = m.matchFields(actual)
- if len(m.failures) > 0 {
- return false, nil
- }
- return true, nil
-}
-
-func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) {
- val := reflect.ValueOf(actual)
- typ := val.Type()
- fields := map[string]bool{}
- for i := 0; i < val.NumField(); i++ {
- fieldName := typ.Field(i).Name
- fields[fieldName] = true
-
- err := func() (err error) {
- // This test relies heavily on reflect, which tends to panic.
- // Recover here to provide more useful error messages in that case.
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack())
- }
- }()
-
- matcher, expected := m.Fields[fieldName]
- if !expected {
- if !m.IgnoreExtras {
- return fmt.Errorf("unexpected field %s: %+v", fieldName, actual)
- }
- return nil
- }
-
- var field interface{}
- if val.Field(i).IsValid() {
- field = val.Field(i).Interface()
- } else {
- field = reflect.Zero(typ.Field(i).Type)
- }
-
- match, err := matcher.Match(field)
- if err != nil {
- return err
- } else if !match {
- if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
- return errorsutil.AggregateError(nesting.Failures())
- }
- return errors.New(matcher.FailureMessage(field))
- }
- return nil
- }()
- if err != nil {
- errs = append(errs, errorsutil.Nest("."+fieldName, err))
- }
- }
-
- for field := range m.Fields {
- if !fields[field] && !m.IgnoreMissing {
- errs = append(errs, fmt.Errorf("missing expected field %s", field))
- }
- }
-
- return errs
-}
-
-func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) {
- failures := make([]string, len(m.failures))
- for i := range m.failures {
- failures[i] = m.failures[i].Error()
- }
- return format.Message(reflect.TypeOf(actual).Name(),
- fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n")))
-}
-
-func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to match fields")
-}
-
-func (m *FieldsMatcher) Failures() []error {
- return m.failures
-}
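fields.go above matches struct fields by name via reflection. A sketch of MatchFields with IgnoreExtras outside Ginkgo; the pod struct and its values are illustrative.

package mypackage_test

import (
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
)

type pod struct {
	Name     string
	Replicas int
	Labels   []string
}

func TestMatchFieldsSketch(t *testing.T) {
	g := gomega.NewWithT(t)

	actual := pod{Name: "web", Replicas: 3, Labels: []string{"a", "b"}}

	// IgnoreExtras: Labels has no matcher and is tolerated; the listed
	// matchers are applied to the corresponding fields.
	g.Expect(actual).To(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
		"Name":     gomega.Equal("web"),
		"Replicas": gomega.BeNumerically(">", 1),
	}))
}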
diff --git a/vendor/github.com/onsi/gomega/gstruct/fields_test.go b/vendor/github.com/onsi/gomega/gstruct/fields_test.go
deleted file mode 100644
index e4e039e21..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/fields_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package gstruct_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/gstruct"
-)
-
-var _ = Describe("Struct", func() {
- allFields := struct{ A, B string }{"a", "b"}
- missingFields := struct{ A string }{"a"}
- extraFields := struct{ A, B, C string }{"a", "b", "c"}
- emptyFields := struct{ A, B string }{}
-
- It("should strictly match all fields", func() {
- m := MatchAllFields(Fields{
- "B": Equal("b"),
- "A": Equal("a"),
- })
- Expect(allFields).Should(m, "should match all fields")
- Expect(missingFields).ShouldNot(m, "should fail with missing fields")
- Expect(extraFields).ShouldNot(m, "should fail with extra fields")
- Expect(emptyFields).ShouldNot(m, "should fail with empty fields")
-
- m = MatchAllFields(Fields{
- "A": Equal("a"),
- "B": Equal("fail"),
- })
- Expect(allFields).ShouldNot(m, "should run nested matchers")
- })
-
- It("should handle empty structs", func() {
- m := MatchAllFields(Fields{})
- Expect(struct{}{}).Should(m, "should handle empty structs")
- Expect(allFields).ShouldNot(m, "should fail with extra fields")
- })
-
- It("should ignore missing fields", func() {
- m := MatchFields(IgnoreMissing, Fields{
- "B": Equal("b"),
- "A": Equal("a"),
- })
- Expect(allFields).Should(m, "should match all fields")
- Expect(missingFields).Should(m, "should ignore missing fields")
- Expect(extraFields).ShouldNot(m, "should fail with extra fields")
- Expect(emptyFields).ShouldNot(m, "should fail with empty fields")
- })
-
- It("should ignore extra fields", func() {
- m := MatchFields(IgnoreExtras, Fields{
- "B": Equal("b"),
- "A": Equal("a"),
- })
- Expect(allFields).Should(m, "should match all fields")
- Expect(missingFields).ShouldNot(m, "should fail with missing fields")
- Expect(extraFields).Should(m, "should ignore extra fields")
- Expect(emptyFields).ShouldNot(m, "should fail with empty fields")
- })
-
- It("should ignore missing and extra fields", func() {
- m := MatchFields(IgnoreMissing|IgnoreExtras, Fields{
- "B": Equal("b"),
- "A": Equal("a"),
- })
- Expect(allFields).Should(m, "should match all fields")
- Expect(missingFields).Should(m, "should ignore missing fields")
- Expect(extraFields).Should(m, "should ignore extra fields")
- Expect(emptyFields).ShouldNot(m, "should fail with empty fields")
-
- m = MatchFields(IgnoreMissing|IgnoreExtras, Fields{
- "A": Equal("a"),
- "B": Equal("fail"),
- })
- Expect(allFields).ShouldNot(m, "should run nested matchers")
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go b/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go
deleted file mode 100644
index d47566304..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package gstruct_test
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func Test(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Gstruct Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go
deleted file mode 100644
index 0365f32ad..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/ignore.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package gstruct
-
-import (
- "github.com/onsi/gomega/types"
-)
-
-//Ignore ignores the actual value and always succeeds.
-// Expect(nil).To(Ignore())
-// Expect(true).To(Ignore())
-func Ignore() types.GomegaMatcher {
- return &IgnoreMatcher{true}
-}
-
-//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing
-//to catch problematic elements, or to verify tests are running.
-// Expect(nil).NotTo(Reject())
-// Expect(true).NotTo(Reject())
-func Reject() types.GomegaMatcher {
- return &IgnoreMatcher{false}
-}
-
-// A matcher that either always succeeds or always fails.
-type IgnoreMatcher struct {
- Succeed bool
-}
-
-func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) {
- return m.Succeed, nil
-}
-
-func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) {
- return "Unconditional failure"
-}
-
-func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) {
- return "Unconditional success"
-}
diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore_test.go b/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
deleted file mode 100644
index 07775e742..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package gstruct_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/gstruct"
-)
-
-var _ = Describe("Ignore", func() {
- It("should always succeed", func() {
- Expect(nil).Should(Ignore())
- Expect(struct{}{}).Should(Ignore())
- Expect(0).Should(Ignore())
- Expect(false).Should(Ignore())
- })
-
- It("should always fail", func() {
- Expect(nil).ShouldNot(Reject())
- Expect(struct{}{}).ShouldNot(Reject())
- Expect(1).ShouldNot(Reject())
- Expect(true).ShouldNot(Reject())
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go
deleted file mode 100644
index 0a2f35de3..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/pointer.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package gstruct
-
-import (
- "fmt"
- "reflect"
-
- "github.com/onsi/gomega/format"
- "github.com/onsi/gomega/types"
-)
-
-//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is
-//nil.
-// actual := 5
-// Expect(&actual).To(PointTo(Equal(5)))
-func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher {
- return &PointerMatcher{
- Matcher: matcher,
- }
-}
-
-type PointerMatcher struct {
- Matcher types.GomegaMatcher
-
- // Failure message.
- failure string
-}
-
-func (m *PointerMatcher) Match(actual interface{}) (bool, error) {
- val := reflect.ValueOf(actual)
-
- // return error if actual type is not a pointer
- if val.Kind() != reflect.Ptr {
- return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind())
- }
-
- if !val.IsValid() || val.IsNil() {
- m.failure = format.Message(actual, "not to be <nil>")
- return false, nil
- }
-
- // Forward the value.
- elem := val.Elem().Interface()
- match, err := m.Matcher.Match(elem)
- if !match {
- m.failure = m.Matcher.FailureMessage(elem)
- }
- return match, err
-}
-
-func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) {
- return m.failure
-}
-
-func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return m.Matcher.NegatedFailureMessage(actual)
-}
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer_test.go b/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
deleted file mode 100644
index 805a92abe..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package gstruct_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/gstruct"
-)
-
-var _ = Describe("PointTo", func() {
- It("should fail when passed nil", func() {
- var p *struct{}
- Expect(p).Should(BeNil())
- })
-
- It("should succeed when passed non-nil pointer", func() {
- var s struct{}
- Expect(&s).Should(PointTo(Ignore()))
- })
-
- It("should unwrap the pointee value", func() {
- i := 1
- Expect(&i).Should(PointTo(Equal(1)))
- Expect(&i).ShouldNot(PointTo(Equal(2)))
- })
-
- It("should work with nested pointers", func() {
- i := 1
- ip := &i
- ipp := &ip
- Expect(ipp).Should(PointTo(PointTo(Equal(1))))
- Expect(ipp).ShouldNot(PointTo(PointTo(Equal(2))))
- })
-})
diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go
deleted file mode 100644
index 48cbbe8f6..000000000
--- a/vendor/github.com/onsi/gomega/gstruct/types.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package gstruct
-
-//Options is the type for options passed to some matchers.
-type Options int
-
-const (
- //IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure.
- IgnoreExtras Options = 1 << iota
- //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure.
- IgnoreMissing
- //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when
-	//considered by the identifier function. All members that map to a given key must still match successfully
- //with the matcher that is provided for that key.
- AllowDuplicates
-)
diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go
deleted file mode 100644
index dae47a48b..000000000
--- a/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package assertion_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestAssertion(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Assertion Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go
deleted file mode 100644
index cac0d24c5..000000000
--- a/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package assertion_test
-
-import (
- "errors"
-
- "github.com/onsi/gomega/internal/testingtsupport"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/internal/assertion"
- "github.com/onsi/gomega/internal/fakematcher"
- "github.com/onsi/gomega/types"
-)
-
-var _ = Describe("Assertion", func() {
- var (
- a *Assertion
- failureMessage string
- failureCallerSkip int
- matcher *fakematcher.FakeMatcher
- )
-
- input := "The thing I'm testing"
-
- var fakeFailWrapper = &types.GomegaFailWrapper{
- Fail: func(message string, callerSkip ...int) {
- failureMessage = message
- if len(callerSkip) == 1 {
- failureCallerSkip = callerSkip[0]
- }
- },
- TWithHelper: testingtsupport.EmptyTWithHelper{},
- }
-
- BeforeEach(func() {
- matcher = &fakematcher.FakeMatcher{}
- failureMessage = ""
- failureCallerSkip = 0
- a = New(input, fakeFailWrapper, 1)
- })
-
- Context("when called", func() {
- It("should pass the provided input value to the matcher", func() {
- a.Should(matcher)
-
- Expect(matcher.ReceivedActual).Should(Equal(input))
- matcher.ReceivedActual = ""
-
- a.ShouldNot(matcher)
-
- Expect(matcher.ReceivedActual).Should(Equal(input))
- matcher.ReceivedActual = ""
-
- a.To(matcher)
-
- Expect(matcher.ReceivedActual).Should(Equal(input))
- matcher.ReceivedActual = ""
-
- a.ToNot(matcher)
-
- Expect(matcher.ReceivedActual).Should(Equal(input))
- matcher.ReceivedActual = ""
-
- a.NotTo(matcher)
-
- Expect(matcher.ReceivedActual).Should(Equal(input))
- })
- })
-
- Context("when the matcher succeeds", func() {
- BeforeEach(func() {
- matcher.MatchesToReturn = true
- matcher.ErrToReturn = nil
- })
-
- Context("and a positive assertion is being made", func() {
- It("should not call the failure callback", func() {
- a.Should(matcher)
- Expect(failureMessage).Should(Equal(""))
- })
-
- It("should be true", func() {
- Expect(a.Should(matcher)).Should(BeTrue())
- })
- })
-
- Context("and a negative assertion is being made", func() {
- It("should call the failure callback", func() {
- a.ShouldNot(matcher)
- Expect(failureMessage).Should(Equal("negative: The thing I'm testing"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
-
- It("should be false", func() {
- Expect(a.ShouldNot(matcher)).Should(BeFalse())
- })
- })
- })
-
- Context("when the matcher fails", func() {
- BeforeEach(func() {
- matcher.MatchesToReturn = false
- matcher.ErrToReturn = nil
- })
-
- Context("and a positive assertion is being made", func() {
- It("should call the failure callback", func() {
- a.Should(matcher)
- Expect(failureMessage).Should(Equal("positive: The thing I'm testing"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
-
- It("should be false", func() {
- Expect(a.Should(matcher)).Should(BeFalse())
- })
- })
-
- Context("and a negative assertion is being made", func() {
- It("should not call the failure callback", func() {
- a.ShouldNot(matcher)
- Expect(failureMessage).Should(Equal(""))
- })
-
- It("should be true", func() {
- Expect(a.ShouldNot(matcher)).Should(BeTrue())
- })
- })
- })
-
- Context("When reporting a failure", func() {
- BeforeEach(func() {
- matcher.MatchesToReturn = false
- matcher.ErrToReturn = nil
- })
-
- Context("and there is an optional description", func() {
- It("should append the description to the failure message", func() {
- a.Should(matcher, "A description")
- Expect(failureMessage).Should(Equal("A description\npositive: The thing I'm testing"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
- })
-
- Context("and there are multiple arguments to the optional description", func() {
- It("should append the formatted description to the failure message", func() {
- a.Should(matcher, "A description of [%d]", 3)
- Expect(failureMessage).Should(Equal("A description of [3]\npositive: The thing I'm testing"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
- })
- })
-
- Context("When the matcher returns an error", func() {
- BeforeEach(func() {
- matcher.ErrToReturn = errors.New("Kaboom!")
- })
-
- Context("and a positive assertion is being made", func() {
- It("should call the failure callback", func() {
- matcher.MatchesToReturn = true
- a.Should(matcher)
- Expect(failureMessage).Should(Equal("Kaboom!"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
- })
-
- Context("and a negative assertion is being made", func() {
- It("should call the failure callback", func() {
- matcher.MatchesToReturn = false
- a.ShouldNot(matcher)
- Expect(failureMessage).Should(Equal("Kaboom!"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
- })
-
- It("should always be false", func() {
- Expect(a.Should(matcher)).Should(BeFalse())
- Expect(a.ShouldNot(matcher)).Should(BeFalse())
- })
- })
-
- Context("when there are extra parameters", func() {
- It("(a simple example)", func() {
- Expect(func() (string, int, error) {
- return "foo", 0, nil
- }()).Should(Equal("foo"))
- })
-
- Context("when the parameters are all nil or zero", func() {
- It("should invoke the matcher", func() {
- matcher.MatchesToReturn = true
- matcher.ErrToReturn = nil
-
- var typedNil []string
- a = New(input, fakeFailWrapper, 1, 0, nil, typedNil)
-
- result := a.Should(matcher)
- Expect(result).Should(BeTrue())
- Expect(matcher.ReceivedActual).Should(Equal(input))
-
- Expect(failureMessage).Should(BeZero())
- })
- })
-
- Context("when any of the parameters are not nil or zero", func() {
- It("should call the failure callback", func() {
- matcher.MatchesToReturn = false
- matcher.ErrToReturn = nil
-
- a = New(input, fakeFailWrapper, 1, errors.New("foo"))
- result := a.Should(matcher)
- Expect(result).Should(BeFalse())
- Expect(matcher.ReceivedActual).Should(BeZero(), "The matcher doesn't even get called")
- Expect(failureMessage).Should(ContainSubstring("foo"))
- failureMessage = ""
-
- a = New(input, fakeFailWrapper, 1, nil, 1)
- result = a.ShouldNot(matcher)
- Expect(result).Should(BeFalse())
- Expect(failureMessage).Should(ContainSubstring("1"))
- failureMessage = ""
-
- a = New(input, fakeFailWrapper, 1, nil, 0, []string{"foo"})
- result = a.To(matcher)
- Expect(result).Should(BeFalse())
- Expect(failureMessage).Should(ContainSubstring("foo"))
- failureMessage = ""
-
- a = New(input, fakeFailWrapper, 1, nil, 0, []string{"foo"})
- result = a.ToNot(matcher)
- Expect(result).Should(BeFalse())
- Expect(failureMessage).Should(ContainSubstring("foo"))
- failureMessage = ""
-
- a = New(input, fakeFailWrapper, 1, nil, 0, []string{"foo"})
- result = a.NotTo(matcher)
- Expect(result).Should(BeFalse())
- Expect(failureMessage).Should(ContainSubstring("foo"))
- Expect(failureCallerSkip).Should(Equal(3))
- })
- })
- })
-
- Context("Making an assertion without a registered fail handler", func() {
- It("should panic", func() {
- defer func() {
- e := recover()
- RegisterFailHandler(Fail)
- if e == nil {
- Fail("expected a panic to have occurred")
- }
- }()
-
- RegisterFailHandler(nil)
- Expect(true).Should(BeTrue())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go
deleted file mode 100644
index bdb0c3d22..000000000
--- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package asyncassertion_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestAsyncAssertion(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "AsyncAssertion Suite")
-}
diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go
deleted file mode 100644
index afd61a7cd..000000000
--- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package asyncassertion_test
-
-import (
- "errors"
- "time"
-
- "github.com/onsi/gomega/internal/testingtsupport"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/internal/asyncassertion"
- "github.com/onsi/gomega/types"
-)
-
-var _ = Describe("Async Assertion", func() {
- var (
- failureMessage string
- callerSkip int
- )
-
- var fakeFailWrapper = &types.GomegaFailWrapper{
- Fail: func(message string, skip ...int) {
- failureMessage = message
- callerSkip = skip[0]
- },
- TWithHelper: testingtsupport.EmptyTWithHelper{},
- }
-
- BeforeEach(func() {
- failureMessage = ""
- callerSkip = 0
- })
-
- Describe("Eventually", func() {
- Context("the positive case", func() {
- It("should poll the function and matcher", func() {
- counter := 0
- a := New(AsyncAssertionTypeEventually, func() int {
- counter++
- return counter
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(BeNumerically("==", 5))
- Expect(failureMessage).Should(BeZero())
- })
-
- It("should continue when the matcher errors", func() {
- counter := 0
- a := New(AsyncAssertionTypeEventually, func() interface{} {
- counter++
- if counter == 5 {
- return "not-a-number" //this should cause the matcher to error
- }
- return counter
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(BeNumerically("==", 5), "My description %d", 2)
-
- Expect(failureMessage).Should(ContainSubstring("Timed out after"))
- Expect(failureMessage).Should(ContainSubstring("My description 2"))
- Expect(callerSkip).Should(Equal(4))
- })
-
- It("should be able to timeout", func() {
- counter := 0
- a := New(AsyncAssertionTypeEventually, func() int {
- counter++
- return counter
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(BeNumerically(">", 100), "My description %d", 2)
-
- Expect(counter).Should(BeNumerically(">", 8))
- Expect(counter).Should(BeNumerically("<=", 10))
- Expect(failureMessage).Should(ContainSubstring("Timed out after"))
- Expect(failureMessage).Should(MatchRegexp(`\<int\>: \d`), "Should pass the correct value to the matcher message formatter.")
- Expect(failureMessage).Should(ContainSubstring("My description 2"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
-
- Context("the negative case", func() {
- It("should poll the function and matcher", func() {
- counter := 0
- a := New(AsyncAssertionTypeEventually, func() int {
- counter += 1
- return counter
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(BeNumerically("<", 3))
-
- Expect(counter).Should(Equal(3))
- Expect(failureMessage).Should(BeZero())
- })
-
- It("should timeout when the matcher errors", func() {
- a := New(AsyncAssertionTypeEventually, func() interface{} {
- return 0 //this should cause the matcher to error
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(HaveLen(0), "My description %d", 2)
-
- Expect(failureMessage).Should(ContainSubstring("Timed out after"))
- Expect(failureMessage).Should(ContainSubstring("Error:"))
- Expect(failureMessage).Should(ContainSubstring("My description 2"))
- Expect(callerSkip).Should(Equal(4))
- })
-
- It("should be able to timeout", func() {
- a := New(AsyncAssertionTypeEventually, func() int {
- return 0
- }, fakeFailWrapper, time.Duration(0.1*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(Equal(0), "My description %d", 2)
-
- Expect(failureMessage).Should(ContainSubstring("Timed out after"))
- Expect(failureMessage).Should(ContainSubstring("<int>: 0"), "Should pass the correct value to the matcher message formatter.")
- Expect(failureMessage).Should(ContainSubstring("My description 2"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
-
- Context("with a function that returns multiple values", func() {
- It("should eventually succeed if the additional arguments are nil", func() {
- i := 0
- Eventually(func() (int, error) {
- i++
- return i, nil
- }).Should(Equal(10))
- })
-
- It("should eventually timeout if the additional arguments are not nil", func() {
- i := 0
- a := New(AsyncAssertionTypeEventually, func() (int, error) {
- i++
- return i, errors.New("bam")
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
- a.Should(Equal(2))
-
- Expect(failureMessage).Should(ContainSubstring("Timed out after"))
- Expect(failureMessage).Should(ContainSubstring("Error:"))
- Expect(failureMessage).Should(ContainSubstring("bam"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
-
- Context("Making an assertion without a registered fail handler", func() {
- It("should panic", func() {
- defer func() {
- e := recover()
- RegisterFailHandler(Fail)
- if e == nil {
- Fail("expected a panic to have occurred")
- }
- }()
-
- RegisterFailHandler(nil)
- c := make(chan bool, 1)
- c <- true
- Eventually(c).Should(Receive())
- })
- })
- })
-
- Describe("Consistently", func() {
- Describe("The positive case", func() {
- Context("when the matcher consistently passes for the duration", func() {
- It("should pass", func() {
- calls := 0
- a := New(AsyncAssertionTypeConsistently, func() string {
- calls++
- return "foo"
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(Equal("foo"))
- Expect(calls).Should(BeNumerically(">", 8))
- Expect(calls).Should(BeNumerically("<=", 10))
- Expect(failureMessage).Should(BeZero())
- })
- })
-
- Context("when the matcher fails at some point", func() {
- It("should fail", func() {
- calls := 0
- a := New(AsyncAssertionTypeConsistently, func() interface{} {
- calls++
- if calls > 5 {
- return "bar"
- }
- return "foo"
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(Equal("foo"))
- Expect(failureMessage).Should(ContainSubstring("to equal"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
-
- Context("when the matcher errors at some point", func() {
- It("should fail", func() {
- calls := 0
- a := New(AsyncAssertionTypeConsistently, func() interface{} {
- calls++
- if calls > 5 {
- return 3
- }
- return []int{1, 2, 3}
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.Should(HaveLen(3))
- Expect(failureMessage).Should(ContainSubstring("HaveLen matcher expects"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
- })
-
- Describe("The negative case", func() {
- Context("when the matcher consistently passes for the duration", func() {
- It("should pass", func() {
- c := make(chan bool)
- a := New(AsyncAssertionTypeConsistently, c, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(Receive())
- Expect(failureMessage).Should(BeZero())
- })
- })
-
- Context("when the matcher fails at some point", func() {
- It("should fail", func() {
- c := make(chan bool)
- go func() {
- time.Sleep(time.Duration(100 * time.Millisecond))
- c <- true
- }()
-
- a := New(AsyncAssertionTypeConsistently, c, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(Receive())
- Expect(failureMessage).Should(ContainSubstring("not to receive anything"))
- })
- })
-
- Context("when the matcher errors at some point", func() {
- It("should fail", func() {
- calls := 0
- a := New(AsyncAssertionTypeConsistently, func() interface{} {
- calls++
- return calls
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
-
- a.ShouldNot(BeNumerically(">", 5))
- Expect(failureMessage).Should(ContainSubstring("not to be >"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
- })
-
- Context("with a function that returns multiple values", func() {
- It("should consistently succeed if the additional arguments are nil", func() {
- i := 2
- Consistently(func() (int, error) {
- i++
- return i, nil
- }).Should(BeNumerically(">=", 2))
- })
-
- It("should eventually timeout if the additional arguments are not nil", func() {
- i := 2
- a := New(AsyncAssertionTypeEventually, func() (int, error) {
- i++
- return i, errors.New("bam")
- }, fakeFailWrapper, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
- a.Should(BeNumerically(">=", 2))
-
- Expect(failureMessage).Should(ContainSubstring("Error:"))
- Expect(failureMessage).Should(ContainSubstring("bam"))
- Expect(callerSkip).Should(Equal(4))
- })
- })
-
- Context("Making an assertion without a registered fail handler", func() {
- It("should panic", func() {
- defer func() {
- e := recover()
- RegisterFailHandler(Fail)
- if e == nil {
- Fail("expected a panic to have occurred")
- }
- }()
-
- RegisterFailHandler(nil)
- c := make(chan bool)
- Consistently(c).ShouldNot(Receive())
- })
- })
- })
-
-	Context("when passed a function with the wrong # of arguments & returns", func() {
- It("should panic", func() {
- Expect(func() {
- New(AsyncAssertionTypeEventually, func() {}, fakeFailWrapper, 0, 0, 1)
- }).Should(Panic())
-
- Expect(func() {
- New(AsyncAssertionTypeEventually, func(a string) int { return 0 }, fakeFailWrapper, 0, 0, 1)
- }).Should(Panic())
-
- Expect(func() {
- New(AsyncAssertionTypeEventually, func() int { return 0 }, fakeFailWrapper, 0, 0, 1)
- }).ShouldNot(Panic())
-
- Expect(func() {
- New(AsyncAssertionTypeEventually, func() (int, error) { return 0, nil }, fakeFailWrapper, 0, 0, 1)
- }).ShouldNot(Panic())
- })
- })
-
- Describe("bailing early", func() {
- Context("when actual is a value", func() {
- It("Eventually should bail out and fail early if the matcher says to", func() {
- c := make(chan bool)
- close(c)
-
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(c, 0.1).Should(Receive())
- })
- Expect(time.Since(t)).Should(BeNumerically("<", 90*time.Millisecond))
-
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Context("when actual is a function", func() {
- It("should never bail early", func() {
- c := make(chan bool)
- close(c)
-
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(func() chan bool {
- return c
- }, 0.1).Should(Receive())
- })
- Expect(time.Since(t)).Should(BeNumerically(">=", 90*time.Millisecond))
-
- Expect(failures).Should(HaveLen(1))
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
deleted file mode 100644
index 6e351a7de..000000000
--- a/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package fakematcher
-
-import "fmt"
-
-type FakeMatcher struct {
- ReceivedActual interface{}
- MatchesToReturn bool
- ErrToReturn error
-}
-
-func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) {
- matcher.ReceivedActual = actual
-
- return matcher.MatchesToReturn, matcher.ErrToReturn
-}
-
-func (matcher *FakeMatcher) FailureMessage(actual interface{}) string {
- return fmt.Sprintf("positive: %v", actual)
-}
-
-func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string {
- return fmt.Sprintf("negative: %v", actual)
-}
diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go
deleted file mode 100644
index 8fd8f0a6c..000000000
--- a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package testingtsupport_test
-
-import (
- "regexp"
- "time"
-
- "github.com/onsi/gomega/internal/testingtsupport"
-
- . "github.com/onsi/gomega"
-
- "fmt"
- "testing"
-)
-
-func TestTestingT(t *testing.T) {
- RegisterTestingT(t)
- Ω(true).Should(BeTrue())
-}
-
-type FakeTWithHelper struct {
- LastFatal string
-}
-
-func (f *FakeTWithHelper) Fatalf(format string, args ...interface{}) {
- f.LastFatal = fmt.Sprintf(format, args...)
-}
-
-func TestGomegaWithTWithoutHelper(t *testing.T) {
- g := NewGomegaWithT(t)
-
- testingtsupport.StackTracePruneRE = regexp.MustCompile(`\/ginkgo\/`)
-
- f := &FakeTWithHelper{}
- testG := NewGomegaWithT(f)
-
- testG.Expect("foo").To(Equal("foo"))
- g.Expect(f.LastFatal).To(BeZero())
-
- testG.Expect("foo").To(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo"))
- g.Expect(f.LastFatal).To(ContainSubstring("testingtsupport_test"), "It should include a stacktrace")
-
- testG.Eventually("foo2", time.Millisecond).Should(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo2"))
-
- testG.Consistently("foo3", time.Millisecond).Should(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo3"))
-}
-
-type FakeTWithoutHelper struct {
- LastFatal string
- HelperCount int
-}
-
-func (f *FakeTWithoutHelper) Fatalf(format string, args ...interface{}) {
- f.LastFatal = fmt.Sprintf(format, args...)
-}
-
-func (f *FakeTWithoutHelper) Helper() {
- f.HelperCount += 1
-}
-
-func (f *FakeTWithoutHelper) ResetHelper() {
- f.HelperCount = 0
-}
-
-func TestGomegaWithTWithHelper(t *testing.T) {
- g := NewGomegaWithT(t)
-
- f := &FakeTWithoutHelper{}
- testG := NewGomegaWithT(f)
-
- testG.Expect("foo").To(Equal("foo"))
- g.Expect(f.LastFatal).To(BeZero())
- g.Expect(f.HelperCount).To(BeNumerically(">", 0))
- f.ResetHelper()
-
- testG.Expect("foo").To(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo"))
- g.Expect(f.LastFatal).NotTo(ContainSubstring("testingtsupport_test"), "It should _not_ include a stacktrace")
- g.Expect(f.HelperCount).To(BeNumerically(">", 0))
- f.ResetHelper()
-
- testG.Eventually("foo2", time.Millisecond).Should(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo2"))
- g.Expect(f.HelperCount).To(BeNumerically(">", 0))
- f.ResetHelper()
-
- testG.Consistently("foo3", time.Millisecond).Should(Equal("bar"))
- g.Expect(f.LastFatal).To(ContainSubstring("<string>: foo3"))
- g.Expect(f.HelperCount).To(BeNumerically(">", 0))
-}
diff --git a/vendor/github.com/onsi/gomega/matchers/and_test.go b/vendor/github.com/onsi/gomega/matchers/and_test.go
deleted file mode 100644
index acf778cd6..000000000
--- a/vendor/github.com/onsi/gomega/matchers/and_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
- "github.com/onsi/gomega/types"
-)
-
-// sample data
-var (
- // example input
- input = "hi"
- // some matchers that succeed against the input
- true1 = HaveLen(2)
- true2 = Equal("hi")
- true3 = MatchRegexp("hi")
- // some matchers that fail against the input.
- false1 = HaveLen(1)
- false2 = Equal("hip")
- false3 = MatchRegexp("hope")
-)
-
-// verifyFailureMessage expects the matcher to fail with the given input, and verifies the failure message.
-func verifyFailureMessage(m types.GomegaMatcher, input string, expectedFailureMsgFragment string) {
- Expect(m.Match(input)).To(BeFalse())
- Expect(m.FailureMessage(input)).To(Equal(
- "Expected\n <string>: " + input + "\n" + expectedFailureMsgFragment))
-}
-
-var _ = Describe("AndMatcher", func() {
- It("works with positive cases", func() {
- Expect(input).To(And())
- Expect(input).To(And(true1))
- Expect(input).To(And(true1, true2))
- Expect(input).To(And(true1, true2, true3))
-
- // use alias
- Expect(input).To(SatisfyAll(true1, true2, true3))
- })
-
- It("works with negative cases", func() {
- Expect(input).ToNot(And(false1, false2))
- Expect(input).ToNot(And(true1, true2, false3))
- Expect(input).ToNot(And(true1, false2, false3))
- Expect(input).ToNot(And(false1, true1, true2))
- })
-
- Context("failure messages", func() {
- Context("when match fails", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(And(false1, true1), input, "to have length 1")
- verifyFailureMessage(And(true1, false2), input, "to equal\n <string>: hip")
- verifyFailureMessage(And(true1, true2, false3), input, "to match regular expression\n <string>: hope")
- })
- })
-
- Context("when match succeeds, but expected it to fail", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(Not(And(true1, true2)), input,
- `To not satisfy all of these matchers: [%!s(*matchers.HaveLenMatcher=&{2}) %!s(*matchers.EqualMatcher=&{hi})]`)
- })
- })
- })
-
- Context("MatchMayChangeInTheFuture", func() {
- Context("Match returned false", func() {
- Context("returns value of the failed matcher", func() {
- It("false if failed matcher not going to change", func() {
- // 3 matchers: 1st returns true, 2nd returns false and is not going to change, 3rd is never called
- m := And(Not(BeNil()), Or(), Equal(1))
- Expect(m.Match("hi")).To(BeFalse())
- Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // empty Or() indicates not going to change
- })
- It("true if failed matcher indicates it might change", func() {
- // 3 matchers: 1st returns true, 2nd returns false and "might" change, 3rd is never called
- m := And(Not(BeNil()), Equal(5), Equal(1))
- Expect(m.Match("hi")).To(BeFalse())
- Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal(5) indicates it might change
- })
- })
- })
- Context("Match returned true", func() {
- It("returns true if any of the matchers could change", func() {
- // 3 matchers, all return true, and all could change
- m := And(Not(BeNil()), Equal("hi"), HaveLen(2))
- Expect(m.Match("hi")).To(BeTrue())
- Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true'
- })
- It("returns false if none of the matchers could change", func() {
- // empty And() has the property of always matching, and never can change since there are no sub-matchers that could change
- m := And()
- Expect(m.Match("anything")).To(BeTrue())
- Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse())
-
- // And() with 3 sub-matchers that return true, and can't change
- m = And(And(), And(), And())
- Expect(m.Match("hi")).To(BeTrue())
- Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty And()'s won't change
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go
deleted file mode 100644
index 471a46d97..000000000
--- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("AssignableToTypeOf", func() {
- Context("When asserting assignability between types", func() {
- It("should do the right thing", func() {
- Expect(0).Should(BeAssignableToTypeOf(0))
- Expect(5).Should(BeAssignableToTypeOf(-1))
- Expect("foo").Should(BeAssignableToTypeOf("bar"))
- Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
-
- Expect(0).ShouldNot(BeAssignableToTypeOf("bar"))
- Expect(5).ShouldNot(BeAssignableToTypeOf(struct{ Foo string }{}))
- Expect("foo").ShouldNot(BeAssignableToTypeOf(42))
- })
- })
-
- Context("When asserting nil values", func() {
- It("should error", func() {
- success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
-
- Context("When actual is nil and expected is not nil", func() {
- It("should return false without error", func() {
- success, err := (&AssignableToTypeOfMatcher{Expected: 17}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
-
- Context("When actual is not nil and expected is nil", func() {
- It("should error", func() {
- success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(17)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go
deleted file mode 100644
index bc8742763..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package matchers_test
-
-import (
- "io/ioutil"
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeADirectoryMatcher", func() {
- Context("when passed a string", func() {
- It("should do the right thing", func() {
- Expect("/dne/test").ShouldNot(BeADirectory())
-
- tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpFile.Name())
- Expect(tmpFile.Name()).ShouldNot(BeADirectory())
-
- tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpDir)
- Expect(tmpDir).Should(BeADirectory())
- })
- })
-
- Context("when passed something else", func() {
- It("should error", func() {
- success, err := (&BeADirectoryMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeADirectoryMatcher{}).Match(true)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go
deleted file mode 100644
index eae06a03e..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package matchers_test
-
-import (
- "io/ioutil"
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeARegularFileMatcher", func() {
- Context("when passed a string", func() {
- It("should do the right thing", func() {
- Expect("/dne/test").ShouldNot(BeARegularFile())
-
- tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpFile.Name())
- Expect(tmpFile.Name()).Should(BeARegularFile())
-
- tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpDir)
- Expect(tmpDir).ShouldNot(BeARegularFile())
- })
- })
-
- Context("when passed something else", func() {
- It("should error", func() {
- success, err := (&BeARegularFileMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeARegularFileMatcher{}).Match(true)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go
deleted file mode 100644
index e28bd0d65..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package matchers_test
-
-import (
- "io/ioutil"
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeAnExistingFileMatcher", func() {
- Context("when passed a string", func() {
- It("should do the right thing", func() {
- Expect("/dne/test").ShouldNot(BeAnExistingFile())
-
- tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpFile.Name())
- Expect(tmpFile.Name()).Should(BeAnExistingFile())
-
- tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir")
- Expect(err).ShouldNot(HaveOccurred())
- defer os.Remove(tmpDir)
- Expect(tmpDir).Should(BeAnExistingFile())
- })
- })
-
- Context("when passed something else", func() {
- It("should error", func() {
- success, err := (&BeAnExistingFileMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeAnExistingFileMatcher{}).Match(true)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go
deleted file mode 100644
index c2e49ab50..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeClosedMatcher", func() {
- Context("when passed a channel", func() {
- It("should do the right thing", func() {
- openChannel := make(chan bool)
- Expect(openChannel).ShouldNot(BeClosed())
-
- var openReaderChannel <-chan bool
- openReaderChannel = openChannel
- Expect(openReaderChannel).ShouldNot(BeClosed())
-
- closedChannel := make(chan bool)
- close(closedChannel)
-
- Expect(closedChannel).Should(BeClosed())
-
- var closedReaderChannel <-chan bool
- closedReaderChannel = closedChannel
- Expect(closedReaderChannel).Should(BeClosed())
- })
- })
-
- Context("when passed a send-only channel", func() {
- It("should error", func() {
- openChannel := make(chan bool)
- var openWriterChannel chan<- bool
- openWriterChannel = openChannel
-
- success, err := (&BeClosedMatcher{}).Match(openWriterChannel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- closedChannel := make(chan bool)
- close(closedChannel)
-
- var closedWriterChannel chan<- bool
- closedWriterChannel = closedChannel
-
- success, err = (&BeClosedMatcher{}).Match(closedWriterChannel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- })
- })
-
- Context("when passed something else", func() {
- It("should error", func() {
- var nilChannel chan bool
-
- success, err := (&BeClosedMatcher{}).Match(nilChannel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeClosedMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeClosedMatcher{}).Match(7)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go
deleted file mode 100644
index 132480cfc..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeEmpty", func() {
- Context("when passed a supported type", func() {
- It("should do the right thing", func() {
- Expect("").Should(BeEmpty())
- Expect(" ").ShouldNot(BeEmpty())
-
- Expect([0]int{}).Should(BeEmpty())
- Expect([1]int{1}).ShouldNot(BeEmpty())
-
- Expect([]int{}).Should(BeEmpty())
- Expect([]int{1}).ShouldNot(BeEmpty())
-
- Expect(map[string]int{}).Should(BeEmpty())
- Expect(map[string]int{"a": 1}).ShouldNot(BeEmpty())
-
- c := make(chan bool, 1)
- Expect(c).Should(BeEmpty())
- c <- true
- Expect(c).ShouldNot(BeEmpty())
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should be true", func() {
- var nilSlice []int
- Expect(nilSlice).Should(BeEmpty())
-
- var nilMap map[int]string
- Expect(nilMap).Should(BeEmpty())
- })
- })
-
- Context("when passed an unsupported type", func() {
- It("should error", func() {
- success, err := (&BeEmptyMatcher{}).Match(0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeEmptyMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go
deleted file mode 100644
index 4d9d11d2d..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeEquivalentTo", func() {
- Context("when asserting that nil is equivalent to nil", func() {
- It("should error", func() {
- success, err := (&BeEquivalentToMatcher{Expected: nil}).Match(nil)
-
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("When asserting on nil", func() {
- It("should do the right thing", func() {
- Expect("foo").ShouldNot(BeEquivalentTo(nil))
- Expect(nil).ShouldNot(BeEquivalentTo(3))
- Expect([]int{1, 2}).ShouldNot(BeEquivalentTo(nil))
- })
- })
-
- Context("When asserting on type aliases", func() {
-		It("should do the right thing", func() {
- Expect(StringAlias("foo")).Should(BeEquivalentTo("foo"))
- Expect("foo").Should(BeEquivalentTo(StringAlias("foo")))
- Expect(StringAlias("foo")).ShouldNot(BeEquivalentTo("bar"))
- Expect("foo").ShouldNot(BeEquivalentTo(StringAlias("bar")))
- })
- })
-
- Context("When asserting on numbers", func() {
- It("should convert actual to expected and do the right thing", func() {
- Expect(5).Should(BeEquivalentTo(5))
- Expect(5.0).Should(BeEquivalentTo(5.0))
- Expect(5).Should(BeEquivalentTo(5.0))
-
- Expect(5).ShouldNot(BeEquivalentTo("5"))
- Expect(5).ShouldNot(BeEquivalentTo(3))
-
- //Here be dragons!
- Expect(5.1).Should(BeEquivalentTo(5))
- Expect(5).ShouldNot(BeEquivalentTo(5.1))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go
deleted file mode 100644
index 25e70633d..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeFalse", func() {
- It("should handle true and false correctly", func() {
- Expect(true).ShouldNot(BeFalse())
- Expect(false).Should(BeFalse())
- })
-
- It("should only support booleans", func() {
- success, err := (&BeFalseMatcher{}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go
deleted file mode 100644
index 7fdd56eed..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package matchers_test
-
-import (
- "errors"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeIdenticalTo", func() {
- Context("when asserting that nil equals nil", func() {
- It("should error", func() {
- success, err := (&BeIdenticalToMatcher{Expected: nil}).Match(nil)
-
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- It("should treat the same pointer to a struct as identical", func() {
- mySpecialStruct := myCustomType{}
- Expect(&mySpecialStruct).Should(BeIdenticalTo(&mySpecialStruct))
- Expect(&myCustomType{}).ShouldNot(BeIdenticalTo(&mySpecialStruct))
- })
-
- It("should be strict about types", func() {
- Expect(5).ShouldNot(BeIdenticalTo("5"))
- Expect(5).ShouldNot(BeIdenticalTo(5.0))
- Expect(5).ShouldNot(BeIdenticalTo(3))
- })
-
-	It("should treat primitives as identical", func() {
- Expect("5").Should(BeIdenticalTo("5"))
- Expect("5").ShouldNot(BeIdenticalTo("55"))
-
- Expect(5.55).Should(BeIdenticalTo(5.55))
- Expect(5.55).ShouldNot(BeIdenticalTo(6.66))
-
- Expect(5).Should(BeIdenticalTo(5))
- Expect(5).ShouldNot(BeIdenticalTo(55))
- })
-
- It("should treat the same pointers to a slice as identical", func() {
- mySlice := []int{1, 2}
- Expect(&mySlice).Should(BeIdenticalTo(&mySlice))
- Expect(&mySlice).ShouldNot(BeIdenticalTo(&[]int{1, 2}))
- })
-
- It("should treat the same pointers to a map as identical", func() {
- myMap := map[string]string{"a": "b", "c": "d"}
- Expect(&myMap).Should(BeIdenticalTo(&myMap))
- Expect(myMap).ShouldNot(BeIdenticalTo(map[string]string{"a": "b", "c": "d"}))
- })
-
- It("should treat the same pointers to an error as identical", func() {
- myError := errors.New("foo")
- Expect(&myError).Should(BeIdenticalTo(&myError))
- Expect(errors.New("foo")).ShouldNot(BeIdenticalTo(errors.New("bar")))
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go
deleted file mode 100644
index c35aa3d7c..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("BeNil", func() {
- It("should succeed when passed nil", func() {
- Expect(nil).Should(BeNil())
- })
-
- It("should succeed when passed a typed nil", func() {
- var a []int
- Expect(a).Should(BeNil())
- })
-
- It("should succeed when passing nil pointer", func() {
- var f *struct{}
- Expect(f).Should(BeNil())
- })
-
- It("should not succeed when not passed nil", func() {
- Expect(0).ShouldNot(BeNil())
- Expect(false).ShouldNot(BeNil())
- Expect("").ShouldNot(BeNil())
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go
deleted file mode 100644
index a32d2b8b1..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeNumerically", func() {
- Context("when passed a number", func() {
- It("should support ==", func() {
- Expect(uint32(5)).Should(BeNumerically("==", 5))
- Expect(float64(5.0)).Should(BeNumerically("==", 5))
- Expect(int8(5)).Should(BeNumerically("==", 5))
- })
-
- It("should not have false positives", func() {
- Expect(5.1).ShouldNot(BeNumerically("==", 5))
- Expect(5).ShouldNot(BeNumerically("==", 5.1))
- })
-
- It("should support >", func() {
- Expect(uint32(5)).Should(BeNumerically(">", 4))
- Expect(float64(5.0)).Should(BeNumerically(">", 4.9))
- Expect(int8(5)).Should(BeNumerically(">", 4))
-
- Expect(uint32(5)).ShouldNot(BeNumerically(">", 5))
- Expect(float64(5.0)).ShouldNot(BeNumerically(">", 5.0))
- Expect(int8(5)).ShouldNot(BeNumerically(">", 5))
- })
-
- It("should support <", func() {
- Expect(uint32(5)).Should(BeNumerically("<", 6))
- Expect(float64(5.0)).Should(BeNumerically("<", 5.1))
- Expect(int8(5)).Should(BeNumerically("<", 6))
-
- Expect(uint32(5)).ShouldNot(BeNumerically("<", 5))
- Expect(float64(5.0)).ShouldNot(BeNumerically("<", 5.0))
- Expect(int8(5)).ShouldNot(BeNumerically("<", 5))
- })
-
- It("should support >=", func() {
- Expect(uint32(5)).Should(BeNumerically(">=", 4))
- Expect(float64(5.0)).Should(BeNumerically(">=", 4.9))
- Expect(int8(5)).Should(BeNumerically(">=", 4))
-
- Expect(uint32(5)).Should(BeNumerically(">=", 5))
- Expect(float64(5.0)).Should(BeNumerically(">=", 5.0))
- Expect(int8(5)).Should(BeNumerically(">=", 5))
-
- Expect(uint32(5)).ShouldNot(BeNumerically(">=", 6))
- Expect(float64(5.0)).ShouldNot(BeNumerically(">=", 5.1))
- Expect(int8(5)).ShouldNot(BeNumerically(">=", 6))
- })
-
- It("should support <=", func() {
- Expect(uint32(5)).Should(BeNumerically("<=", 6))
- Expect(float64(5.0)).Should(BeNumerically("<=", 5.1))
- Expect(int8(5)).Should(BeNumerically("<=", 6))
-
- Expect(uint32(5)).Should(BeNumerically("<=", 5))
- Expect(float64(5.0)).Should(BeNumerically("<=", 5.0))
- Expect(int8(5)).Should(BeNumerically("<=", 5))
-
- Expect(uint32(5)).ShouldNot(BeNumerically("<=", 4))
- Expect(float64(5.0)).ShouldNot(BeNumerically("<=", 4.9))
-			Expect(int8(5)).ShouldNot(BeNumerically("<=", 4))
- })
-
- Context("when passed ~", func() {
- Context("when passed a float", func() {
- Context("and there is no precision parameter", func() {
- It("should default to 1e-8", func() {
- Expect(5.00000001).Should(BeNumerically("~", 5.00000002))
- Expect(5.00000001).ShouldNot(BeNumerically("~", 5.0000001))
- })
-
- It("should show failure message", func() {
- actual := BeNumerically("~", 4.98).FailureMessage(123)
- expected := "Expected\n <int>: 123\nto be ~\n <float64>: 4.98"
- Expect(actual).To(Equal(expected))
- })
-
- It("should show negated failure message", func() {
- actual := BeNumerically("~", 4.98).NegatedFailureMessage(123)
- expected := "Expected\n <int>: 123\nnot to be ~\n <float64>: 4.98"
- Expect(actual).To(Equal(expected))
- })
- })
-
- Context("and there is a precision parameter", func() {
- It("should use the precision parameter", func() {
- Expect(5.1).Should(BeNumerically("~", 5.19, 0.1))
- Expect(5.1).Should(BeNumerically("~", 5.01, 0.1))
- Expect(5.1).ShouldNot(BeNumerically("~", 5.22, 0.1))
- Expect(5.1).ShouldNot(BeNumerically("~", 4.98, 0.1))
- })
-
- It("should show precision in failure message", func() {
- actual := BeNumerically("~", 4.98, 0.1).FailureMessage(123)
- expected := "Expected\n <int>: 123\nto be within 0.1 of ~\n <float64>: 4.98"
- Expect(actual).To(Equal(expected))
- })
-
- It("should show precision in negated failure message", func() {
- actual := BeNumerically("~", 4.98, 0.1).NegatedFailureMessage(123)
- expected := "Expected\n <int>: 123\nnot to be within 0.1 of ~\n <float64>: 4.98"
- Expect(actual).To(Equal(expected))
- })
- })
- })
-
- Context("when passed an int/uint", func() {
- Context("and there is no precision parameter", func() {
- It("should just do strict equality", func() {
- Expect(5).Should(BeNumerically("~", 5))
- Expect(5).ShouldNot(BeNumerically("~", 6))
- Expect(uint(5)).ShouldNot(BeNumerically("~", 6))
- })
- })
-
- Context("and there is a precision parameter", func() {
-					It("should use the precision parameter", func() {
- Expect(5).Should(BeNumerically("~", 6, 2))
- Expect(5).ShouldNot(BeNumerically("~", 8, 2))
- Expect(uint(5)).Should(BeNumerically("~", 6, 1))
- })
- })
- })
- })
- })
-
- Context("when passed a non-number", func() {
- It("should error", func() {
- success, err := (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{5}}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "=="}).Match(5)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "~", CompareTo: []interface{}{3.0, "foo"}}).Match(5.0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match(5)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{nil}}).Match(0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{0}}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed an unsupported comparator", func() {
- It("should error", func() {
- success, err := (&BeNumericallyMatcher{Comparator: "!=", CompareTo: []interface{}{5}}).Match(4)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go
deleted file mode 100644
index 68ec72879..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package matchers_test
-
-import (
- "time"
-
- . "github.com/onsi/gomega/matchers"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("BeSent", func() {
- Context("when passed a channel and a matching type", func() {
- Context("when the channel is ready to receive", func() {
- It("should succeed and send the value down the channel", func() {
- c := make(chan string)
- d := make(chan string)
- go func() {
- val := <-c
- d <- val
- }()
-
- time.Sleep(10 * time.Millisecond)
-
- Expect(c).Should(BeSent("foo"))
- Eventually(d).Should(Receive(Equal("foo")))
- })
-
- It("should succeed (with a buffered channel)", func() {
- c := make(chan string, 1)
- Expect(c).Should(BeSent("foo"))
- Expect(<-c).Should(Equal("foo"))
- })
- })
-
- Context("when the channel is not ready to receive", func() {
- It("should fail and not send down the channel", func() {
- c := make(chan string)
- Expect(c).ShouldNot(BeSent("foo"))
- Consistently(c).ShouldNot(Receive())
- })
- })
-
- Context("when the channel is eventually ready to receive", func() {
- It("should succeed", func() {
- c := make(chan string)
- d := make(chan string)
- go func() {
- time.Sleep(30 * time.Millisecond)
- val := <-c
- d <- val
- }()
-
- Eventually(c).Should(BeSent("foo"))
- Eventually(d).Should(Receive(Equal("foo")))
- })
- })
-
- Context("when the channel is closed", func() {
- It("should error", func() {
- c := make(chan string)
- close(c)
- success, err := (&BeSentMatcher{Arg: "foo"}).Match(c)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
-
- It("should short-circuit Eventually", func() {
- c := make(chan string)
- close(c)
-
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(c, 10.0).Should(BeSent("foo"))
- })
- Expect(failures).Should(HaveLen(1))
- Expect(time.Since(t)).Should(BeNumerically("<", time.Second))
- })
- })
- })
-
- Context("when passed a channel and a non-matching type", func() {
- It("should error", func() {
- success, err := (&BeSentMatcher{Arg: "foo"}).Match(make(chan int, 1))
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed a receive-only channel", func() {
- It("should error", func() {
- var c <-chan string
- c = make(chan string, 1)
- success, err := (&BeSentMatcher{Arg: "foo"}).Match(c)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed a nonchannel", func() {
- It("should error", func() {
- success, err := (&BeSentMatcher{Arg: "foo"}).Match("bar")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go
deleted file mode 100644
index 95a3a103e..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package matchers_test
-
-import (
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeTemporally", func() {
-
- var t0, t1, t2 time.Time
- BeforeEach(func() {
- t0 = time.Now()
- t1 = t0.Add(time.Second)
- t2 = t0.Add(-time.Second)
- })
-
- Context("When comparing times", func() {
-
- It("should support ==", func() {
- Expect(t0).Should(BeTemporally("==", t0))
- Expect(t1).ShouldNot(BeTemporally("==", t0))
- Expect(t0).ShouldNot(BeTemporally("==", t1))
- Expect(t0).ShouldNot(BeTemporally("==", time.Time{}))
- })
-
- It("should support >", func() {
- Expect(t0).Should(BeTemporally(">", t2))
- Expect(t0).ShouldNot(BeTemporally(">", t0))
- Expect(t2).ShouldNot(BeTemporally(">", t0))
- })
-
- It("should support <", func() {
- Expect(t0).Should(BeTemporally("<", t1))
- Expect(t0).ShouldNot(BeTemporally("<", t0))
- Expect(t1).ShouldNot(BeTemporally("<", t0))
- })
-
- It("should support >=", func() {
- Expect(t0).Should(BeTemporally(">=", t2))
- Expect(t0).Should(BeTemporally(">=", t0))
- Expect(t0).ShouldNot(BeTemporally(">=", t1))
- })
-
- It("should support <=", func() {
- Expect(t0).Should(BeTemporally("<=", t1))
- Expect(t0).Should(BeTemporally("<=", t0))
- Expect(t0).ShouldNot(BeTemporally("<=", t2))
- })
-
- Context("when passed ~", func() {
- Context("and there is no precision parameter", func() {
- BeforeEach(func() {
- t1 = t0.Add(time.Millisecond / 2)
- t2 = t0.Add(-2 * time.Millisecond)
- })
- It("should approximate", func() {
- Expect(t0).Should(BeTemporally("~", t0))
- Expect(t0).Should(BeTemporally("~", t1))
- Expect(t0).ShouldNot(BeTemporally("~", t2))
- })
- })
-
- Context("and there is a precision parameter", func() {
- BeforeEach(func() {
- t2 = t0.Add(3 * time.Second)
- })
- It("should use precision paramter", func() {
- d := 2 * time.Second
- Expect(t0).Should(BeTemporally("~", t0, d))
- Expect(t0).Should(BeTemporally("~", t1, d))
- Expect(t0).ShouldNot(BeTemporally("~", t2, d))
- })
- })
- })
- })
-
- Context("when passed a non-time", func() {
- It("should error", func() {
- success, err := (&BeTemporallyMatcher{Comparator: "==", CompareTo: t0}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&BeTemporallyMatcher{Comparator: "=="}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed an unsupported comparator", func() {
- It("should error", func() {
- success, err := (&BeTemporallyMatcher{Comparator: "!=", CompareTo: t0}).Match(t2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go
deleted file mode 100644
index 9eda62c33..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("BeTrue", func() {
- It("should handle true and false correctly", func() {
- Expect(true).Should(BeTrue())
- Expect(false).ShouldNot(BeTrue())
- })
-
- It("should only support booleans", func() {
- success, err := (&BeTrueMatcher{}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go
deleted file mode 100644
index c89e10330..000000000
--- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("BeZero", func() {
- It("succeeds for zero values for its type", func() {
- Expect(nil).Should(BeZero())
-
- Expect("").Should(BeZero())
- Expect(" ").ShouldNot(BeZero())
-
- Expect(0).Should(BeZero())
- Expect(1).ShouldNot(BeZero())
-
- Expect(0.0).Should(BeZero())
- Expect(0.1).ShouldNot(BeZero())
-
- // Expect([]int{}).Should(BeZero())
- Expect([]int{1}).ShouldNot(BeZero())
-
- // Expect(map[string]int{}).Should(BeZero())
- Expect(map[string]int{"a": 1}).ShouldNot(BeZero())
-
- Expect(myCustomType{}).Should(BeZero())
- Expect(myCustomType{s: "a"}).ShouldNot(BeZero())
- })
-
- It("builds failure message", func() {
- actual := BeZero().FailureMessage(123)
- Expect(actual).To(Equal("Expected\n <int>: 123\nto be zero-valued"))
- })
-
- It("builds negated failure message", func() {
- actual := BeZero().NegatedFailureMessage(123)
- Expect(actual).To(Equal("Expected\n <int>: 123\nnot to be zero-valued"))
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of_test.go b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go
deleted file mode 100644
index f6971c4f5..000000000
--- a/vendor/github.com/onsi/gomega/matchers/consist_of_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("ConsistOf", func() {
- Context("with a slice", func() {
- It("should do the right thing", func() {
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz"))
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz"))
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo"))
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo"))
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo"))
- })
- })
-
- Context("with an array", func() {
- It("should do the right thing", func() {
- Expect([3]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz"))
- Expect([3]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo"))
- Expect([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo"))
- Expect([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo"))
- })
- })
-
- Context("with a map", func() {
- It("should apply to the values", func() {
- Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("foo", "bar", "baz"))
- Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("baz", "bar", "foo"))
- Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo"))
- Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "foo"))
- })
-
- })
-
- Context("with anything else", func() {
- It("should error", func() {
- failures := InterceptGomegaFailures(func() {
- Expect("foo").Should(ConsistOf("f", "o", "o"))
- })
-
- Expect(failures).Should(HaveLen(1))
- })
- })
-
- Context("when passed matchers", func() {
- It("should pass if the matchers pass", func() {
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), "baz"))
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba")))
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("foo")))
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("^ba")))
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("turducken")))
- })
-
- It("should not depend on the order of the matchers", func() {
- Expect([][]int{{1, 2}, {2}}).Should(ConsistOf(ContainElement(1), ContainElement(2)))
- Expect([][]int{{1, 2}, {2}}).Should(ConsistOf(ContainElement(2), ContainElement(1)))
- })
-
- Context("when a matcher errors", func() {
- It("should soldier on", func() {
- Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf(BeFalse(), "foo", "bar"))
- Expect([]interface{}{"foo", "bar", false}).Should(ConsistOf(BeFalse(), ContainSubstring("foo"), "bar"))
- })
- })
- })
-
- Context("when passed exactly one argument, and that argument is a slice", func() {
- It("should match against the elements of that argument", func() {
- Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf([]string{"foo", "bar", "baz"}))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go
deleted file mode 100644
index 60fb55e96..000000000
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("ContainElement", func() {
- Context("when passed a supported type", func() {
- Context("and expecting a non-matcher", func() {
- It("should do the right thing", func() {
- Expect([2]int{1, 2}).Should(ContainElement(2))
- Expect([2]int{1, 2}).ShouldNot(ContainElement(3))
-
- Expect([]int{1, 2}).Should(ContainElement(2))
- Expect([]int{1, 2}).ShouldNot(ContainElement(3))
-
- Expect(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(2))
- Expect(map[int]int{3: 1, 4: 2}).ShouldNot(ContainElement(3))
-
- arr := make([]myCustomType, 2)
- arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}
- arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}}
- Expect(arr).Should(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}))
- Expect(arr).ShouldNot(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}}))
- })
- })
-
- Context("and expecting a matcher", func() {
- It("should pass each element through the matcher", func() {
- Expect([]int{1, 2, 3}).Should(ContainElement(BeNumerically(">=", 3)))
- Expect([]int{1, 2, 3}).ShouldNot(ContainElement(BeNumerically(">", 3)))
- Expect(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(BeNumerically(">=", 2)))
- Expect(map[string]int{"foo": 1, "bar": 2}).ShouldNot(ContainElement(BeNumerically(">", 2)))
- })
-
- It("should power through even if the matcher ever fails", func() {
- Expect([]interface{}{1, 2, "3", 4}).Should(ContainElement(BeNumerically(">=", 3)))
- })
-
- It("should fail if the matcher fails", func() {
- actual := []interface{}{1, 2, "3", "4"}
- success, err := (&ContainElementMatcher{Element: BeNumerically(">=", 3)}).Match(actual)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should operate succesfully on the passed in value", func() {
- var nilSlice []int
- Expect(nilSlice).ShouldNot(ContainElement(1))
-
- var nilMap map[int]string
- Expect(nilMap).ShouldNot(ContainElement("foo"))
- })
- })
-
- Context("when passed an unsupported type", func() {
- It("should error", func() {
- success, err := (&ContainElementMatcher{Element: 0}).Match(0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&ContainElementMatcher{Element: 0}).Match("abc")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&ContainElementMatcher{Element: 0}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go
deleted file mode 100644
index efffb4732..000000000
--- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("ContainSubstringMatcher", func() {
- Context("when actual is a string", func() {
- It("should match against the string", func() {
- Expect("Marvelous").Should(ContainSubstring("rve"))
- Expect("Marvelous").ShouldNot(ContainSubstring("boo"))
- })
- })
-
- Context("when the matcher is called with multiple arguments", func() {
- It("should pass the string and arguments to sprintf", func() {
- Expect("Marvelous3").Should(ContainSubstring("velous%d", 3))
- })
- })
-
- Context("when actual is a stringer", func() {
- It("should call the stringer and match agains the returned string", func() {
- Expect(&myStringer{a: "Abc3"}).Should(ContainSubstring("bc3"))
- })
- })
-
- Context("when actual is neither a string nor a stringer", func() {
- It("should error", func() {
- success, err := (&ContainSubstringMatcher{Substr: "2"}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go
deleted file mode 100644
index 3ab991e4f..000000000
--- a/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package matchers_test
-
-import (
- "errors"
- "strings"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("Equal", func() {
- Context("when asserting that nil equals nil", func() {
- It("should error", func() {
- success, err := (&EqualMatcher{Expected: nil}).Match(nil)
-
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("When asserting equality between objects", func() {
- It("should do the right thing", func() {
- Expect(5).Should(Equal(5))
- Expect(5.0).Should(Equal(5.0))
-
- Expect(5).ShouldNot(Equal("5"))
- Expect(5).ShouldNot(Equal(5.0))
- Expect(5).ShouldNot(Equal(3))
-
- Expect("5").Should(Equal("5"))
- Expect([]int{1, 2}).Should(Equal([]int{1, 2}))
- Expect([]int{1, 2}).ShouldNot(Equal([]int{2, 1}))
- Expect([]byte{'f', 'o', 'o'}).Should(Equal([]byte{'f', 'o', 'o'}))
- Expect([]byte{'f', 'o', 'o'}).ShouldNot(Equal([]byte{'b', 'a', 'r'}))
- Expect(map[string]string{"a": "b", "c": "d"}).Should(Equal(map[string]string{"a": "b", "c": "d"}))
- Expect(map[string]string{"a": "b", "c": "d"}).ShouldNot(Equal(map[string]string{"a": "b", "c": "e"}))
- Expect(errors.New("foo")).Should(Equal(errors.New("foo")))
- Expect(errors.New("foo")).ShouldNot(Equal(errors.New("bar")))
-
- Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}))
- Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "bar", n: 3, f: 2.0, arr: []string{"a", "b"}}))
- Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 2, f: 2.0, arr: []string{"a", "b"}}))
- Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 3.0, arr: []string{"a", "b"}}))
- Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b", "c"}}))
- })
- })
-
- Describe("failure messages", func() {
- It("shows the two strings simply when they are short", func() {
- subject := EqualMatcher{Expected: "eric"}
-
- failureMessage := subject.FailureMessage("tim")
- Expect(failureMessage).To(BeEquivalentTo(expectedShortStringFailureMessage))
- })
-
- It("shows the exact point where two long strings differ", func() {
- stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-
- subject := EqualMatcher{Expected: stringWithZ}
-
- failureMessage := subject.FailureMessage(stringWithB)
- Expect(failureMessage).To(BeEquivalentTo(expectedLongStringFailureMessage))
- })
- })
-})
-
-var expectedShortStringFailureMessage = strings.TrimSpace(`
-Expected
- <string>: tim
-to equal
- <string>: eric
-`)
-var expectedLongStringFailureMessage = strings.TrimSpace(`
-Expected
- <string>: "...aaaaabaaaaa..."
-to equal |
- <string>: "...aaaaazaaaaa..."
-`)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go
deleted file mode 100644
index 8a61f2e2c..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HaveCap", func() {
- Context("when passed a supported type", func() {
- It("should do the right thing", func() {
- Expect([0]int{}).Should(HaveCap(0))
- Expect([2]int{1}).Should(HaveCap(2))
-
- Expect([]int{}).Should(HaveCap(0))
- Expect([]int{1, 2, 3, 4, 5}[:2]).Should(HaveCap(5))
- Expect(make([]int, 0, 5)).Should(HaveCap(5))
-
- c := make(chan bool, 3)
- Expect(c).Should(HaveCap(3))
- c <- true
- c <- true
- Expect(c).Should(HaveCap(3))
-
- Expect(make(chan bool)).Should(HaveCap(0))
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should operate succesfully on the passed in value", func() {
- var nilSlice []int
- Expect(nilSlice).Should(HaveCap(0))
-
- var nilChan chan int
- Expect(nilChan).Should(HaveCap(0))
- })
- })
-
- Context("when passed an unsupported type", func() {
- It("should error", func() {
- success, err := (&HaveCapMatcher{Count: 0}).Match(0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&HaveCapMatcher{Count: 0}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go
deleted file mode 100644
index 0f1561b7d..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HaveKey", func() {
- var (
- stringKeys map[string]int
- intKeys map[int]string
- objKeys map[*myCustomType]string
-
- customA *myCustomType
- customB *myCustomType
- )
- BeforeEach(func() {
- stringKeys = map[string]int{"foo": 2, "bar": 3}
- intKeys = map[int]string{2: "foo", 3: "bar"}
-
- customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}}
- customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}
- objKeys = map[*myCustomType]string{customA: "aardvark", customB: "kangaroo"}
- })
-
- Context("when passed a map", func() {
- It("should do the right thing", func() {
- Expect(stringKeys).Should(HaveKey("foo"))
- Expect(stringKeys).ShouldNot(HaveKey("baz"))
-
- Expect(intKeys).Should(HaveKey(2))
- Expect(intKeys).ShouldNot(HaveKey(4))
-
- Expect(objKeys).Should(HaveKey(customA))
- Expect(objKeys).Should(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}))
- Expect(objKeys).ShouldNot(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}))
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should operate succesfully on the passed in value", func() {
- var nilMap map[int]string
- Expect(nilMap).ShouldNot(HaveKey("foo"))
- })
- })
-
- Context("when the passed in key is actually a matcher", func() {
- It("should pass each element through the matcher", func() {
- Expect(stringKeys).Should(HaveKey(ContainSubstring("oo")))
- Expect(stringKeys).ShouldNot(HaveKey(ContainSubstring("foobar")))
- })
-
- It("should fail if the matcher ever fails", func() {
- actual := map[int]string{1: "a", 3: "b", 2: "c"}
- success, err := (&HaveKeyMatcher{Key: ContainSubstring("ar")}).Match(actual)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed something that is not a map", func() {
- It("should error", func() {
- success, err := (&HaveKeyMatcher{Key: "foo"}).Match([]string{"foo"})
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&HaveKeyMatcher{Key: "foo"}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go
deleted file mode 100644
index 0a49ec993..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HaveKeyWithValue", func() {
- var (
- stringKeys map[string]int
- intKeys map[int]string
- objKeys map[*myCustomType]*myCustomType
-
- customA *myCustomType
- customB *myCustomType
- )
- BeforeEach(func() {
- stringKeys = map[string]int{"foo": 2, "bar": 3}
- intKeys = map[int]string{2: "foo", 3: "bar"}
-
- customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}}
- customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}
- objKeys = map[*myCustomType]*myCustomType{customA: customA, customB: customA}
- })
-
- Context("when passed a map", func() {
- It("should do the right thing", func() {
- Expect(stringKeys).Should(HaveKeyWithValue("foo", 2))
- Expect(stringKeys).ShouldNot(HaveKeyWithValue("foo", 1))
- Expect(stringKeys).ShouldNot(HaveKeyWithValue("baz", 2))
- Expect(stringKeys).ShouldNot(HaveKeyWithValue("baz", 1))
-
- Expect(intKeys).Should(HaveKeyWithValue(2, "foo"))
- Expect(intKeys).ShouldNot(HaveKeyWithValue(4, "foo"))
- Expect(intKeys).ShouldNot(HaveKeyWithValue(2, "baz"))
-
- Expect(objKeys).Should(HaveKeyWithValue(customA, customA))
- Expect(objKeys).Should(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}, &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}}))
- Expect(objKeys).ShouldNot(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}, customA))
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should operate succesfully on the passed in value", func() {
- var nilMap map[int]string
- Expect(nilMap).ShouldNot(HaveKeyWithValue("foo", "bar"))
- })
- })
-
- Context("when the passed in key or value is actually a matcher", func() {
- It("should pass each element through the matcher", func() {
- Expect(stringKeys).Should(HaveKeyWithValue(ContainSubstring("oo"), 2))
- Expect(intKeys).Should(HaveKeyWithValue(2, ContainSubstring("oo")))
- Expect(stringKeys).ShouldNot(HaveKeyWithValue(ContainSubstring("foobar"), 2))
- })
-
- It("should fail if the matcher ever fails", func() {
- actual := map[int]string{1: "a", 3: "b", 2: "c"}
- success, err := (&HaveKeyWithValueMatcher{Key: ContainSubstring("ar"), Value: 2}).Match(actual)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- otherActual := map[string]int{"a": 1, "b": 2, "c": 3}
- success, err = (&HaveKeyWithValueMatcher{Key: "a", Value: ContainSubstring("1")}).Match(otherActual)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed something that is not a map", func() {
- It("should error", func() {
- success, err := (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match([]string{"foo"})
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go
deleted file mode 100644
index c60f63886..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HaveLen", func() {
- Context("when passed a supported type", func() {
- It("should do the right thing", func() {
- Expect("").Should(HaveLen(0))
- Expect("AA").Should(HaveLen(2))
-
- Expect([0]int{}).Should(HaveLen(0))
- Expect([2]int{1, 2}).Should(HaveLen(2))
-
- Expect([]int{}).Should(HaveLen(0))
- Expect([]int{1, 2, 3}).Should(HaveLen(3))
-
- Expect(map[string]int{}).Should(HaveLen(0))
- Expect(map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}).Should(HaveLen(4))
-
- c := make(chan bool, 3)
- Expect(c).Should(HaveLen(0))
- c <- true
- c <- true
- Expect(c).Should(HaveLen(2))
- })
- })
-
- Context("when passed a correctly typed nil", func() {
- It("should operate succesfully on the passed in value", func() {
- var nilSlice []int
- Expect(nilSlice).Should(HaveLen(0))
-
- var nilMap map[int]string
- Expect(nilMap).Should(HaveLen(0))
- })
- })
-
- Context("when passed an unsupported type", func() {
- It("should error", func() {
- success, err := (&HaveLenMatcher{Count: 0}).Match(0)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&HaveLenMatcher{Count: 0}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index ebdd71786..bef00ae21 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -29,5 +29,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message
}
func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred")
+ return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred")
}
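The hunk above only rewords HaveOccurred's negated failure message: it now opens with "Unexpected error:" and closes with "occurred" instead of "Expected error: ... not to have occurred". A minimal sketch of how the new wording surfaces in a spec, written in the style of the test files deleted in this change and assuming a ginkgo suite bootstrap like the deleted matcher_tests_suite_test.go; the spec name and error text are made up for illustration and are not part of the vendored tree:

package matchers_test

import (
	"errors"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("HaveOccurred negated failure message (sketch)", func() {
	It("leads with the new 'Unexpected error:' wording", func() {
		// Force a failure: the error exists, but we assert it should not.
		failures := InterceptGomegaFailures(func() {
			Expect(errors.New("boom")).ShouldNot(HaveOccurred())
		})

		Expect(failures).Should(HaveLen(1))
		// With this change the captured message starts with "Unexpected error:"
		// and ends with "occurred" rather than "not to have occurred".
		Expect(failures[0]).Should(HavePrefix("Unexpected error:"))
		Expect(failures[0]).Should(HaveSuffix("occurred"))
	})
})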
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go
deleted file mode 100644
index 0ad632ec1..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package matchers_test
-
-import (
- "errors"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-type CustomErr struct {
- msg string
-}
-
-func (e *CustomErr) Error() string {
- return e.msg
-}
-
-var _ = Describe("HaveOccurred", func() {
- It("should succeed if matching an error", func() {
- Expect(errors.New("Foo")).Should(HaveOccurred())
- })
-
- It("should not succeed with nil", func() {
- Expect(nil).ShouldNot(HaveOccurred())
- })
-
- It("should only support errors and nil", func() {
- success, err := (&HaveOccurredMatcher{}).Match("foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&HaveOccurredMatcher{}).Match("")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
-
- It("doesn't support non-error type", func() {
- success, err := (&HaveOccurredMatcher{}).Match(AnyType{})
- Expect(success).Should(BeFalse())
- Expect(err).Should(MatchError("Expected an error-type. Got:\n <matchers_test.AnyType>: {}"))
- })
-
- It("doesn't support non-error pointer type", func() {
- success, err := (&HaveOccurredMatcher{}).Match(&AnyType{})
- Expect(success).Should(BeFalse())
- Expect(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`)))
- })
-
- It("should succeed with pointer types that conform to error interface", func() {
- err := &CustomErr{"ohai"}
- Expect(err).Should(HaveOccurred())
- })
-
- It("should not succeed with nil pointers to types that conform to error interface", func() {
- var err *CustomErr = nil
- Expect(err).ShouldNot(HaveOccurred())
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go
deleted file mode 100644
index fe29b7b5d..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HavePrefixMatcher", func() {
- Context("when actual is a string", func() {
- It("should match a string prefix", func() {
- Expect("Ab").Should(HavePrefix("A"))
- Expect("A").ShouldNot(HavePrefix("Ab"))
- })
- })
-
- Context("when the matcher is called with multiple arguments", func() {
- It("should pass the string and arguments to sprintf", func() {
- Expect("C3PO").Should(HavePrefix("C%dP", 3))
- })
- })
-
- Context("when actual is a stringer", func() {
- It("should call the stringer and match against the returned string", func() {
- Expect(&myStringer{a: "Ab"}).Should(HavePrefix("A"))
- })
- })
-
- Context("when actual is neither a string nor a stringer", func() {
- It("should error", func() {
- success, err := (&HavePrefixMatcher{Prefix: "2"}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go
deleted file mode 100644
index 2ae29821a..000000000
--- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("HaveSuffixMatcher", func() {
- Context("when actual is a string", func() {
- It("should match a string suffix", func() {
- Expect("Ab").Should(HaveSuffix("b"))
- Expect("A").ShouldNot(HaveSuffix("Ab"))
- })
- })
-
- Context("when the matcher is called with multiple arguments", func() {
- It("should pass the string and arguments to sprintf", func() {
- Expect("C3PO").Should(HaveSuffix("%dPO", 3))
- })
- })
-
- Context("when actual is a stringer", func() {
- It("should call the stringer and match against the returned string", func() {
- Expect(&myStringer{a: "Ab"}).Should(HaveSuffix("b"))
- })
- })
-
- Context("when actual is neither a string nor a stringer", func() {
- It("should error", func() {
- success, err := (&HaveSuffixMatcher{Suffix: "2"}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go
deleted file mode 100644
index 9bf89fc46..000000000
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package matchers_test
-
-import (
- "errors"
- "fmt"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-type CustomError struct {
-}
-
-func (c CustomError) Error() string {
- return "an error"
-}
-
-var _ = Describe("MatchErrorMatcher", func() {
- Context("When asserting against an error", func() {
- It("should succeed when matching with an error", func() {
- err := errors.New("an error")
- fmtErr := fmt.Errorf("an error")
- customErr := CustomError{}
-
- Expect(err).Should(MatchError(errors.New("an error")))
- Expect(err).ShouldNot(MatchError(errors.New("another error")))
-
- Expect(fmtErr).Should(MatchError(errors.New("an error")))
- Expect(customErr).Should(MatchError(CustomError{}))
- })
-
- It("should succeed when matching with a string", func() {
- err := errors.New("an error")
- fmtErr := fmt.Errorf("an error")
- customErr := CustomError{}
-
- Expect(err).Should(MatchError("an error"))
- Expect(err).ShouldNot(MatchError("another error"))
-
- Expect(fmtErr).Should(MatchError("an error"))
- Expect(customErr).Should(MatchError("an error"))
- })
-
- Context("when passed a matcher", func() {
- It("should pass if the matcher passes against the error string", func() {
- err := errors.New("error 123 abc")
-
- Expect(err).Should(MatchError(MatchRegexp(`\d{3}`)))
- })
-
- It("should fail if the matcher fails against the error string", func() {
- err := errors.New("no digits")
- Expect(err).ShouldNot(MatchError(MatchRegexp(`\d`)))
- })
- })
-
- It("should fail when passed anything else", func() {
- actualErr := errors.New("an error")
- _, err := (&MatchErrorMatcher{
- Expected: []byte("an error"),
- }).Match(actualErr)
- Expect(err).Should(HaveOccurred())
-
- _, err = (&MatchErrorMatcher{
- Expected: 3,
- }).Match(actualErr)
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed nil", func() {
- It("should fail", func() {
- _, err := (&MatchErrorMatcher{
- Expected: "an error",
- }).Match(nil)
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed a non-error", func() {
- It("should fail", func() {
- _, err := (&MatchErrorMatcher{
- Expected: "an error",
- }).Match("an error")
- Expect(err).Should(HaveOccurred())
-
- _, err = (&MatchErrorMatcher{
- Expected: "an error",
- }).Match(3)
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when passed an error that is also a string", func() {
- It("should use it as an error", func() {
- var e mockErr = "mockErr"
-
- // this fails if the matcher casts e to a string before comparison
- Expect(e).Should(MatchError(e))
- })
- })
-})
-
-type mockErr string
-
-func (m mockErr) Error() string { return string(m) }
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go
deleted file mode 100644
index 4a1a9db20..000000000
--- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package matchers_test
-
-import (
- "encoding/json"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("MatchJSONMatcher", func() {
- Context("When passed stringifiables", func() {
- It("should succeed if the JSON matches", func() {
- Expect("{}").Should(MatchJSON("{}"))
- Expect(`{"a":1}`).Should(MatchJSON(`{"a":1}`))
- Expect(`{
- "a":1
- }`).Should(MatchJSON(`{"a":1}`))
- Expect(`{"a":1, "b":2}`).Should(MatchJSON(`{"b":2, "a":1}`))
- Expect(`{"a":1}`).ShouldNot(MatchJSON(`{"b":2, "a":1}`))
-
- Expect(`{"a":"a", "b":"b"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b", "c":"c"}`))
- Expect(`{"a":"a", "b":"b", "c":"c"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b"}`))
-
- Expect(`{"a":null, "b":null}`).ShouldNot(MatchJSON(`{"c":"c", "d":"d"}`))
- Expect(`{"a":null, "b":null, "c":null}`).ShouldNot(MatchJSON(`{"a":null, "b":null, "d":null}`))
- })
-
- It("should work with byte arrays", func() {
- Expect([]byte("{}")).Should(MatchJSON([]byte("{}")))
- Expect("{}").Should(MatchJSON([]byte("{}")))
- Expect([]byte("{}")).Should(MatchJSON("{}"))
- })
-
- It("should work with json.RawMessage", func() {
- Expect([]byte(`{"a": 1}`)).Should(MatchJSON(json.RawMessage(`{"a": 1}`)))
- })
- })
-
- Context("when a key mismatch is found", func() {
- It("reports the first found mismatch", func() {
- subject := MatchJSONMatcher{JSONToMatch: `5`}
- actual := `7`
- subject.Match(actual)
-
- failureMessage := subject.FailureMessage(`7`)
- Expect(failureMessage).ToNot(ContainSubstring("first mismatched key"))
-
- subject = MatchJSONMatcher{JSONToMatch: `{"a": 1, "b.g": {"c": 2, "1": ["hello", "see ya"]}}`}
- actual = `{"a": 1, "b.g": {"c": 2, "1": ["hello", "goodbye"]}}`
- subject.Match(actual)
-
- failureMessage = subject.FailureMessage(actual)
- Expect(failureMessage).To(ContainSubstring(`first mismatched key: "b.g"."1"[1]`))
- })
- })
-
- Context("when the expected is not valid JSON", func() {
- It("should error and explain why", func() {
- success, err := (&MatchJSONMatcher{JSONToMatch: `{}`}).Match(`oops`)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid JSON"))
- })
- })
-
- Context("when the actual is not valid JSON", func() {
- It("should error and explain why", func() {
- success, err := (&MatchJSONMatcher{JSONToMatch: `oops`}).Match(`{}`)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid JSON"))
- })
- })
-
- Context("when the expected is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchJSONMatcher{JSONToMatch: 2}).Match("{}")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n <int>: 2"))
-
- success, err = (&MatchJSONMatcher{JSONToMatch: nil}).Match("{}")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n <nil>: nil"))
- })
- })
-
- Context("when the actual is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n <int>: 2"))
-
- success, err = (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n <nil>: nil"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go
deleted file mode 100644
index ac2538bb4..000000000
--- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("MatchRegexp", func() {
- Context("when actual is a string", func() {
- It("should match against the string", func() {
- Expect(" a2!bla").Should(MatchRegexp(`\d!`))
- Expect(" a2!bla").ShouldNot(MatchRegexp(`[A-Z]`))
- })
- })
-
- Context("when actual is a stringer", func() {
- It("should call the stringer and match agains the returned string", func() {
- Expect(&myStringer{a: "Abc3"}).Should(MatchRegexp(`[A-Z][a-z]+\d`))
- })
- })
-
- Context("when the matcher is called with multiple arguments", func() {
- It("should pass the string and arguments to sprintf", func() {
- Expect(" a23!bla").Should(MatchRegexp(`\d%d!`, 3))
- })
- })
-
- Context("when actual is neither a string nor a stringer", func() {
- It("should error", func() {
- success, err := (&MatchRegexpMatcher{Regexp: `\d`}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("when the passed in regexp fails to compile", func() {
- It("should error", func() {
- success, err := (&MatchRegexpMatcher{Regexp: "("}).Match("Foo")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index 3b412ce81..5c815f5af 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -70,7 +70,7 @@ func parseXmlContent(content string) (*xmlNode, error) {
if err == io.EOF {
break
}
- return nil, fmt.Errorf("failed to decode next token: %v", err)
+ return nil, fmt.Errorf("failed to decode next token: %v", err) // untested section
}
lastNodeIndex := len(allNodes) - 1
@@ -94,7 +94,7 @@ func parseXmlContent(content string) (*xmlNode, error) {
case xml.CharData:
lastNode.Content = append(lastNode.Content, tok.Copy()...)
case xml.Comment:
- lastNode.Comments = append(lastNode.Comments, tok.Copy())
+ lastNode.Comments = append(lastNode.Comments, tok.Copy()) // untested section
case xml.ProcInst:
lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy())
}
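The two "// untested section" markers added above flag the token-decode error path and the xml.Comment branch as uncovered. A hypothetical spec that would at least drive the comment branch, again in the style of the deleted test files and not part of this change:

package matchers_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("MatchXML with comments (sketch)", func() {
	It("parses a document that contains an XML comment", func() {
		// Matching the document against itself keeps the assertion valid
		// whether or not comments take part in the node comparison; the
		// parse still has to walk the xml.Comment branch marked above.
		withComment := `<a foo="bar"><!-- illustrative comment --></a>`
		Expect(withComment).Should(MatchXML(withComment))
	})
})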
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go
deleted file mode 100644
index 0b559b22e..000000000
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("MatchXMLMatcher", func() {
-
- var (
- sample_01 = readFileContents("test_data/xml/sample_01.xml")
- sample_02 = readFileContents("test_data/xml/sample_02.xml")
- sample_03 = readFileContents("test_data/xml/sample_03.xml")
- sample_04 = readFileContents("test_data/xml/sample_04.xml")
- sample_05 = readFileContents("test_data/xml/sample_05.xml")
- sample_06 = readFileContents("test_data/xml/sample_06.xml")
- sample_07 = readFileContents("test_data/xml/sample_07.xml")
- sample_08 = readFileContents("test_data/xml/sample_08.xml")
- sample_09 = readFileContents("test_data/xml/sample_09.xml")
- sample_10 = readFileContents("test_data/xml/sample_10.xml")
- sample_11 = readFileContents("test_data/xml/sample_11.xml")
- )
-
- Context("When passed stringifiables", func() {
- It("matches documents regardless of the attribute order", func() {
- a := `<a foo="bar" ka="boom"></a>`
- b := `<a ka="boom" foo="bar"></a>`
- Expect(b).Should(MatchXML(a))
- Expect(a).Should(MatchXML(b))
- })
-
- It("should succeed if the XML matches", func() {
- Expect(sample_01).Should(MatchXML(sample_01)) // same XML
- Expect(sample_01).Should(MatchXML(sample_02)) // same XML with blank lines
- Expect(sample_01).Should(MatchXML(sample_03)) // same XML with different formatting
- Expect(sample_01).ShouldNot(MatchXML(sample_04)) // same structures with different values
- Expect(sample_01).ShouldNot(MatchXML(sample_05)) // different structures
- Expect(sample_06).ShouldNot(MatchXML(sample_07)) // same xml names with different namespaces
- Expect(sample_07).ShouldNot(MatchXML(sample_08)) // same structures with different values
- Expect(sample_09).ShouldNot(MatchXML(sample_10)) // same structures with different attribute values
- Expect(sample_11).Should(MatchXML(sample_11)) // with non UTF-8 encoding
- })
-
- It("should work with byte arrays", func() {
- Expect([]byte(sample_01)).Should(MatchXML([]byte(sample_01)))
- Expect([]byte(sample_01)).Should(MatchXML(sample_01))
- Expect(sample_01).Should(MatchXML([]byte(sample_01)))
- })
- })
-
- Context("when the expected is not valid XML", func() {
- It("should error and explain why", func() {
- success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(`oops`)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid XML"))
- })
- })
-
- Context("when the actual is not valid XML", func() {
- It("should error and explain why", func() {
- success, err := (&MatchXMLMatcher{XMLToMatch: `oops`}).Match(sample_01)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid XML"))
- })
- })
-
- Context("when the expected is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchXMLMatcher{XMLToMatch: 2}).Match(sample_01)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n <int>: 2"))
-
- success, err = (&MatchXMLMatcher{XMLToMatch: nil}).Match(sample_01)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n <nil>: nil"))
- })
- })
-
- Context("when the actual is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <int>: 2"))
-
- success, err = (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <nil>: nil"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go
deleted file mode 100644
index 1b0044fd0..000000000
--- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("MatchYAMLMatcher", func() {
- Context("When passed stringifiables", func() {
- It("should succeed if the YAML matches", func() {
- Expect("---").Should(MatchYAML(""))
- Expect("a: 1").Should(MatchYAML(`{"a":1}`))
- Expect("a: 1\nb: 2").Should(MatchYAML(`{"b":2, "a":1}`))
- })
-
- It("should explain if the YAML does not match when it should", func() {
- message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).FailureMessage("b: 2")
- Expect(message).To(MatchRegexp(`Expected\s+<string>: b: 2\s+to match YAML of\s+<string>: a: 1`))
- })
-
- It("should normalise the expected and actual when explaining if the YAML does not match when it should", func() {
- message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).FailureMessage("{b: two}")
- Expect(message).To(MatchRegexp(`Expected\s+<string>: b: two\s+to match YAML of\s+<string>: a: one`))
- })
-
- It("should explain if the YAML matches when it should not", func() {
- message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).NegatedFailureMessage("a: 1")
- Expect(message).To(MatchRegexp(`Expected\s+<string>: a: 1\s+not to match YAML of\s+<string>: a: 1`))
- })
-
- It("should normalise the expected and actual when explaining if the YAML matches when it should not", func() {
- message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).NegatedFailureMessage("{a: one}")
- Expect(message).To(MatchRegexp(`Expected\s+<string>: a: one\s+not to match YAML of\s+<string>: a: one`))
- })
-
- It("should fail if the YAML does not match", func() {
- Expect("a: 1").ShouldNot(MatchYAML(`{"b":2, "a":1}`))
- })
-
- It("should work with byte arrays", func() {
- Expect([]byte("a: 1")).Should(MatchYAML([]byte("a: 1")))
- Expect("a: 1").Should(MatchYAML([]byte("a: 1")))
- Expect([]byte("a: 1")).Should(MatchYAML("a: 1"))
- })
- })
-
- Context("when the expected is not valid YAML", func() {
- It("should error and explain why", func() {
- success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match("good:\nbad")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Actual 'good:\nbad' should be valid YAML"))
- })
- })
-
- Context("when the actual is not valid YAML", func() {
- It("should error and explain why", func() {
- success, err := (&MatchYAMLMatcher{YAMLToMatch: "good:\nbad"}).Match("")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("Expected 'good:\nbad' should be valid YAML"))
- })
-
- It("errors when passed directly to Message", func() {
- Expect(func() {
- matcher := MatchYAMLMatcher{YAMLToMatch: "good"}
- matcher.FailureMessage("good:\nbad")
- }).To(Panic())
- })
- })
-
- Context("when the expected is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchYAMLMatcher{YAMLToMatch: 2}).Match("")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n <int>: 2"))
-
- success, err = (&MatchYAMLMatcher{YAMLToMatch: nil}).Match("")
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n <nil>: nil"))
- })
- })
-
- Context("when the actual is neither a string nor a stringer nor a byte array", func() {
- It("should error", func() {
- success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(2)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <int>: 2"))
-
- success, err = (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <nil>: nil"))
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go
deleted file mode 100644
index b5f76c995..000000000
--- a/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package matchers_test
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type myStringer struct {
- a string
-}
-
-func (s *myStringer) String() string {
- return s.a
-}
-
-type StringAlias string
-
-type myCustomType struct {
- s string
- n int
- f float32
- arr []string
-}
-
-func Test(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Gomega Matchers")
-}
-
-func readFileContents(filePath string) []byte {
- f := openFile(filePath)
- b, err := ioutil.ReadAll(f)
- if err != nil {
- panic(fmt.Errorf("failed to read file contents: %v", err))
- }
- return b
-}
-
-func openFile(filePath string) *os.File {
- f, err := os.Open(filePath)
- if err != nil {
- panic(fmt.Errorf("failed to open file: %v", err))
- }
- return f
-}
diff --git a/vendor/github.com/onsi/gomega/matchers/not_test.go b/vendor/github.com/onsi/gomega/matchers/not_test.go
deleted file mode 100644
index 06d3ebd17..000000000
--- a/vendor/github.com/onsi/gomega/matchers/not_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("NotMatcher", func() {
- Context("basic examples", func() {
- It("works", func() {
- Expect(input).To(Not(false1))
- Expect(input).To(Not(Not(true2)))
- Expect(input).ToNot(Not(true3))
- Expect(input).ToNot(Not(Not(false1)))
- Expect(input).To(Not(Not(Not(false2))))
- })
-
- It("fails on error", func() {
- failuresMessages := InterceptGomegaFailures(func() {
- Expect(input).To(Not(Panic()))
- })
- Expect(failuresMessages).To(Equal([]string{"PanicMatcher expects a function. Got:\n <string>: hi"}))
- })
- })
-
- Context("De Morgan's laws", func() {
- It("~(A && B) == ~A || ~B", func() {
- Expect(input).To(Not(And(false1, false2)))
- Expect(input).To(Or(Not(false1), Not(false2)))
- })
- It("~(A || B) == ~A && ~B", func() {
- Expect(input).To(Not(Or(false1, false2)))
- Expect(input).To(And(Not(false1), Not(false2)))
- })
- })
-
- Context("failure messages are opposite of original matchers' failure messages", func() {
- Context("when match fails", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(Not(HaveLen(2)), input, "not to have length 2")
- })
- })
-
- Context("when match succeeds, but expected it to fail", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(Not(Not(HaveLen(3))), input, "to have length 3")
- })
- })
- })
-
- Context("MatchMayChangeInTheFuture()", func() {
- It("Propagates value from wrapped matcher", func() {
- m := Not(Or()) // an empty Or() always returns false, and indicates it cannot change
- Expect(m.Match("anything")).To(BeTrue())
- Expect(m.(*NotMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse())
- })
- It("Defaults to true", func() {
- m := Not(Equal(1)) // Equal does not have this method
- Expect(m.Match(2)).To(BeTrue())
- Expect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) // defaults to true
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/or_test.go b/vendor/github.com/onsi/gomega/matchers/or_test.go
deleted file mode 100644
index 1f6dfaf61..000000000
--- a/vendor/github.com/onsi/gomega/matchers/or_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("OrMatcher", func() {
- It("works with positive cases", func() {
- Expect(input).To(Or(true1))
- Expect(input).To(Or(true1, true2))
- Expect(input).To(Or(true1, false1))
- Expect(input).To(Or(false1, true2))
- Expect(input).To(Or(true1, true2, true3))
- Expect(input).To(Or(true1, true2, false3))
- Expect(input).To(Or(true1, false2, true3))
- Expect(input).To(Or(false1, true2, true3))
- Expect(input).To(Or(true1, false2, false3))
- Expect(input).To(Or(false1, false2, true3))
-
- // use alias
- Expect(input).To(SatisfyAny(false1, false2, true3))
- })
-
- It("stops on errors", func() {
- failuresMessages := InterceptGomegaFailures(func() {
- Expect(input).To(Or(Panic(), true1))
- })
- Expect(failuresMessages).To(Equal([]string{"PanicMatcher expects a function. Got:\n <string>: hi"}))
- })
-
- It("works with negative cases", func() {
- Expect(input).ToNot(Or())
- Expect(input).ToNot(Or(false1))
- Expect(input).ToNot(Or(false1, false2))
- Expect(input).ToNot(Or(false1, false2, false3))
- })
-
- Context("failure messages", func() {
- Context("when match fails", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(Or(false1, false2), input,
- "To satisfy at least one of these matchers: [%!s(*matchers.HaveLenMatcher=&{1}) %!s(*matchers.EqualMatcher=&{hip})]")
- })
- })
-
- Context("when match succeeds, but expected it to fail", func() {
- It("gives a descriptive message", func() {
- verifyFailureMessage(Not(Or(true1, true2)), input, `not to have length 2`)
- })
- })
- })
-
- Context("MatchMayChangeInTheFuture", func() {
- Context("Match returned false", func() {
- It("returns true if any of the matchers could change", func() {
- // 3 matchers, all return false, and all could change
- m := Or(BeNil(), Equal("hip"), HaveLen(1))
- Expect(m.Match("hi")).To(BeFalse())
- Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true'
- })
- It("returns false if none of the matchers could change", func() {
- // empty Or() has the property of never matching, and never can change since there are no sub-matchers that could change
- m := Or()
- Expect(m.Match("anything")).To(BeFalse())
- Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse())
-
- // Or() with 3 sub-matchers that return false, and can't change
- m = Or(Or(), Or(), Or())
- Expect(m.Match("hi")).To(BeFalse())
- Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty Or()'s won't change
- })
- })
- Context("Match returned true", func() {
- Context("returns value of the successful matcher", func() {
- It("false if successful matcher not going to change", func() {
- // 3 matchers: 1st returns false, 2nd returns true and is not going to change, 3rd is never called
- m := Or(BeNil(), And(), Equal(1))
- Expect(m.Match("hi")).To(BeTrue())
- Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse())
- })
- It("true if successful matcher indicates it might change", func() {
- // 3 matchers: 1st returns false, 2nd returns true and "might" change, 3rd is never called
- m := Or(Not(BeNil()), Equal("hi"), Equal(1))
- Expect(m.Match("hi")).To(BeTrue())
- Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal("hi") indicates it might change
- })
- })
- })
- })
-})
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go
deleted file mode 100644
index 326bb10a4..000000000
--- a/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package matchers_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("Panic", func() {
- Context("when passed something that's not a function that takes zero arguments and returns nothing", func() {
- It("should error", func() {
- success, err := (&PanicMatcher{}).Match("foo")
- Expect(success).To(BeFalse())
- Expect(err).To(HaveOccurred())
-
- success, err = (&PanicMatcher{}).Match(nil)
- Expect(success).To(BeFalse())
- Expect(err).To(HaveOccurred())
-
- success, err = (&PanicMatcher{}).Match(func(foo string) {})
- Expect(success).To(BeFalse())
- Expect(err).To(HaveOccurred())
-
- success, err = (&PanicMatcher{}).Match(func() string { return "bar" })
- Expect(success).To(BeFalse())
- Expect(err).To(HaveOccurred())
- })
- })
-
- Context("when passed a function of the correct type", func() {
- It("should call the function and pass if the function panics", func() {
- Expect(func() { panic("ack!") }).To(Panic())
- Expect(func() {}).NotTo(Panic())
- })
- })
-
- Context("when assertion fails", func() {
- It("prints the object passed to Panic when negative", func() {
- failuresMessages := InterceptGomegaFailures(func() {
- Expect(func() { panic("ack!") }).NotTo(Panic())
- })
- Expect(failuresMessages).To(ConsistOf(ContainSubstring("not to panic, but panicked with\n <string>: ack!")))
- })
-
- It("prints simple message when positive", func() {
- failuresMessages := InterceptGomegaFailures(func() {
- Expect(func() {}).To(Panic())
- })
- Expect(failuresMessages).To(ConsistOf(MatchRegexp("Expected\n\\s+<func\\(\\)>: .+\nto panic")))
- })
- })
-})
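The panic matcher tests above depend on Go's defer/recover mechanism to observe a panic without crashing the test process. A minimal sketch of that underlying pattern (not the matcher's actual implementation):

```go
package main

import "fmt"

// didPanic runs fn and reports whether it panicked, along with the
// recovered value. This is the core mechanism a panic matcher builds on.
func didPanic(fn func()) (panicked bool, recovered interface{}) {
	defer func() {
		if r := recover(); r != nil {
			panicked = true
			recovered = r
		}
	}()
	fn()
	return
}

func main() {
	ok, val := didPanic(func() { panic("ack!") })
	fmt.Println(ok, val) // true ack!

	ok, _ = didPanic(func() {})
	fmt.Println(ok) // false
}
```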
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go
deleted file mode 100644
index cf04e85dd..000000000
--- a/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package matchers_test
-
-import (
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-type kungFuActor interface {
- DrunkenMaster() bool
-}
-
-type jackie struct {
- name string
-}
-
-func (j *jackie) DrunkenMaster() bool {
- return true
-}
-
-type someError struct{ s string }
-
-func (e *someError) Error() string { return e.s }
-
-var _ = Describe("ReceiveMatcher", func() {
- Context("with no argument", func() {
- Context("for a buffered channel", func() {
- It("should succeed", func() {
- channel := make(chan bool, 1)
-
- Expect(channel).ShouldNot(Receive())
-
- channel <- true
-
- Expect(channel).Should(Receive())
- })
- })
-
- Context("for an unbuffered channel", func() {
- It("should succeed (eventually)", func() {
- channel := make(chan bool)
-
- Expect(channel).ShouldNot(Receive())
-
- go func() {
- time.Sleep(10 * time.Millisecond)
- channel <- true
- }()
-
- Eventually(channel).Should(Receive())
- })
- })
- })
-
- Context("with a pointer argument", func() {
- Context("of the correct type", func() {
- Context("when the channel has an interface type", func() {
- It("should write the value received on the channel to the pointer", func() {
- channel := make(chan error, 1)
-
- var value *someError
-
- Ω(channel).ShouldNot(Receive(&value))
- Ω(value).Should(BeZero())
-
- channel <- &someError{"boooom!"}
-
- Ω(channel).Should(Receive(&value))
- Ω(value).Should(MatchError("boooom!"))
- })
- })
- })
-
- Context("of the correct type", func() {
- It("should write the value received on the channel to the pointer", func() {
- channel := make(chan int, 1)
-
- var value int
-
- Expect(channel).ShouldNot(Receive(&value))
- Expect(value).Should(BeZero())
-
- channel <- 17
-
- Expect(channel).Should(Receive(&value))
- Expect(value).Should(Equal(17))
- })
- })
-
- Context("to various types of objects", func() {
- It("should work", func() {
- //channels of strings
- stringChan := make(chan string, 1)
- stringChan <- "foo"
-
- var s string
- Expect(stringChan).Should(Receive(&s))
- Expect(s).Should(Equal("foo"))
-
- //channels of slices
- sliceChan := make(chan []bool, 1)
- sliceChan <- []bool{true, true, false}
-
- var sl []bool
- Expect(sliceChan).Should(Receive(&sl))
- Expect(sl).Should(Equal([]bool{true, true, false}))
-
- //channels of channels
- chanChan := make(chan chan bool, 1)
- c := make(chan bool)
- chanChan <- c
-
- var receivedC chan bool
- Expect(chanChan).Should(Receive(&receivedC))
- Expect(receivedC).Should(Equal(c))
-
- //channels of interfaces
- jackieChan := make(chan kungFuActor, 1)
- aJackie := &jackie{name: "Jackie Chan"}
- jackieChan <- aJackie
-
- var theJackie kungFuActor
- Expect(jackieChan).Should(Receive(&theJackie))
- Expect(theJackie).Should(Equal(aJackie))
- })
- })
-
- Context("of the wrong type", func() {
- It("should error", func() {
- channel := make(chan int, 1)
- channel <- 10
-
- var incorrectType bool
-
- success, err := (&ReceiveMatcher{Arg: &incorrectType}).Match(channel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- var notAPointer int
- success, err = (&ReceiveMatcher{Arg: notAPointer}).Match(channel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
- })
-
- Context("with a matcher", func() {
- It("should defer to the underlying matcher", func() {
- intChannel := make(chan int, 1)
- intChannel <- 3
- Expect(intChannel).Should(Receive(Equal(3)))
-
- intChannel <- 2
- Expect(intChannel).ShouldNot(Receive(Equal(3)))
-
- stringChannel := make(chan []string, 1)
- stringChannel <- []string{"foo", "bar", "baz"}
- Expect(stringChannel).Should(Receive(ContainElement(ContainSubstring("fo"))))
-
- stringChannel <- []string{"foo", "bar", "baz"}
- Expect(stringChannel).ShouldNot(Receive(ContainElement(ContainSubstring("archipelago"))))
- })
-
- It("should defer to the underlying matcher for the message", func() {
- matcher := Receive(Equal(3))
- channel := make(chan int, 1)
- channel <- 2
- matcher.Match(channel)
- Expect(matcher.FailureMessage(channel)).Should(MatchRegexp(`Expected\s+<int>: 2\s+to equal\s+<int>: 3`))
-
- channel <- 3
- matcher.Match(channel)
- Expect(matcher.NegatedFailureMessage(channel)).Should(MatchRegexp(`Expected\s+<int>: 3\s+not to equal\s+<int>: 3`))
- })
-
- It("should work just fine with Eventually", func() {
- stringChannel := make(chan string)
-
- go func() {
- time.Sleep(5 * time.Millisecond)
- stringChannel <- "A"
- time.Sleep(5 * time.Millisecond)
- stringChannel <- "B"
- }()
-
- Eventually(stringChannel).Should(Receive(Equal("B")))
- })
-
- Context("if the matcher errors", func() {
- It("should error", func() {
- channel := make(chan int, 1)
- channel <- 3
- success, err := (&ReceiveMatcher{Arg: ContainSubstring("three")}).Match(channel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Context("if nothing is received", func() {
- It("should fail", func() {
- channel := make(chan int, 1)
- success, err := (&ReceiveMatcher{Arg: Equal(1)}).Match(channel)
- Expect(success).Should(BeFalse())
- Expect(err).ShouldNot(HaveOccurred())
- })
- })
- })
-
- Context("When actual is a *closed* channel", func() {
- Context("for a buffered channel", func() {
- It("should work until it hits the end of the buffer", func() {
- channel := make(chan bool, 1)
- channel <- true
-
- close(channel)
-
- Expect(channel).Should(Receive())
- Expect(channel).ShouldNot(Receive())
- })
- })
-
- Context("for an unbuffered channel", func() {
- It("should always fail", func() {
- channel := make(chan bool)
- close(channel)
-
- Expect(channel).ShouldNot(Receive())
- })
- })
- })
-
- Context("When actual is a send-only channel", func() {
- It("should error", func() {
- channel := make(chan bool)
-
- var writerChannel chan<- bool
- writerChannel = channel
-
- success, err := (&ReceiveMatcher{}).Match(writerChannel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
-	Context("when actual is a non-channel", func() {
- It("should error", func() {
- var nilChannel chan bool
-
- success, err := (&ReceiveMatcher{}).Match(nilChannel)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&ReceiveMatcher{}).Match(nil)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
-
- success, err = (&ReceiveMatcher{}).Match(3)
- Expect(success).Should(BeFalse())
- Expect(err).Should(HaveOccurred())
- })
- })
-
- Describe("when used with eventually and a custom matcher", func() {
- It("should return the matcher's error when a failing value is received on the channel, instead of the must receive something failure", func() {
- failures := InterceptGomegaFailures(func() {
- c := make(chan string, 0)
- Eventually(c, 0.01).Should(Receive(Equal("hello")))
- })
- Expect(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something."))
-
- failures = InterceptGomegaFailures(func() {
- c := make(chan string, 1)
- c <- "hi"
- Eventually(c, 0.01).Should(Receive(Equal("hello")))
- })
- Expect(failures[0]).Should(ContainSubstring("<string>: hello"))
- })
- })
-
- Describe("Bailing early", func() {
- It("should bail early when passed a closed channel", func() {
- c := make(chan bool)
- close(c)
-
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(c).Should(Receive())
- })
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- Expect(failures).Should(HaveLen(1))
- })
-
- It("should bail early when passed a non-channel", func() {
- t := time.Now()
- failures := InterceptGomegaFailures(func() {
- Eventually(3).Should(Receive())
- })
- Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
- Expect(failures).Should(HaveLen(1))
- })
- })
-})
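The receive matcher tests above hinge on a non-blocking channel receive: the matcher must report immediately whether a value is ready and whether the channel has been closed, rather than blocking. A minimal sketch of that pattern using `select` with a `default` case (illustrative only; the vendored matcher handles arbitrary channel types via reflection):

```go
package main

import "fmt"

// tryReceive performs a non-blocking receive on ch and reports whether
// a value was ready and whether the channel was observed to be closed.
func tryReceive(ch chan bool) (value bool, received bool, closed bool) {
	select {
	case v, ok := <-ch:
		if !ok {
			return false, false, true // channel closed and drained
		}
		return v, true, false
	default:
		return false, false, false // nothing ready right now
	}
}

func main() {
	ch := make(chan bool, 1)
	_, received, _ := tryReceive(ch)
	fmt.Println(received) // false: buffer is empty

	ch <- true
	v, received, _ := tryReceive(ch)
	fmt.Println(v, received) // true true

	close(ch)
	_, _, closedNow := tryReceive(ch)
	fmt.Println(closedNow) // true: a closed channel yields no more values
}
```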
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go
deleted file mode 100644
index e42dd8a6e..000000000
--- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package matchers_test
-
-import (
- "errors"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
- "regexp"
-)
-
-func Erroring() error {
- return errors.New("bam")
-}
-
-func NotErroring() error {
- return nil
-}
-
-type AnyType struct{}
-
-func Invalid() *AnyType {
- return nil
-}
-
-var _ = Describe("Succeed", func() {
- It("should succeed if the function succeeds", func() {
- Expect(NotErroring()).Should(Succeed())
- })
-
- It("should succeed (in the negated) if the function errored", func() {
- Expect(Erroring()).ShouldNot(Succeed())
- })
-
-	It("should not succeed if passed a non-error", func() {
- success, err := (&SucceedMatcher{}).Match(Invalid())
- Expect(success).Should(BeFalse())
- Expect(err).Should(MatchError("Expected an error-type. Got:\n <*matchers_test.AnyType | 0x0>: nil"))
- })
-
- It("doesn't support non-error type", func() {
- success, err := (&SucceedMatcher{}).Match(AnyType{})
- Expect(success).Should(BeFalse())
- Expect(err).Should(MatchError("Expected an error-type. Got:\n <matchers_test.AnyType>: {}"))
- })
-
- It("doesn't support non-error pointer type", func() {
- success, err := (&SucceedMatcher{}).Match(&AnyType{})
- Expect(success).Should(BeFalse())
- Expect(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`)))
- })
-
- It("should not succeed with pointer types that conform to error interface", func() {
- err := &CustomErr{"ohai"}
- Expect(err).ShouldNot(Succeed())
- })
-
- It("should succeed with nil pointers to types that conform to error interface", func() {
- var err *CustomErr = nil
- Expect(err).Should(Succeed())
- })
-
- It("builds failure message", func() {
- actual := Succeed().FailureMessage(errors.New("oops"))
- actual = regexp.MustCompile(" 0x.*>").ReplaceAllString(actual, " 0x00000000>")
- Expect(actual).To(Equal("Expected success, but got an error:\n <*errors.errorString | 0x00000000>: {s: \"oops\"}\n oops"))
- })
-
- It("builds negated failure message", func() {
- actual := Succeed().NegatedFailureMessage(123)
- Expect(actual).To(Equal("Expected failure, but got no error."))
- })
-})
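The succeed matcher tests above touch a classic Go subtlety: a nil `*CustomErr` stored in an `error` interface compares non-equal to nil, so error-checking matchers typically inspect the value with reflection rather than a plain `== nil` comparison. A short illustration of the gotcha (the `CustomErr` here is a stand-in type, not the one from the deleted test suite):

```go
package main

import (
	"fmt"
	"reflect"
)

// CustomErr is a stand-in pointer type that implements error.
type CustomErr struct{ msg string }

func (e *CustomErr) Error() string { return e.msg }

// isNilError reports whether err is nil, including the
// "typed nil pointer stored in an interface" case.
func isNilError(err error) bool {
	if err == nil {
		return true
	}
	v := reflect.ValueOf(err)
	return v.Kind() == reflect.Ptr && v.IsNil()
}

func main() {
	var p *CustomErr // typed nil pointer
	var err error = p

	fmt.Println(err == nil)      // false: the interface holds a non-nil type descriptor
	fmt.Println(isNilError(err)) // true: the underlying pointer is nil
}
```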
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml
deleted file mode 100644
index 90f0a1b45..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<note>
- <to>Tove</to>
- <from>Jani</from>
- <heading>Reminder</heading>
- <body>Don't forget me this weekend!</body>
-</note> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml
deleted file mode 100644
index 3863b83c3..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-<note>
- <to>Tove</to>
- <from>Jani</from>
- <heading>Reminder</heading>
- <body>Don't forget me this weekend!</body>
-</note>
-
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml
deleted file mode 100644
index a491c213c..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml
+++ /dev/null
@@ -1 +0,0 @@
-<note> <to>Tove</to> <from>Jani</from> <heading>Reminder</heading> <body>Don't forget me this weekend!</body> </note>
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml
deleted file mode 100644
index dcfd3db03..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<note>
- <to>Tove</to>
- <from>John</from>
- <heading>Doe</heading>
- <body>Don't forget me this weekend!</body>
-</note> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml
deleted file mode 100644
index de15a6a55..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml
+++ /dev/null
@@ -1,211 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<CATALOG>
- <CD>
- <TITLE>Empire Burlesque</TITLE>
- <ARTIST>Bob Dylan</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>Columbia</COMPANY>
- <PRICE>10.90</PRICE>
- <YEAR>1985</YEAR>
- </CD>
- <CD>
- <TITLE>Hide your heart</TITLE>
- <ARTIST>Bonnie Tyler</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>CBS Records</COMPANY>
- <PRICE>9.90</PRICE>
- <YEAR>1988</YEAR>
- </CD>
- <CD>
- <TITLE>Greatest Hits</TITLE>
- <ARTIST>Dolly Parton</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>RCA</COMPANY>
- <PRICE>9.90</PRICE>
- <YEAR>1982</YEAR>
- </CD>
- <CD>
- <TITLE>Still got the blues</TITLE>
- <ARTIST>Gary Moore</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Virgin records</COMPANY>
- <PRICE>10.20</PRICE>
- <YEAR>1990</YEAR>
- </CD>
- <CD>
- <TITLE>Eros</TITLE>
- <ARTIST>Eros Ramazzotti</ARTIST>
- <COUNTRY>EU</COUNTRY>
- <COMPANY>BMG</COMPANY>
- <PRICE>9.90</PRICE>
- <YEAR>1997</YEAR>
- </CD>
- <CD>
- <TITLE>One night only</TITLE>
- <ARTIST>Bee Gees</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Polydor</COMPANY>
- <PRICE>10.90</PRICE>
- <YEAR>1998</YEAR>
- </CD>
- <CD>
- <TITLE>Sylvias Mother</TITLE>
- <ARTIST>Dr.Hook</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>CBS</COMPANY>
- <PRICE>8.10</PRICE>
- <YEAR>1973</YEAR>
- </CD>
- <CD>
- <TITLE>Maggie May</TITLE>
- <ARTIST>Rod Stewart</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Pickwick</COMPANY>
- <PRICE>8.50</PRICE>
- <YEAR>1990</YEAR>
- </CD>
- <CD>
- <TITLE>Romanza</TITLE>
- <ARTIST>Andrea Bocelli</ARTIST>
- <COUNTRY>EU</COUNTRY>
- <COMPANY>Polydor</COMPANY>
- <PRICE>10.80</PRICE>
- <YEAR>1996</YEAR>
- </CD>
- <CD>
- <TITLE>When a man loves a woman</TITLE>
- <ARTIST>Percy Sledge</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>Atlantic</COMPANY>
- <PRICE>8.70</PRICE>
- <YEAR>1987</YEAR>
- </CD>
- <CD>
- <TITLE>Black angel</TITLE>
- <ARTIST>Savage Rose</ARTIST>
- <COUNTRY>EU</COUNTRY>
- <COMPANY>Mega</COMPANY>
- <PRICE>10.90</PRICE>
- <YEAR>1995</YEAR>
- </CD>
- <CD>
- <TITLE>1999 Grammy Nominees</TITLE>
- <ARTIST>Many</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>Grammy</COMPANY>
- <PRICE>10.20</PRICE>
- <YEAR>1999</YEAR>
- </CD>
- <CD>
- <TITLE>For the good times</TITLE>
- <ARTIST>Kenny Rogers</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Mucik Master</COMPANY>
- <PRICE>8.70</PRICE>
- <YEAR>1995</YEAR>
- </CD>
- <CD>
- <TITLE>Big Willie style</TITLE>
- <ARTIST>Will Smith</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>Columbia</COMPANY>
- <PRICE>9.90</PRICE>
- <YEAR>1997</YEAR>
- </CD>
- <CD>
- <TITLE>Tupelo Honey</TITLE>
- <ARTIST>Van Morrison</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Polydor</COMPANY>
- <PRICE>8.20</PRICE>
- <YEAR>1971</YEAR>
- </CD>
- <CD>
- <TITLE>Soulsville</TITLE>
- <ARTIST>Jorn Hoel</ARTIST>
- <COUNTRY>Norway</COUNTRY>
- <COMPANY>WEA</COMPANY>
- <PRICE>7.90</PRICE>
- <YEAR>1996</YEAR>
- </CD>
- <CD>
- <TITLE>The very best of</TITLE>
- <ARTIST>Cat Stevens</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Island</COMPANY>
- <PRICE>8.90</PRICE>
- <YEAR>1990</YEAR>
- </CD>
- <CD>
- <TITLE>Stop</TITLE>
- <ARTIST>Sam Brown</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>A and M</COMPANY>
- <PRICE>8.90</PRICE>
- <YEAR>1988</YEAR>
- </CD>
- <CD>
- <TITLE>Bridge of Spies</TITLE>
- <ARTIST>T'Pau</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Siren</COMPANY>
- <PRICE>7.90</PRICE>
- <YEAR>1987</YEAR>
- </CD>
- <CD>
- <TITLE>Private Dancer</TITLE>
- <ARTIST>Tina Turner</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>Capitol</COMPANY>
- <PRICE>8.90</PRICE>
- <YEAR>1983</YEAR>
- </CD>
- <CD>
- <TITLE>Midt om natten</TITLE>
- <ARTIST>Kim Larsen</ARTIST>
- <COUNTRY>EU</COUNTRY>
- <COMPANY>Medley</COMPANY>
- <PRICE>7.80</PRICE>
- <YEAR>1983</YEAR>
- </CD>
- <CD>
- <TITLE>Pavarotti Gala Concert</TITLE>
- <ARTIST>Luciano Pavarotti</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>DECCA</COMPANY>
- <PRICE>9.90</PRICE>
- <YEAR>1991</YEAR>
- </CD>
- <CD>
- <TITLE>The dock of the bay</TITLE>
- <ARTIST>Otis Redding</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>Stax Records</COMPANY>
- <PRICE>7.90</PRICE>
- <YEAR>1968</YEAR>
- </CD>
- <CD>
- <TITLE>Picture book</TITLE>
- <ARTIST>Simply Red</ARTIST>
- <COUNTRY>EU</COUNTRY>
- <COMPANY>Elektra</COMPANY>
- <PRICE>7.20</PRICE>
- <YEAR>1985</YEAR>
- </CD>
- <CD>
- <TITLE>Red</TITLE>
- <ARTIST>The Communards</ARTIST>
- <COUNTRY>UK</COUNTRY>
- <COMPANY>London</COMPANY>
- <PRICE>7.80</PRICE>
- <YEAR>1987</YEAR>
- </CD>
- <CD>
- <TITLE>Unchain my heart</TITLE>
- <ARTIST>Joe Cocker</ARTIST>
- <COUNTRY>USA</COUNTRY>
- <COMPANY>EMI</COMPANY>
- <PRICE>8.20</PRICE>
- <YEAR>1987</YEAR>
- </CD>
-</CATALOG>
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml
deleted file mode 100644
index 4ba90fb97..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<root>
- <table>
- <tr>
- <td>Apples</td>
- <td>Bananas</td>
- </tr>
- </table>
- <table>
- <name>African Coffee Table</name>
- <width>80</width>
- <length>120</length>
- </table>
-</root> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml
deleted file mode 100644
index 34b9e9775..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<root>
- <h:table xmlns:h="http://www.w3.org/TR/html4/">
- <h:tr>
- <h:td>Apples</h:td>
- <h:td>Bananas</h:td>
- </h:tr>
- </h:table>
- <f:table xmlns:f="https://www.w3schools.com/furniture">
- <f:name>African Coffee Table</f:name>
- <f:width>80</f:width>
- <f:length>120</f:length>
- </f:table>
-</root> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml
deleted file mode 100644
index ccaee4e1a..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<root>
- <h:table xmlns:h="http://www.w3.org/TR/html4/">
- <h:tr>
- <h:td>Apples</h:td>
- <h:td>Oranges</h:td>
- </h:tr>
- </h:table>
- <f:table xmlns:f="https://www.w3schools.com/furniture">
- <f:name>African Coffee Table</f:name>
- <f:width>80</f:width>
- <f:length>120</f:length>
- </f:table>
-</root> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml
deleted file mode 100644
index 531f84d3f..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<person gender="female">
- <firstname>Foo</firstname>
- <lastname>Bar</lastname>
-</person> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml
deleted file mode 100644
index b1e1e1fbe..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<person gender="male">
- <firstname>Foo</firstname>
- <lastname>Bar</lastname>
-</person> \ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml
deleted file mode 100644
index 3132b0f90..000000000
--- a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>
-<note>
- <to>Tove</to>
- <from>Jani</from>
- <heading>Reminder</heading>
- <body>Don't forget me this weekend!</body>
-</note>
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform_test.go b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go
deleted file mode 100644
index e52bf8e63..000000000
--- a/vendor/github.com/onsi/gomega/matchers/with_transform_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package matchers_test
-
-import (
- "errors"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- . "github.com/onsi/gomega/matchers"
-)
-
-var _ = Describe("WithTransformMatcher", func() {
-
- var plus1 = func(i int) int { return i + 1 }
-
- Context("Panic if transform function invalid", func() {
- panicsWithTransformer := func(transform interface{}) {
- ExpectWithOffset(1, func() { WithTransform(transform, nil) }).To(Panic())
- }
- It("nil", func() {
- panicsWithTransformer(nil)
- })
- Context("Invalid number of args, but correct return value count", func() {
- It("zero", func() {
- panicsWithTransformer(func() int { return 5 })
- })
- It("two", func() {
- panicsWithTransformer(func(i, j int) int { return 5 })
- })
- })
- Context("Invalid number of return values, but correct number of arguments", func() {
- It("zero", func() {
- panicsWithTransformer(func(i int) {})
- })
- It("two", func() {
- panicsWithTransformer(func(i int) (int, int) { return 5, 6 })
- })
- })
- })
-
- It("works with positive cases", func() {
- Expect(1).To(WithTransform(plus1, Equal(2)))
- Expect(1).To(WithTransform(plus1, WithTransform(plus1, Equal(3))))
- Expect(1).To(WithTransform(plus1, And(Equal(2), BeNumerically(">", 1))))
-
- // transform expects custom type
- type S struct {
- A int
- B string
- }
- transformer := func(s S) string { return s.B }
- Expect(S{1, "hi"}).To(WithTransform(transformer, Equal("hi")))
-
- // transform expects interface
- errString := func(e error) string { return e.Error() }
- Expect(errors.New("abc")).To(WithTransform(errString, Equal("abc")))
- })
-
- It("works with negative cases", func() {
- Expect(1).ToNot(WithTransform(plus1, Equal(3)))
- Expect(1).ToNot(WithTransform(plus1, WithTransform(plus1, Equal(2))))
- })
-
- Context("failure messages", func() {
- Context("when match fails", func() {
- It("gives a descriptive message", func() {
- m := WithTransform(plus1, Equal(3))
- Expect(m.Match(1)).To(BeFalse())
- Expect(m.FailureMessage(1)).To(Equal("Expected\n <int>: 2\nto equal\n <int>: 3"))
- })
- })
-
- Context("when match succeeds, but expected it to fail", func() {
- It("gives a descriptive message", func() {
- m := Not(WithTransform(plus1, Equal(3)))
- Expect(m.Match(2)).To(BeFalse())
- Expect(m.FailureMessage(2)).To(Equal("Expected\n <int>: 3\nnot to equal\n <int>: 3"))
- })
- })
-
- Context("actual value is incompatible with transform function's argument type", func() {
- It("gracefully fails if transform cannot be performed", func() {
- m := WithTransform(plus1, Equal(3))
- result, err := m.Match("hi") // give it a string but transform expects int; doesn't panic
- Expect(result).To(BeFalse())
- Expect(err).To(MatchError("Transform function expects 'int' but we have 'string'"))
- })
- })
- })
-
- Context("MatchMayChangeInTheFuture()", func() {
- It("Propagates value from wrapped matcher on the transformed value", func() {
- m := WithTransform(plus1, Or()) // empty Or() always returns false, and indicates it cannot change
- Expect(m.Match(1)).To(BeFalse())
- Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeFalse()) // empty Or() indicates cannot change
- })
- It("Defaults to true", func() {
- m := WithTransform(plus1, Equal(2)) // Equal does not have this method
- Expect(m.Match(1)).To(BeTrue())
- Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeTrue()) // defaults to true
- })
- })
-})
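The transform matcher tests above rely on calling an arbitrary one-argument, one-result transform function via reflection, and on rejecting actual values whose type does not match the function's parameter instead of panicking. A minimal sketch of that reflection check (illustrative, not gomega's actual code):

```go
package main

import (
	"fmt"
	"reflect"
)

// applyTransform validates that fn is a func(T) U and that actual is
// assignable to T, then calls it and returns the transformed value.
func applyTransform(fn interface{}, actual interface{}) (interface{}, error) {
	fv := reflect.ValueOf(fn)
	ft := fv.Type()
	if ft.Kind() != reflect.Func || ft.NumIn() != 1 || ft.NumOut() != 1 {
		return nil, fmt.Errorf("transform must be a one-in, one-out function")
	}
	av := reflect.ValueOf(actual)
	if !av.Type().AssignableTo(ft.In(0)) {
		return nil, fmt.Errorf("transform function expects '%s' but we have '%s'", ft.In(0), av.Type())
	}
	return fv.Call([]reflect.Value{av})[0].Interface(), nil
}

func main() {
	plus1 := func(i int) int { return i + 1 }

	out, err := applyTransform(plus1, 1)
	fmt.Println(out, err) // 2 <nil>

	_, err = applyTransform(plus1, "hi")
	fmt.Println(err) // type mismatch reported gracefully instead of panicking
}
```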
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
new file mode 100644
index 000000000..ba611cb21
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.mailmap
@@ -0,0 +1 @@
+Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
new file mode 100644
index 000000000..45fa4b9ec
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
@@ -0,0 +1,12 @@
+approve_by_comment: true
+approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
+reject_regex: ^Rejected
+reset_on_push: true
+author_approval: ignored
+signed_off_by:
+ required: true
+reviewers:
+ teams:
+ - go-digest-maintainers
+ name: default
+ required: 2
diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml
new file mode 100644
index 000000000..7ea4ed1d2
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+go:
+ - 1.7
+ - master
diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
new file mode 100644
index 000000000..e4d962ac1
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
@@ -0,0 +1,72 @@
+# Contributing to Docker open source projects
+
+Want to hack on this project? Awesome! Here are instructions to get you started.
+
+This project is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+For an in-depth description of our contribution process, visit the
+contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE.code
index 0ea3ff81e..0ea3ff81e 100644
--- a/vendor/github.com/opencontainers/go-digest/LICENSE
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE.code
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
new file mode 100644
index 000000000..42a29795d
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
@@ -0,0 +1,9 @@
+Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
+Brandon Philips <brandon.philips@coreos.com> (@philips)
+Brendan Burns <bburns@microsoft.com> (@brendandburns)
+Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
+Jason Bouzane <jbouzane@google.com> (@jbouzane)
+John Starks <jostarks@microsoft.com> (@jstarks)
+Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
+Stephen Day <stephen.day@docker.com> (@stevvooe)
+Vincent Batts <vbatts@redhat.com> (@vbatts)
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
index 25aac3470..0f5a04092 100644
--- a/vendor/github.com/opencontainers/go-digest/README.md
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -101,4 +101,4 @@ the various OCI projects).
# Copyright and license
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md
deleted file mode 100644
index 5ab5554e4..000000000
--- a/vendor/github.com/opencontainers/image-spec/README.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# OCI Image Format Specification
-<div>
-<a href="https://travis-ci.org/opencontainers/image-spec">
-<img src="https://travis-ci.org/opencontainers/image-spec.svg?branch=master"></img>
-</a>
-</div>
-
-The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format).
-
-**[The specification can be found here](spec.md).**
-
-This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema).
-The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported.
-
-Additional documentation about how this group operates:
-
-- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md)
-- [Roadmap](#roadmap)
-- [Releases](RELEASES.md)
-- [Project Documentation](project.md)
-
-The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
-
-## Running an OCI Image
-
-The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec).
-The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk.
-At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle.
-At this point the OCI Runtime Bundle would be run by an OCI Runtime.
-
-This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments:
-
-* docker run example.com/org/app:v1.0.0
-* rkt run example.com/org/app,version=v1.0.0
-
-To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc).
-
-## FAQ
-
-**Q: Why doesn't this project mention distribution?**
-
-A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
-There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress.
-
-**Q: What happens to AppC or Docker Image Formats?**
-
-A: Existing formats can continue to be a proving ground for technologies, as needed.
-The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility, as the deb and rpm formats have.
-
-Find more [FAQ on the OCI site](https://www.opencontainers.org/faq).
-
-## Roadmap
-
-The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016.
-
-# Contributing
-
-Development happens on GitHub for the spec.
-Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
-
-The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository.
-
-## Discuss your design
-
-The project welcomes submissions, but please let everyone know what you are working on.
-
-Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
-This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
-It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
-
-Typos and grammatical errors can go straight to a pull-request.
-When in doubt, start on the [mailing-list](#mailing-list).
-
-## Weekly Call
-
-The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific).
-Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed).
-An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
-Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes].
-
-## Mailing List
-
-You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev).
-
-## IRC
-
-OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
-
-## Markdown style
-
-To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line.
-This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length.
-For example, this paragraph will span three lines in the Markdown source.
-
-## Git commit
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
-The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith <joe@gmail.com>
-
-using your real name (sorry, no pseudonyms or anonymous contributions.)
-
-You can add the sign off when creating the git commit via `git commit -s`.
-
-### Commit Style
-
-Simple house-keeping for clean git history.
-Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
-
-1. Separate the subject from body with a blank line
-2. Limit the subject line to 50 characters
-3. Capitalize the subject line
-4. Do not end the subject line with a period
-5. Use the imperative mood in the subject line
-6. Wrap the body at 72 characters
-7. Use the body to explain what and why vs. how
- * If there was important/useful/essential conversation or information, copy or include a reference
-8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...")
-
-
-[UberConference]: https://www.uberconference.com/opencontainers
-[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
-[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
index e3eee29b4..5d493df23 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -22,7 +22,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
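For context on the hunk above: version constants in this style are typically composed into a single version string elsewhere in the same package, so the patch bump changes the reported version from 1.0.0 to 1.0.1. A hedged sketch of how such constants usually combine (the exact expression in the vendored file may differ):

```go
package main

import "fmt"

// Illustrative reconstruction of specs-go/version.go style constants;
// the diff above only changes the patch component from 0 to 1.
const (
	VersionMajor = 1
	VersionMinor = 0
	VersionPatch = 1
	VersionDev   = "" // empty for releases
)

func main() {
	version := fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
	fmt.Println(version) // 1.0.1
}
```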
diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md
deleted file mode 100644
index e755fb7bc..000000000
--- a/vendor/github.com/opencontainers/runc/README.md
+++ /dev/null
@@ -1,270 +0,0 @@
-# runc
-
-[![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc)
-[![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc)
-[![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc)
-
-## Introduction
-
-`runc` is a CLI tool for spawning and running containers according to the OCI specification.
-
-## Releases
-
-`runc` depends on and tracks the [runtime-spec](https://github.com/opencontainers/runtime-spec) repository.
-We will try to make sure that `runc` and the OCI specification major versions stay in lockstep.
-This means that `runc` 1.0.0 should implement the 1.0 version of the specification.
-
-You can find official releases of `runc` on the [release](https://github.com/opencontainers/runc/releases) page.
-
-### Security
-
-If you wish to report a security issue, please disclose the issue responsibly
-to security@opencontainers.org.
-
-## Building
-
-`runc` currently supports the Linux platform with various architecture support.
-It must be built with Go version 1.6 or higher in order for some features to function properly.
-
-In order to enable seccomp support you will need to install `libseccomp` on your platform.
-> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
-
-Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
-
-```bash
-# create a 'github.com/opencontainers' in your GOPATH/src
-cd github.com/opencontainers
-git clone https://github.com/opencontainers/runc
-cd runc
-
-make
-sudo make install
-```
-
-You can also use `go get` to install to your `GOPATH`, assuming that you have a `github.com` parent folder already created under `src`:
-
-```bash
-go get github.com/opencontainers/runc
-cd $GOPATH/src/github.com/opencontainers/runc
-make
-sudo make install
-```
-
-`runc` will be installed to `/usr/local/sbin/runc` on your system.
-
-
-#### Build Tags
-
-`runc` supports optional build tags for compiling support of various features.
-To add build tags to the make option the `BUILDTAGS` variable must be set.
-
-```bash
-make BUILDTAGS='seccomp apparmor'
-```
-
-| Build Tag | Feature | Dependency |
-|-----------|------------------------------------|-------------|
-| seccomp | Syscall filtering | libseccomp |
-| selinux | selinux process and mount labeling | <none> |
-| apparmor | apparmor profile support | <none> |
-| ambient | ambient capability support | kernel 4.3 |
-| nokmem    | disable kernel memory accounting   | <none>      |
-
-
-### Running the test suite
-
-`runc` currently supports running its test suite via Docker.
-To run the suite just type `make test`.
-
-```bash
-make test
-```
-
-There are additional make targets for running the tests outside of a container, but this is not recommended as the tests are written with the expectation that they can write to and remove files anywhere on the filesystem.
-
-You can run a specific test case by setting the `TESTFLAGS` variable.
-
-```bash
-# make test TESTFLAGS="-run=SomeTestFunction"
-```
-
-You can run a specific integration test by setting the `TESTPATH` variable.
-
-```bash
-# make test TESTPATH="/checkpoint.bats"
-```
-
-You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables.
-
-```bash
-# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/"
-```
-
-### Dependencies Management
-
-`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management.
-Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
-new dependencies.
-
-## Using runc
-
-### Creating an OCI Bundle
-
-In order to use runc you must have your container in the format of an OCI bundle.
-If you have Docker installed you can use its `export` method to acquire a root filesystem from an existing Docker container.
-
-```bash
-# create the top most bundle directory
-mkdir /mycontainer
-cd /mycontainer
-
-# create the rootfs directory
-mkdir rootfs
-
-# export busybox via Docker into the rootfs directory
-docker export $(docker create busybox) | tar -C rootfs -xvf -
-```
-
-After a root filesystem is populated you just generate a spec in the format of a `config.json` file inside your bundle.
-`runc` provides a `spec` command to generate a base template spec that you are then able to edit.
-To find features and documentation for fields in the spec please refer to the [specs](https://github.com/opencontainers/runtime-spec) repository.
-
-```bash
-runc spec
-```
-
-### Running Containers
-
-Assuming you have an OCI bundle from the previous step you can execute the container in two different ways.
-
-The first way is to use the convenience command `run` that will handle creating, starting, and deleting the container after it exits.
-
-```bash
-# run as root
-cd /mycontainer
-runc run mycontainerid
-```
-
-If you used the unmodified `runc spec` template this should give you a `sh` session inside the container.
-
-The second way to start a container is to use the spec's lifecycle operations.
-This gives you more power over how the container is created and managed while it is running.
-This will also launch the container in the background, so for the simple examples here you will have to edit the `config.json` to remove the `terminal` setting.
-Your `process` field in the `config.json` should look like the example below, with `"terminal": false` and `"args": ["sleep", "5"]`.
-
-
-```json
- "process": {
- "terminal": false,
- "user": {
- "uid": 0,
- "gid": 0
- },
- "args": [
- "sleep", "5"
- ],
- "env": [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
- "TERM=xterm"
- ],
- "cwd": "/",
- "capabilities": {
- "bounding": [
- "CAP_AUDIT_WRITE",
- "CAP_KILL",
- "CAP_NET_BIND_SERVICE"
- ],
- "effective": [
- "CAP_AUDIT_WRITE",
- "CAP_KILL",
- "CAP_NET_BIND_SERVICE"
- ],
- "inheritable": [
- "CAP_AUDIT_WRITE",
- "CAP_KILL",
- "CAP_NET_BIND_SERVICE"
- ],
- "permitted": [
- "CAP_AUDIT_WRITE",
- "CAP_KILL",
- "CAP_NET_BIND_SERVICE"
- ],
- "ambient": [
- "CAP_AUDIT_WRITE",
- "CAP_KILL",
- "CAP_NET_BIND_SERVICE"
- ]
- },
- "rlimits": [
- {
- "type": "RLIMIT_NOFILE",
- "hard": 1024,
- "soft": 1024
- }
- ],
- "noNewPrivileges": true
- },
-```
-
-Now we can go through the lifecycle operations in your shell.
-
-
-```bash
-# run as root
-cd /mycontainer
-runc create mycontainerid
-
-# view the container is created and in the "created" state
-runc list
-
-# start the process inside the container
-runc start mycontainerid
-
-# after 5 seconds view that the container has exited and is now in the stopped state
-runc list
-
-# now delete the container
-runc delete mycontainerid
-```
-
-This allows higher-level systems to augment the container's creation logic with setup of various settings after the container is created and/or before it is deleted. For example, the container's network stack is commonly set up after `create` but before `start`.
-
-#### Rootless containers
-`runc` has the ability to run containers without root privileges. This is called `rootless`. You need to pass some parameters to `runc` in order to run rootless containers. See below and compare with the previous example. Run the following commands as an ordinary user:
-```bash
-# Same as the first example
-mkdir ~/mycontainer
-cd ~/mycontainer
-mkdir rootfs
-docker export $(docker create busybox) | tar -C rootfs -xvf -
-
-# The --rootless parameter instructs runc spec to generate a configuration for a rootless container, which will allow you to run the container as a non-root user.
-runc spec --rootless
-
-# The --root parameter tells runc where to store the container state. It must be writable by the user.
-runc --root /tmp/runc run mycontainerid
-```
-
-#### Supervisors
-
-`runc` can be used with process supervisors and init systems to ensure that containers are restarted when they exit.
-An example systemd unit file looks something like this.
-
-```systemd
-[Unit]
-Description=Start My Container
-
-[Service]
-Type=forking
-ExecStart=/usr/local/sbin/runc run -d --pid-file /run/mycontainerid.pid mycontainerid
-ExecStopPost=/usr/local/sbin/runc delete mycontainerid
-WorkingDirectory=/mycontainer
-PIDFile=/run/mycontainerid.pid
-
-[Install]
-WantedBy=multi-user.target
-```
-
-## License
-
-The code and docs are released under the [Apache 2.0 license](LICENSE).
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md
deleted file mode 100644
index 1d7fa04c0..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/README.md
+++ /dev/null
@@ -1,330 +0,0 @@
-# libcontainer
-
-[![GoDoc](https://godoc.org/github.com/opencontainers/runc/libcontainer?status.svg)](https://godoc.org/github.com/opencontainers/runc/libcontainer)
-
-Libcontainer provides a native Go implementation for creating containers
-with namespaces, cgroups, capabilities, and filesystem access controls.
-It allows you to manage the lifecycle of the container and perform additional operations
-after the container is created.
-
-
-#### Container
-A container is a self contained execution environment that shares the kernel of the
-host system and which is (optionally) isolated from other containers in the system.
-
-#### Using libcontainer
-
-Because containers are spawned in a two-step process, you will need a binary that
-will be executed as the init process for the container. In libcontainer, the current
-binary (/proc/self/exe) is re-executed as the container's init process with the
-argument "init"; we call this first step "bootstrap", so you always need an "init"
-function as the entry point of the bootstrap.
-
-In addition to the Go init function, the early-stage bootstrap is handled by importing
-[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
-
-```go
-import (
- _ "github.com/opencontainers/runc/libcontainer/nsenter"
-)
-
-func init() {
- if len(os.Args) > 1 && os.Args[1] == "init" {
- runtime.GOMAXPROCS(1)
- runtime.LockOSThread()
- factory, _ := libcontainer.New("")
- if err := factory.StartInitialization(); err != nil {
- logrus.Fatal(err)
- }
- panic("--this line should have never been executed, congratulations--")
- }
-}
-```
-
-Then to create a container you first have to initialize an instance of a factory
-that will handle the creation and initialization for a container.
-
-```go
-factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
-if err != nil {
- logrus.Fatal(err)
- return
-}
-```
-
-Once you have created an instance of the factory, you can create a configuration
-struct describing how the container is to be created. A sample would look similar to this:
-
-```go
-defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
-config := &configs.Config{
- Rootfs: "/your/path/to/rootfs",
- Capabilities: &configs.Capabilities{
- Bounding: []string{
- "CAP_CHOWN",
- "CAP_DAC_OVERRIDE",
- "CAP_FSETID",
- "CAP_FOWNER",
- "CAP_MKNOD",
- "CAP_NET_RAW",
- "CAP_SETGID",
- "CAP_SETUID",
- "CAP_SETFCAP",
- "CAP_SETPCAP",
- "CAP_NET_BIND_SERVICE",
- "CAP_SYS_CHROOT",
- "CAP_KILL",
- "CAP_AUDIT_WRITE",
- },
- Effective: []string{
- "CAP_CHOWN",
- "CAP_DAC_OVERRIDE",
- "CAP_FSETID",
- "CAP_FOWNER",
- "CAP_MKNOD",
- "CAP_NET_RAW",
- "CAP_SETGID",
- "CAP_SETUID",
- "CAP_SETFCAP",
- "CAP_SETPCAP",
- "CAP_NET_BIND_SERVICE",
- "CAP_SYS_CHROOT",
- "CAP_KILL",
- "CAP_AUDIT_WRITE",
- },
- Inheritable: []string{
- "CAP_CHOWN",
- "CAP_DAC_OVERRIDE",
- "CAP_FSETID",
- "CAP_FOWNER",
- "CAP_MKNOD",
- "CAP_NET_RAW",
- "CAP_SETGID",
- "CAP_SETUID",
- "CAP_SETFCAP",
- "CAP_SETPCAP",
- "CAP_NET_BIND_SERVICE",
- "CAP_SYS_CHROOT",
- "CAP_KILL",
- "CAP_AUDIT_WRITE",
- },
- Permitted: []string{
- "CAP_CHOWN",
- "CAP_DAC_OVERRIDE",
- "CAP_FSETID",
- "CAP_FOWNER",
- "CAP_MKNOD",
- "CAP_NET_RAW",
- "CAP_SETGID",
- "CAP_SETUID",
- "CAP_SETFCAP",
- "CAP_SETPCAP",
- "CAP_NET_BIND_SERVICE",
- "CAP_SYS_CHROOT",
- "CAP_KILL",
- "CAP_AUDIT_WRITE",
- },
- Ambient: []string{
- "CAP_CHOWN",
- "CAP_DAC_OVERRIDE",
- "CAP_FSETID",
- "CAP_FOWNER",
- "CAP_MKNOD",
- "CAP_NET_RAW",
- "CAP_SETGID",
- "CAP_SETUID",
- "CAP_SETFCAP",
- "CAP_SETPCAP",
- "CAP_NET_BIND_SERVICE",
- "CAP_SYS_CHROOT",
- "CAP_KILL",
- "CAP_AUDIT_WRITE",
- },
- },
- Namespaces: configs.Namespaces([]configs.Namespace{
- {Type: configs.NEWNS},
- {Type: configs.NEWUTS},
- {Type: configs.NEWIPC},
- {Type: configs.NEWPID},
- {Type: configs.NEWUSER},
- {Type: configs.NEWNET},
- {Type: configs.NEWCGROUP},
- }),
- Cgroups: &configs.Cgroup{
- Name: "test-container",
- Parent: "system",
- Resources: &configs.Resources{
- MemorySwappiness: nil,
- AllowAllDevices: nil,
- AllowedDevices: configs.DefaultAllowedDevices,
- },
- },
- MaskPaths: []string{
- "/proc/kcore",
- "/sys/firmware",
- },
- ReadonlyPaths: []string{
- "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
- },
- Devices: configs.DefaultAutoCreatedDevices,
- Hostname: "testing",
- Mounts: []*configs.Mount{
- {
- Source: "proc",
- Destination: "/proc",
- Device: "proc",
- Flags: defaultMountFlags,
- },
- {
- Source: "tmpfs",
- Destination: "/dev",
- Device: "tmpfs",
- Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
- Data: "mode=755",
- },
- {
- Source: "devpts",
- Destination: "/dev/pts",
- Device: "devpts",
- Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
- Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
- },
- {
- Device: "tmpfs",
- Source: "shm",
- Destination: "/dev/shm",
- Data: "mode=1777,size=65536k",
- Flags: defaultMountFlags,
- },
- {
- Source: "mqueue",
- Destination: "/dev/mqueue",
- Device: "mqueue",
- Flags: defaultMountFlags,
- },
- {
- Source: "sysfs",
- Destination: "/sys",
- Device: "sysfs",
- Flags: defaultMountFlags | unix.MS_RDONLY,
- },
- },
- UidMappings: []configs.IDMap{
- {
- ContainerID: 0,
- HostID: 1000,
- Size: 65536,
- },
- },
- GidMappings: []configs.IDMap{
- {
- ContainerID: 0,
- HostID: 1000,
- Size: 65536,
- },
- },
- Networks: []*configs.Network{
- {
- Type: "loopback",
- Address: "127.0.0.1/0",
- Gateway: "localhost",
- },
- },
- Rlimits: []configs.Rlimit{
- {
- Type: unix.RLIMIT_NOFILE,
- Hard: uint64(1025),
- Soft: uint64(1025),
- },
- },
-}
-```
-
-Once you have the configuration populated you can create a container:
-
-```go
-container, err := factory.Create("container-id", config)
-if err != nil {
- logrus.Fatal(err)
- return
-}
-```
-
-To spawn bash as the initial process inside the container and have the
-process's PID returned in order to wait on, signal, or kill the process:
-
-```go
-process := &libcontainer.Process{
- Args: []string{"/bin/bash"},
- Env: []string{"PATH=/bin"},
- User: "daemon",
- Stdin: os.Stdin,
- Stdout: os.Stdout,
- Stderr: os.Stderr,
-}
-
-err := container.Run(process)
-if err != nil {
- container.Destroy()
- logrus.Fatal(err)
- return
-}
-
-// wait for the process to finish.
-_, err = process.Wait()
-if err != nil {
- logrus.Fatal(err)
-}
-
-// destroy the container.
-container.Destroy()
-```
-
-Additional ways to interact with a running container are:
-
-```go
-// return all the pids for all processes running inside the container.
-processes, err := container.Processes()
-
-// get detailed cpu, memory, io, and network statistics for the container and
-// its processes.
-stats, err := container.Stats()
-
-// pause all processes inside the container.
-container.Pause()
-
-// resume all paused processes.
-container.Resume()
-
-// send signal to container's init process.
-container.Signal(signal)
-
-// update container resource constraints.
-container.Set(config)
-
-// get current status of the container.
-status, err := container.Status()
-
-// get current container's state information.
-state, err := container.State()
-```
-
-
-#### Checkpoint & Restore
-
-libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
-This lets you save the state of a process running inside a container to disk, and then restore
-that state into a new process, on the same machine or on another machine.
-
-`criu` version 1.5.2 or higher is required to use checkpoint and restore.
-If you don't already have `criu` installed, you can build it from source, following the
-[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
-generated when building libcontainer with docker.
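-
-Assuming your libcontainer version exposes `Checkpoint` and `Restore` methods on the
-container and a `CriuOpts` struct with the fields used below (an assumption worth
-verifying against your vendored copy), a minimal sketch of a checkpoint/restore
-round-trip looks like this:
-
-```go
-// Checkpoint the running container's state to an images directory on disk.
-criuOpts := &libcontainer.CriuOpts{
-	ImagesDirectory: "/var/lib/container/container-id/checkpoint", // placeholder path
-	LeaveRunning:    false,
-}
-if err := container.Checkpoint(criuOpts); err != nil {
-	logrus.Fatal(err)
-}
-
-// Later, restore that state into a new process, on this machine or on another
-// machine that can see the same images directory.
-process := &libcontainer.Process{
-	Args:   []string{"/bin/bash"},
-	Env:    []string{"PATH=/bin"},
-	Stdin:  os.Stdin,
-	Stdout: os.Stdout,
-	Stderr: os.Stderr,
-}
-if err := container.Restore(process, criuOpts); err != nil {
-	logrus.Fatal(err)
-}
-```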
-
-
-## Copyright and license
-
-Code and documentation copyright 2014 Docker, Inc.
-The code and documentation are released under the [Apache 2.0 license](../LICENSE).
-The documentation is also released under Creative Commons Attribution 4.0 International License.
-You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
deleted file mode 100644
index 9ec6c3931..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-## nsenter
-
-The `nsenter` package registers a special init constructor that is called before
-the Go runtime has a chance to boot. This gives us the ability to `setns` into
-existing namespaces and avoid the issues that the Go runtime has with multiple
-threads. This constructor is called whenever this package is imported into your
-Go application.
-
-The `nsenter` package uses `import "C"` and the [cgo](https://golang.org/cmd/cgo/)
-package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
-called the preamble, is used as a header when compiling the C parts of the package.
-So whenever package `nsenter` is imported, the C function `nsexec()` is called.
-Package `nsenter` is only imported in `init.go`, so every time the runc
-`init` command is invoked, that C code is run.
-
-Because `nsexec()` must be run before the Go runtime in order to use the
-Linux kernel namespace, you must `import` this library into a package if
-you plan to use `libcontainer` directly. Otherwise Go will not execute
-the `nsexec()` constructor, which means that the re-exec will not cause
-the namespaces to be joined. You can import it like this:
-
-```go
-import _ "github.com/opencontainers/runc/libcontainer/nsenter"
-```
-
-`nsexec()` will first get the file descriptor number for the init pipe
-from the environment variable `_LIBCONTAINER_INITPIPE` (which was opened
-by the parent and kept open across the fork-exec of the `nsexec()` init
-process). The init pipe is used to read bootstrap data (namespace paths,
-clone flags, uid and gid mappings, and the console path) from the parent
-process. `nsexec()` will then call `setns(2)` to join the namespaces
-provided in the bootstrap data (if available), `clone(2)` a child process
-with the provided clone flags, update the user and group ID mappings, do
-some further miscellaneous setup steps, and then send the PID of the
-child process to the parent of the `nsexec()` "caller". Finally,
-the parent `nsexec()` will exit and the child `nsexec()` process will
-return to allow the Go runtime to take over.
-
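-As a rough illustration of that hand-off (and not libcontainer's actual implementation,
-which passes a socketpair and netlink-encoded bootstrap data), a parent process could
-hand an init pipe to a re-exec'ed copy of itself like this; `spawnInit` and the string
-payload are invented for the sketch:
-
-```go
-package main
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"os/exec"
-	"strconv"
-)
-
-// spawnInit re-executes the current binary with the "init" argument and hands it
-// the read end of a pipe as fd 3, advertised through _LIBCONTAINER_INITPIPE.
-func spawnInit() error {
-	r, w, err := os.Pipe()
-	if err != nil {
-		return err
-	}
-
-	cmd := exec.Command("/proc/self/exe", "init")
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	cmd.ExtraFiles = []*os.File{r} // ExtraFiles are numbered from fd 3 in the child
-	cmd.Env = append(os.Environ(), "_LIBCONTAINER_INITPIPE=3")
-	if err := cmd.Start(); err != nil {
-		r.Close()
-		w.Close()
-		return err
-	}
-	r.Close() // the parent only writes; the child now owns the read end
-
-	// The real parent writes netlink-encoded bootstrap data; a string stands in here.
-	fmt.Fprintln(w, "bootstrap data would go here")
-	w.Close() // EOF tells the child the parent is done writing
-	return cmd.Wait()
-}
-
-func main() {
-	if len(os.Args) > 1 && os.Args[1] == "init" {
-		// "init" side: recover the init pipe from the inherited descriptor.
-		fd, err := strconv.Atoi(os.Getenv("_LIBCONTAINER_INITPIPE"))
-		if err != nil {
-			os.Exit(1)
-		}
-		pipe := os.NewFile(uintptr(fd), "initpipe")
-		data, _ := io.ReadAll(pipe)
-		fmt.Printf("init received %d bytes of bootstrap data\n", len(data))
-		return
-	}
-	if err := spawnInit(); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-}
-```
-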
-NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
-`CLONE_NEW*` clone flags because we must fork a new process in order to
-enter the PID namespace.
-
-
-
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
deleted file mode 100644
index 9e9bdca05..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef NSENTER_NAMESPACE_H
-#define NSENTER_NAMESPACE_H
-
-#ifndef _GNU_SOURCE
-# define _GNU_SOURCE
-#endif
-#include <sched.h>
-
-/* All of these are taken from include/uapi/linux/sched.h */
-#ifndef CLONE_NEWNS
-# define CLONE_NEWNS 0x00020000 /* New mount namespace group */
-#endif
-#ifndef CLONE_NEWCGROUP
-# define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */
-#endif
-#ifndef CLONE_NEWUTS
-# define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
-#endif
-#ifndef CLONE_NEWIPC
-# define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
-#endif
-#ifndef CLONE_NEWUSER
-# define CLONE_NEWUSER 0x10000000 /* New user namespace */
-#endif
-#ifndef CLONE_NEWPID
-# define CLONE_NEWPID 0x20000000 /* New pid namespace */
-#endif
-#ifndef CLONE_NEWNET
-# define CLONE_NEWNET 0x40000000 /* New network namespace */
-#endif
-
-#endif /* NSENTER_NAMESPACE_H */
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
deleted file mode 100644
index 07f4d63e4..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build linux,!gccgo
-
-package nsenter
-
-/*
-#cgo CFLAGS: -Wall
-extern void nsexec();
-void __attribute__((constructor)) init(void) {
- nsexec();
-}
-*/
-import "C"
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
deleted file mode 100644
index 63c7a3ec2..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build linux,gccgo
-
-package nsenter
-
-/*
-#cgo CFLAGS: -Wall
-extern void nsexec();
-void __attribute__((constructor)) init(void) {
- nsexec();
-}
-*/
-import "C"
-
-// AlwaysFalse is here to stay false
-// (and be exported so the compiler doesn't optimize out its reference)
-var AlwaysFalse bool
-
-func init() {
- if AlwaysFalse {
- // by referencing this C init() in a noop test, it will ensure the compiler
- // links in the C function.
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
- C.init()
- }
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
deleted file mode 100644
index ac701ca39..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build !linux !cgo
-
-package nsenter
-
-import "C"
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
deleted file mode 100644
index 28269dfc0..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
+++ /dev/null
@@ -1,995 +0,0 @@
-
-#define _GNU_SOURCE
-#include <endian.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <grp.h>
-#include <sched.h>
-#include <setjmp.h>
-#include <signal.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/prctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include <linux/limits.h>
-#include <linux/netlink.h>
-#include <linux/types.h>
-
-/* Get all of the CLONE_NEW* flags. */
-#include "namespace.h"
-
-/* Synchronisation values. */
-enum sync_t {
- SYNC_USERMAP_PLS = 0x40, /* Request parent to map our users. */
- SYNC_USERMAP_ACK = 0x41, /* Mapping finished by the parent. */
- SYNC_RECVPID_PLS = 0x42, /* Tell parent we're sending the PID. */
- SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */
- SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */
- SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */
-
- /* XXX: This doesn't help with segfaults and other such issues. */
- SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
-};
-
-/*
- * Synchronisation value for cgroup namespace setup.
- * The same constant is defined in process_linux.go as "createCgroupns".
- */
-#define CREATECGROUPNS 0x80
-
-/* longjmp() arguments. */
-#define JUMP_PARENT 0x00
-#define JUMP_CHILD 0xA0
-#define JUMP_INIT 0xA1
-
-/* JSON buffer. */
-#define JSON_MAX 4096
-
-/* Assume the stack grows down, so arguments should be above it. */
-struct clone_t {
- /*
- * Reserve some space for clone() to locate arguments
- * and retcode in this place
- */
- char stack[4096] __attribute__ ((aligned(16)));
- char stack_ptr[0];
-
- /* There are two children. This is used to execute the different code paths. */
- jmp_buf *env;
- int jmpval;
-};
-
-struct nlconfig_t {
- char *data;
-
- /* Process settings. */
- uint32_t cloneflags;
- char *oom_score_adj;
- size_t oom_score_adj_len;
-
- /* User namespace settings. */
- char *uidmap;
- size_t uidmap_len;
- char *gidmap;
- size_t gidmap_len;
- char *namespaces;
- size_t namespaces_len;
- uint8_t is_setgroup;
-
- /* Rootless container settings. */
- uint8_t is_rootless_euid; /* boolean */
- char *uidmappath;
- size_t uidmappath_len;
- char *gidmappath;
- size_t gidmappath_len;
-};
-
-/*
- * List of netlink message types sent to us as part of bootstrapping the init.
- * These constants are defined in libcontainer/message_linux.go.
- */
-#define INIT_MSG 62000
-#define CLONE_FLAGS_ATTR 27281
-#define NS_PATHS_ATTR 27282
-#define UIDMAP_ATTR 27283
-#define GIDMAP_ATTR 27284
-#define SETGROUP_ATTR 27285
-#define OOM_SCORE_ADJ_ATTR 27286
-#define ROOTLESS_EUID_ATTR 27287
-#define UIDMAPPATH_ATTR 27288
-#define GIDMAPPATH_ATTR 27289
-
-/*
- * Use the raw syscall for versions of glibc which don't include a function for
- * it, namely (glibc 2.12).
- */
-#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
-# define _GNU_SOURCE
-# include "syscall.h"
-# if !defined(SYS_setns) && defined(__NR_setns)
-# define SYS_setns __NR_setns
-# endif
-
-#ifndef SYS_setns
-# error "setns(2) syscall not supported by glibc version"
-#endif
-
-int setns(int fd, int nstype)
-{
- return syscall(SYS_setns, fd, nstype);
-}
-#endif
-
-/* XXX: This is ugly. */
-static int syncfd = -1;
-
-/* TODO(cyphar): Fix this so it correctly deals with syncT. */
-#define bail(fmt, ...) \
- do { \
- int ret = __COUNTER__ + 1; \
- fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \
- if (syncfd >= 0) { \
- enum sync_t s = SYNC_ERR; \
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \
- fprintf(stderr, "nsenter: failed: write(s)"); \
- if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \
- fprintf(stderr, "nsenter: failed: write(ret)"); \
- } \
- exit(ret); \
- } while(0)
-
-static int write_file(char *data, size_t data_len, char *pathfmt, ...)
-{
- int fd, len, ret = 0;
- char path[PATH_MAX];
-
- va_list ap;
- va_start(ap, pathfmt);
- len = vsnprintf(path, PATH_MAX, pathfmt, ap);
- va_end(ap);
- if (len < 0)
- return -1;
-
- fd = open(path, O_RDWR);
- if (fd < 0) {
- return -1;
- }
-
- len = write(fd, data, data_len);
- if (len != data_len) {
- ret = -1;
- goto out;
- }
-
- out:
- close(fd);
- return ret;
-}
-
-enum policy_t {
- SETGROUPS_DEFAULT = 0,
- SETGROUPS_ALLOW,
- SETGROUPS_DENY,
-};
-
-/* This *must* be called before we touch gid_map. */
-static void update_setgroups(int pid, enum policy_t setgroup)
-{
- char *policy;
-
- switch (setgroup) {
- case SETGROUPS_ALLOW:
- policy = "allow";
- break;
- case SETGROUPS_DENY:
- policy = "deny";
- break;
- case SETGROUPS_DEFAULT:
- default:
- /* Nothing to do. */
- return;
- }
-
- if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) {
- /*
- * If the kernel is too old to support /proc/pid/setgroups,
- * open(2) or write(2) will return ENOENT. This is fine.
- */
- if (errno != ENOENT)
- bail("failed to write '%s' to /proc/%d/setgroups", policy, pid);
- }
-}
-
-static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len)
-{
- int child;
-
- /*
- * If @app is NULL, execve will segfault. Just check it here and bail (if
- * we're in this path, the caller is already getting desperate and there
- * isn't a backup to this failing). This usually would be a configuration
- * or programming issue.
- */
- if (!app)
- bail("mapping tool not present");
-
- child = fork();
- if (child < 0)
- bail("failed to fork");
-
- if (!child) {
-#define MAX_ARGV 20
- char *argv[MAX_ARGV];
- char *envp[] = { NULL };
- char pid_fmt[16];
- int argc = 0;
- char *next;
-
- snprintf(pid_fmt, 16, "%d", pid);
-
- argv[argc++] = (char *)app;
- argv[argc++] = pid_fmt;
- /*
- * Convert the map string into a list of arguments that
- * newuidmap/newgidmap can understand.
- */
-
- while (argc < MAX_ARGV) {
- if (*map == '\0') {
- argv[argc++] = NULL;
- break;
- }
- argv[argc++] = map;
- next = strpbrk(map, "\n ");
- if (next == NULL)
- break;
- *next++ = '\0';
- map = next + strspn(next, "\n ");
- }
-
- execve(app, argv, envp);
- bail("failed to execv");
- } else {
- int status;
-
- while (true) {
- if (waitpid(child, &status, 0) < 0) {
- if (errno == EINTR)
- continue;
- bail("failed to waitpid");
- }
- if (WIFEXITED(status) || WIFSIGNALED(status))
- return WEXITSTATUS(status);
- }
- }
-
- return -1;
-}
-
-static void update_uidmap(const char *path, int pid, char *map, size_t map_len)
-{
- if (map == NULL || map_len <= 0)
- return;
-
- if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0) {
- if (errno != EPERM)
- bail("failed to update /proc/%d/uid_map", pid);
- if (try_mapping_tool(path, pid, map, map_len))
- bail("failed to use newuid map on %d", pid);
- }
-}
-
-static void update_gidmap(const char *path, int pid, char *map, size_t map_len)
-{
- if (map == NULL || map_len <= 0)
- return;
-
- if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0) {
- if (errno != EPERM)
- bail("failed to update /proc/%d/gid_map", pid);
- if (try_mapping_tool(path, pid, map, map_len))
- bail("failed to use newgid map on %d", pid);
- }
-}
-
-static void update_oom_score_adj(char *data, size_t len)
-{
- if (data == NULL || len <= 0)
- return;
-
- if (write_file(data, len, "/proc/self/oom_score_adj") < 0)
- bail("failed to update /proc/self/oom_score_adj");
-}
-
-/* A dummy function that just jumps to the given jumpval. */
-static int child_func(void *arg) __attribute__ ((noinline));
-static int child_func(void *arg)
-{
- struct clone_t *ca = (struct clone_t *)arg;
- longjmp(*ca->env, ca->jmpval);
-}
-
-static int clone_parent(jmp_buf *env, int jmpval) __attribute__ ((noinline));
-static int clone_parent(jmp_buf *env, int jmpval)
-{
- struct clone_t ca = {
- .env = env,
- .jmpval = jmpval,
- };
-
- return clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca);
-}
-
-/*
- * Gets the init pipe fd from the environment, which is used to read the
- * bootstrap data and tell the parent what the new pid is after we finish
- * setting up the environment.
- */
-static int initpipe(void)
-{
- int pipenum;
- char *initpipe, *endptr;
-
- initpipe = getenv("_LIBCONTAINER_INITPIPE");
- if (initpipe == NULL || *initpipe == '\0')
- return -1;
-
- pipenum = strtol(initpipe, &endptr, 10);
- if (*endptr != '\0')
- bail("unable to parse _LIBCONTAINER_INITPIPE");
-
- return pipenum;
-}
-
-/* Returns the clone(2) flag for a namespace, given the name of a namespace. */
-static int nsflag(char *name)
-{
- if (!strcmp(name, "cgroup"))
- return CLONE_NEWCGROUP;
- else if (!strcmp(name, "ipc"))
- return CLONE_NEWIPC;
- else if (!strcmp(name, "mnt"))
- return CLONE_NEWNS;
- else if (!strcmp(name, "net"))
- return CLONE_NEWNET;
- else if (!strcmp(name, "pid"))
- return CLONE_NEWPID;
- else if (!strcmp(name, "user"))
- return CLONE_NEWUSER;
- else if (!strcmp(name, "uts"))
- return CLONE_NEWUTS;
-
- /* If we don't recognise a name, fallback to 0. */
- return 0;
-}
-
-static uint32_t readint32(char *buf)
-{
- return *(uint32_t *) buf;
-}
-
-static uint8_t readint8(char *buf)
-{
- return *(uint8_t *) buf;
-}
-
-static void nl_parse(int fd, struct nlconfig_t *config)
-{
- size_t len, size;
- struct nlmsghdr hdr;
- char *data, *current;
-
- /* Retrieve the netlink header. */
- len = read(fd, &hdr, NLMSG_HDRLEN);
- if (len != NLMSG_HDRLEN)
- bail("invalid netlink header length %zu", len);
-
- if (hdr.nlmsg_type == NLMSG_ERROR)
- bail("failed to read netlink message");
-
- if (hdr.nlmsg_type != INIT_MSG)
- bail("unexpected msg type %d", hdr.nlmsg_type);
-
- /* Retrieve data. */
- size = NLMSG_PAYLOAD(&hdr, 0);
- current = data = malloc(size);
- if (!data)
- bail("failed to allocate %zu bytes of memory for nl_payload", size);
-
- len = read(fd, data, size);
- if (len != size)
- bail("failed to read netlink payload, %zu != %zu", len, size);
-
- /* Parse the netlink payload. */
- config->data = data;
- while (current < data + size) {
- struct nlattr *nlattr = (struct nlattr *)current;
- size_t payload_len = nlattr->nla_len - NLA_HDRLEN;
-
- /* Advance to payload. */
- current += NLA_HDRLEN;
-
- /* Handle payload. */
- switch (nlattr->nla_type) {
- case CLONE_FLAGS_ATTR:
- config->cloneflags = readint32(current);
- break;
- case ROOTLESS_EUID_ATTR:
- config->is_rootless_euid = readint8(current); /* boolean */
- break;
- case OOM_SCORE_ADJ_ATTR:
- config->oom_score_adj = current;
- config->oom_score_adj_len = payload_len;
- break;
- case NS_PATHS_ATTR:
- config->namespaces = current;
- config->namespaces_len = payload_len;
- break;
- case UIDMAP_ATTR:
- config->uidmap = current;
- config->uidmap_len = payload_len;
- break;
- case GIDMAP_ATTR:
- config->gidmap = current;
- config->gidmap_len = payload_len;
- break;
- case UIDMAPPATH_ATTR:
- config->uidmappath = current;
- config->uidmappath_len = payload_len;
- break;
- case GIDMAPPATH_ATTR:
- config->gidmappath = current;
- config->gidmappath_len = payload_len;
- break;
- case SETGROUP_ATTR:
- config->is_setgroup = readint8(current);
- break;
- default:
- bail("unknown netlink message type %d", nlattr->nla_type);
- }
-
- current += NLA_ALIGN(payload_len);
- }
-}
-
-void nl_free(struct nlconfig_t *config)
-{
- free(config->data);
-}
-
-void join_namespaces(char *nslist)
-{
- int num = 0, i;
- char *saveptr = NULL;
- char *namespace = strtok_r(nslist, ",", &saveptr);
- struct namespace_t {
- int fd;
- int ns;
- char type[PATH_MAX];
- char path[PATH_MAX];
- } *namespaces = NULL;
-
- if (!namespace || !strlen(namespace) || !strlen(nslist))
- bail("ns paths are empty");
-
- /*
- * We have to open the file descriptors first, since after
- * we join the mnt namespace we might no longer be able to
- * access the paths.
- */
- do {
- int fd;
- char *path;
- struct namespace_t *ns;
-
- /* Resize the namespace array. */
- namespaces = realloc(namespaces, ++num * sizeof(struct namespace_t));
- if (!namespaces)
- bail("failed to reallocate namespace array");
- ns = &namespaces[num - 1];
-
- /* Split 'ns:path'. */
- path = strstr(namespace, ":");
- if (!path)
- bail("failed to parse %s", namespace);
- *path++ = '\0';
-
- fd = open(path, O_RDONLY);
- if (fd < 0)
- bail("failed to open %s", path);
-
- ns->fd = fd;
- ns->ns = nsflag(namespace);
- strncpy(ns->path, path, PATH_MAX - 1);
- ns->path[PATH_MAX - 1] = '\0';
- } while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL);
-
- /*
- * The ordering in which we join namespaces is important. We should
- * always join the user namespace *first*. This is all guaranteed
- * from the container_linux.go side of this, so we're just going to
- * follow the order given to us.
- */
-
- for (i = 0; i < num; i++) {
- struct namespace_t ns = namespaces[i];
-
- if (setns(ns.fd, ns.ns) < 0)
- bail("failed to setns to %s", ns.path);
-
- close(ns.fd);
- }
-
- free(namespaces);
-}
-
-void nsexec(void)
-{
- int pipenum;
- jmp_buf env;
- int sync_child_pipe[2], sync_grandchild_pipe[2];
- struct nlconfig_t config = { 0 };
-
- /*
- * If we don't have an init pipe, just return to the Go runtime.
- * We'll only get an init pipe for start or exec.
- */
- pipenum = initpipe();
- if (pipenum == -1)
- return;
-
- /* Parse all of the netlink configuration. */
- nl_parse(pipenum, &config);
-
- /* Set oom_score_adj. This has to be done before !dumpable because
- * /proc/self/oom_score_adj is not writeable unless you're a privileged
- * user (if !dumpable is set). All children inherit their parent's
- * oom_score_adj value on fork(2) so this will always be propagated
- * properly.
- */
- update_oom_score_adj(config.oom_score_adj, config.oom_score_adj_len);
-
- /*
- * Make the process non-dumpable, to avoid various race conditions that
- * could cause processes in namespaces we're joining to access host
- * resources (or potentially execute code).
- *
- * However, if the number of namespaces we are joining is 0, we are not
- * going to be switching to a different security context. Thus setting
- * ourselves to be non-dumpable only breaks things (like rootless
- * containers), which is the recommendation from the kernel folks.
- */
- if (config.namespaces) {
- if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
- bail("failed to set process as non-dumpable");
- }
-
- /* Pipe so we can tell the child when we've finished setting up. */
- if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_child_pipe) < 0)
- bail("failed to setup sync pipe between parent and child");
-
- /*
- * We need a new socketpair to sync with the grandchild so we don't
- * have a race condition with the child.
- */
- if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_grandchild_pipe) < 0)
- bail("failed to setup sync pipe between parent and grandchild");
-
- /* TODO: Currently we aren't dealing with child deaths properly. */
-
- /*
- * Okay, so this is quite annoying.
- *
- * In order for this unsharing code to be more extensible we need to split
- * up unshare(CLONE_NEWUSER) and clone() in various ways. The ideal case
- * would be if we did clone(CLONE_NEWUSER) and the other namespaces
- * separately, but because of SELinux issues we cannot really do that. But
- * we cannot just dump the namespace flags into clone(...) because several
- * usecases (such as rootless containers) require more granularity around
- * the namespace setup. In addition, some older kernels had issues where
- * CLONE_NEWUSER wasn't handled before other namespaces (but we cannot
- * handle this while also dealing with SELinux so we choose SELinux support
- * over broken kernel support).
- *
- * However, if we unshare(2) the user namespace *before* we clone(2), then
- * all hell breaks loose.
- *
- * The parent no longer has permissions to do many things (unshare(2) drops
- * all capabilities in your old namespace), and the container cannot be set
- * up to have more than one {uid,gid} mapping. This is obviously less than
- * ideal. In order to fix this, we have to first clone(2) and then unshare.
- *
- * Unfortunately, it's not as simple as that. We have to fork to enter the
- * PID namespace (the PID namespace only applies to children). Since we'll
- * have to double-fork, this clone_parent() call won't be able to get the
- * PID of the _actual_ init process (without doing more synchronisation than
- * I can deal with at the moment). So we'll just get the parent to send it
- * for us, the only job of this process is to update
- * /proc/pid/{setgroups,uid_map,gid_map}.
- *
- * And as a result of the above, we also need to setns(2) in the first child
- * because if we join a PID namespace in the topmost parent then our child
- * will be in that namespace (and it will not be able to give us a PID value
- * that makes sense without resorting to sending things with cmsg).
- *
- * This also deals with an older issue caused by dumping cloneflags into
- * clone(2): On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so
- * we have to unshare(2) before clone(2) in order to do this. This was fixed
- * in upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was
- * introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. As far as we're
- * aware, the last mainline kernel which had this bug was Linux 3.12.
- * However, we cannot comment on which kernels the broken patch was
- * backported to.
- *
- * -- Aleksa "what has my life come to?" Sarai
- */
-
- switch (setjmp(env)) {
- /*
- * Stage 0: We're in the parent. Our job is just to create a new child
- * (stage 1: JUMP_CHILD) process and write its uid_map and
- * gid_map. That process will go on to create a new process, then
- * it will send us its PID which we will send to the bootstrap
- * process.
- */
- case JUMP_PARENT:{
- int len;
- pid_t child, first_child = -1;
- bool ready = false;
-
- /* For debugging. */
- prctl(PR_SET_NAME, (unsigned long)"runc:[0:PARENT]", 0, 0, 0);
-
- /* Start the process of getting a container. */
- child = clone_parent(&env, JUMP_CHILD);
- if (child < 0)
- bail("unable to fork: child_func");
-
- /*
- * State machine for synchronisation with the children.
- *
- * The parent only returns when both the child and the grandchild
- * are ready, so we can receive all possible error codes
- * generated by the children.
- */
- while (!ready) {
- enum sync_t s;
- int ret;
-
- syncfd = sync_child_pipe[1];
- close(sync_child_pipe[0]);
-
- if (read(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with child: next state");
-
- switch (s) {
- case SYNC_ERR:
- /* We have to mirror the error code of the child. */
- if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
- bail("failed to sync with child: read(error code)");
-
- exit(ret);
- case SYNC_USERMAP_PLS:
- /*
- * Enable setgroups(2) if we've been asked to. But we also
- * have to explicitly disable setgroups(2) if we're
- * creating a rootless container for single-entry mapping.
- * i.e. config.is_setgroup == false.
- * (this is required since Linux 3.19).
- *
- * For rootless multi-entry mapping, config.is_setgroup shall be true and
- * newuidmap/newgidmap shall be used.
- */
-
- if (config.is_rootless_euid && !config.is_setgroup)
- update_setgroups(child, SETGROUPS_DENY);
-
- /* Set up mappings. */
- update_uidmap(config.uidmappath, child, config.uidmap, config.uidmap_len);
- update_gidmap(config.gidmappath, child, config.gidmap, config.gidmap_len);
-
- s = SYNC_USERMAP_ACK;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(child, SIGKILL);
- bail("failed to sync with child: write(SYNC_USERMAP_ACK)");
- }
- break;
- case SYNC_RECVPID_PLS:{
- first_child = child;
-
- /* Get the init_func pid. */
- if (read(syncfd, &child, sizeof(child)) != sizeof(child)) {
- kill(first_child, SIGKILL);
- bail("failed to sync with child: read(childpid)");
- }
-
- /* Send ACK. */
- s = SYNC_RECVPID_ACK;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(first_child, SIGKILL);
- kill(child, SIGKILL);
- bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
- }
-
- /* Send the init_func pid back to our parent.
- *
- * Send the init_func pid and the pid of the first child back to our parent.
- * We need to send both back because we can't reap the first child we created (CLONE_PARENT).
- * It becomes the responsibility of our parent to reap the first child.
- */
- len = dprintf(pipenum, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
- if (len < 0) {
- kill(child, SIGKILL);
- bail("unable to generate JSON for child pid");
- }
- }
- break;
- case SYNC_CHILD_READY:
- ready = true;
- break;
- default:
- bail("unexpected sync value: %u", s);
- }
- }
-
- /* Now sync with grandchild. */
-
- ready = false;
- while (!ready) {
- enum sync_t s;
- int ret;
-
- syncfd = sync_grandchild_pipe[1];
- close(sync_grandchild_pipe[0]);
-
- s = SYNC_GRANDCHILD;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(child, SIGKILL);
- bail("failed to sync with child: write(SYNC_GRANDCHILD)");
- }
-
- if (read(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with child: next state");
-
- switch (s) {
- case SYNC_ERR:
- /* We have to mirror the error code of the child. */
- if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret))
- bail("failed to sync with child: read(error code)");
-
- exit(ret);
- case SYNC_CHILD_READY:
- ready = true;
- break;
- default:
- bail("unexpected sync value: %u", s);
- }
- }
- exit(0);
- }
-
- /*
- * Stage 1: We're in the first child process. Our job is to join any
- * provided namespaces in the netlink payload and unshare all
- * of the requested namespaces. If we've been asked to
- * CLONE_NEWUSER, we will ask our parent (stage 0) to set up
- * our user mappings for us. Then, we create a new child
- * (stage 2: JUMP_INIT) for PID namespace. We then send the
- * child's PID to our parent (stage 0).
- */
- case JUMP_CHILD:{
- pid_t child;
- enum sync_t s;
-
- /* We're in a child and thus need to tell the parent if we die. */
- syncfd = sync_child_pipe[0];
- close(sync_child_pipe[1]);
-
- /* For debugging. */
- prctl(PR_SET_NAME, (unsigned long)"runc:[1:CHILD]", 0, 0, 0);
-
- /*
- * We need to setns first. We cannot do this earlier (in stage 0),
- * because we forked to get here, so the PID of [stage 2: JUMP_INIT]
- * would be meaningless. We could send it using cmsg(3) but that's
- * just annoying.
- */
- if (config.namespaces)
- join_namespaces(config.namespaces);
-
- /*
- * Deal with user namespaces first. They are quite special, as they
- * affect our ability to unshare other namespaces and are used as
- * context for privilege checks.
- *
- * We don't unshare all namespaces in one go. The reason for this
- * is that, while the kernel documentation may claim otherwise,
- * there are certain cases where unsharing all namespaces at once
- * will result in namespace objects being owned incorrectly.
- * Ideally we should just fix these kernel bugs, but it's better to
- * be safe than sorry, and fix them separately.
- *
- * A specific case of this is that the SELinux label of the
- * internal kern-mount that mqueue uses will be incorrect if the
- * UTS namespace is cloned before the USER namespace is mapped.
- * I've also heard of similar problems with the network namespace
- * in some scenarios. This also mirrors how LXC deals with this
- * problem.
- */
- if (config.cloneflags & CLONE_NEWUSER) {
- if (unshare(CLONE_NEWUSER) < 0)
- bail("failed to unshare user namespace");
- config.cloneflags &= ~CLONE_NEWUSER;
-
- /*
- * We don't have the privileges to do any mapping here (see the
- * clone_parent rant). So signal our parent to hook us up.
- */
-
- /* Switching is only necessary if we joined namespaces. */
- if (config.namespaces) {
- if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0)
- bail("failed to set process as dumpable");
- }
- s = SYNC_USERMAP_PLS;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with parent: write(SYNC_USERMAP_PLS)");
-
- /* ... wait for mapping ... */
-
- if (read(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with parent: read(SYNC_USERMAP_ACK)");
- if (s != SYNC_USERMAP_ACK)
- bail("failed to sync with parent: SYNC_USERMAP_ACK: got %u", s);
- /* Switching is only necessary if we joined namespaces. */
- if (config.namespaces) {
- if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0)
- bail("failed to set process as non-dumpable");
- }
-
- /* Become root in the namespace proper. */
- if (setresuid(0, 0, 0) < 0)
- bail("failed to become root in user namespace");
- }
- /*
- * Unshare all of the namespaces. Now, it should be noted that this
- * ordering might break in the future (especially with rootless
- * containers). But for now, it's not possible to split this into
- * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
- *
- * Note that we don't merge this with clone() because there were
- * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
- * was broken, so we'll just do it the long way anyway.
- */
- if (unshare(config.cloneflags & ~CLONE_NEWCGROUP) < 0)
- bail("failed to unshare namespaces");
-
- /*
- * TODO: What about non-namespace clone flags that we're dropping here?
- *
- * We fork again because of the PID namespace: setns(2) and unshare(2)
- * don't change the PID namespace of the calling process, since doing so
- * would change the caller's idea of its own PID (as reported by getpid()),
- * which would break many applications and libraries. So we must fork
- * to actually enter the new PID namespace.
- */
- child = clone_parent(&env, JUMP_INIT);
- if (child < 0)
- bail("unable to fork: init_func");
-
- /* Send the child to our parent, which knows what it's doing. */
- s = SYNC_RECVPID_PLS;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(child, SIGKILL);
- bail("failed to sync with parent: write(SYNC_RECVPID_PLS)");
- }
- if (write(syncfd, &child, sizeof(child)) != sizeof(child)) {
- kill(child, SIGKILL);
- bail("failed to sync with parent: write(childpid)");
- }
-
- /* ... wait for parent to get the pid ... */
-
- if (read(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(child, SIGKILL);
- bail("failed to sync with parent: read(SYNC_RECVPID_ACK)");
- }
- if (s != SYNC_RECVPID_ACK) {
- kill(child, SIGKILL);
- bail("failed to sync with parent: SYNC_RECVPID_ACK: got %u", s);
- }
-
- s = SYNC_CHILD_READY;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
- kill(child, SIGKILL);
- bail("failed to sync with parent: write(SYNC_CHILD_READY)");
- }
-
- /* Our work is done. [Stage 2: JUMP_INIT] is doing the rest of the work. */
- exit(0);
- }
-
- /*
- * Stage 2: We're the final child process, and the only process that will
- * actually return to the Go runtime. Our job is to just do the
- * final cleanup steps and then return to the Go runtime to allow
- * init_linux.go to run.
- */
- case JUMP_INIT:{
- /*
- * We're inside the child now, having jumped from the
- * start_child() code after forking in the parent.
- */
- enum sync_t s;
-
- /* We're in a child and thus need to tell the parent if we die. */
- syncfd = sync_grandchild_pipe[0];
- close(sync_grandchild_pipe[1]);
- close(sync_child_pipe[0]);
- close(sync_child_pipe[1]);
-
- /* For debugging. */
- prctl(PR_SET_NAME, (unsigned long)"runc:[2:INIT]", 0, 0, 0);
-
- if (read(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with parent: read(SYNC_GRANDCHILD)");
- if (s != SYNC_GRANDCHILD)
- bail("failed to sync with parent: SYNC_GRANDCHILD: got %u", s);
-
- if (setsid() < 0)
- bail("setsid failed");
-
- if (setuid(0) < 0)
- bail("setuid failed");
-
- if (setgid(0) < 0)
- bail("setgid failed");
-
- if (!config.is_rootless_euid && config.is_setgroup) {
- if (setgroups(0, NULL) < 0)
- bail("setgroups failed");
- }
-
- /* ... wait until our topmost parent has finished cgroup setup in p.manager.Apply() ... */
- if (config.cloneflags & CLONE_NEWCGROUP) {
- uint8_t value;
- if (read(pipenum, &value, sizeof(value)) != sizeof(value))
- bail("read synchronisation value failed");
- if (value == CREATECGROUPNS) {
- if (unshare(CLONE_NEWCGROUP) < 0)
- bail("failed to unshare cgroup namespace");
- } else
- bail("received unknown synchronisation value");
- }
-
- s = SYNC_CHILD_READY;
- if (write(syncfd, &s, sizeof(s)) != sizeof(s))
- bail("failed to sync with parent: write(SYNC_CHILD_READY)");
-
- /* Close sync pipes. */
- close(sync_grandchild_pipe[0]);
-
- /* Free netlink data. */
- nl_free(&config);
-
- /* Finish executing, let the Go runtime take over. */
- return;
- }
- default:
- bail("unexpected jump value");
- }
-
- /* Should never be reached. */
- bail("should never be reached");
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
new file mode 100644
index 000000000..edbe20066
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
@@ -0,0 +1,2 @@
+Tianon Gravi <admwiggin@gmail.com> (@tianon)
+Aleksa Sarai <cyphar@cyphar.com> (@cyphar)
diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf
deleted file mode 100644
index fadbe0707..000000000
--- a/vendor/github.com/opencontainers/runc/vendor.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# OCI runtime-spec. When updating this, make sure you use a version tag rather
-# than a commit ID so it's much more obvious what version of the spec we are
-# using.
-github.com/opencontainers/runtime-spec 5684b8af48c1ac3b1451fa499724e30e3c20a294
-# Core libcontainer functionality.
-github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
-github.com/opencontainers/selinux v1.0.0-rc1
-github.com/seccomp/libseccomp-golang 84e90a91acea0f4e51e62bc1a75de18b1fc0790f
-github.com/sirupsen/logrus a3f95b5c423586578a4e099b11a46c2479628cac
-github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
-github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
-# systemd integration.
-github.com/coreos/go-systemd v14
-github.com/coreos/pkg v3
-github.com/godbus/dbus v3
-github.com/golang/protobuf 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
-# Command-line interface.
-github.com/cyphar/filepath-securejoin v0.2.1
-github.com/docker/go-units v0.2.0
-github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e
-golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce https://github.com/golang/sys
-
-# console dependencies
-github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880
-github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/opencontainers/runtime-spec/README.md b/vendor/github.com/opencontainers/runtime-spec/README.md
deleted file mode 100644
index b40dba17d..000000000
--- a/vendor/github.com/opencontainers/runtime-spec/README.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# Open Container Initiative Runtime Specification
-
-The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers.
-
-The specification can be found [here](spec.md).
-
-## Table of Contents
-
-Additional documentation about how this group operates:
-
-- [Code of Conduct][code-of-conduct]
-- [Style and Conventions](style.md)
-- [Implementations](implementations.md)
-- [Releases](RELEASES.md)
-- [Project](project.md)
-- [Charter][charter]
-
-## Use Cases
-
-To provide context for users the following section gives example use cases for each part of the spec.
-
-### Application Bundle Builders
-
-Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container.
-The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#posix-platform-hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups).
-Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments.
-
-### Hook Developers
-
-[Hook](config.md#posix-platform-hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application.
-Example use cases include sophisticated network configuration, volume garbage collection, etc.
-
-### Runtime Developers
-
-Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host-specific details, on a particular platform.
-
-## Contributing
-
-Development happens on GitHub for the spec.
-Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
-
-The specification and code are licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file.
-
-### Discuss your design
-
-The project welcomes submissions, but please let everyone know what you are working on.
-
-Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
-This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
-It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
-
-Typos and grammatical errors can go straight to a pull-request.
-When in doubt, start on the [mailing-list](#mailing-list).
-
-### Meetings
-
-The contributors and maintainers of all OCI projects have monthly meetings, which are usually at 2:00 PM (USA Pacific) on the first Wednesday of every month.
-There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics).
-Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: +1 415 968 0849 (no PIN needed).
-An initial agenda will be posted to the [mailing list](#mailing-list) in the week before each meeting, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
-Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes], with minutes from especially old meetings (September 2015 and earlier) archived [here][runtime-wiki].
-
-### Mailing List
-
-You can subscribe and join the mailing list on [Google Groups][dev-list].
-
-### IRC
-
-OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
-
-### Git commit
-
-#### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
-The rules are pretty simple: if you can certify the below (from http://developercertificate.org):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith <joe@gmail.com>
-
-using your real name (sorry, no pseudonyms or anonymous contributions.)
-
-You can add the sign off when creating the git commit via `git commit -s`.
-
-#### Commit Style
-
-Simple house-keeping for clean git history.
-Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1].
-
-1. Separate the subject from body with a blank line
-2. Limit the subject line to 50 characters
-3. Capitalize the subject line
-4. Do not end the subject line with a period
-5. Use the imperative mood in the subject line
-6. Wrap the body at 72 characters
-7. Use the body to explain what and why vs. how
- * If there was important/useful/essential conversation or information, copy or include a reference
-8. When possible, one keyword to scope the change in the subject (e.g. "README: ...", "runtime: ...")
-
-
-[charter]: https://www.opencontainers.org/about/governance
-[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md
-[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
-[how-to-git-commit]: http://chris.beams.io/posts/git-commit
-[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
-[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date
-[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
-[oci]: https://www.opencontainers.org
-[rfc5545]: https://tools.ietf.org/html/rfc5545
-[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki
-[uberconference]: https://www.uberconference.com/opencontainers
-
-[git-commit.1]: http://git-scm.com/docs/git-commit
diff --git a/vendor/github.com/opencontainers/runtime-tools/README.md b/vendor/github.com/opencontainers/runtime-tools/README.md
deleted file mode 100644
index c9974cab5..000000000
--- a/vendor/github.com/opencontainers/runtime-tools/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# oci-runtime-tool [![Build Status](https://travis-ci.org/opencontainers/runtime-tools.svg?branch=master)](https://travis-ci.org/opencontainers/runtime-tools) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runtime-tools)](https://goreportcard.com/report/github.com/opencontainers/runtime-tools)
-
-oci-runtime-tool is a collection of tools for working with the [OCI runtime specification][runtime-spec].
-To build from source code, runtime-tools requires Go 1.7.x or above.
-
-## Generating an OCI runtime spec configuration file
-
-[`oci-runtime-tool generate`][generate.1] generates [configuration JSON][config.json] for an [OCI bundle][bundle].
-[OCI-compatible runtimes][runtime-spec] like [runC][] expect to read the configuration from `config.json`.
-
-```console
-$ oci-runtime-tool generate --output config.json
-$ cat config.json
-{
- "ociVersion": "0.5.0",
- …
-}
-```
-
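-The same configuration can also be produced programmatically with the `generate` Go
-package that backs this tool. The sketch below assumes the `generate.New`,
-`SetHostname`, and `AddProcessCapability` helpers and the exported `Config` field
-present in this repository's `generate` package; check your vendored copy for the
-exact API:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-
-	"github.com/opencontainers/runtime-tools/generate"
-)
-
-func main() {
-	// Build a runtime-spec configuration in memory instead of shelling out
-	// to oci-runtime-tool.
-	g, err := generate.New("linux")
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-	g.SetHostname("mrsdalloway")
-	if err := g.AddProcessCapability("CAP_NET_BIND_SERVICE"); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-
-	// g.Config holds the spec structure; print it in config.json form.
-	out, err := json.MarshalIndent(g.Config, "", "\t")
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-	fmt.Println(string(out))
-}
-```
-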
-## Validating an OCI bundle
-
-[`oci-runtime-tool validate`][validate.1] validates an OCI bundle.
-An error message will be printed if the OCI bundle fails validation.
-
-```console
-$ oci-runtime-tool generate
-$ oci-runtime-tool validate
-INFO[0000] Bundle validation succeeded.
-```
-
-## Testing OCI runtimes
-
-The runtime validation suite uses [node-tap][], which is packaged for some distributions (for example, it is in [Debian's `node-tap` package][debian-node-tap]).
-If your distribution does not package node-tap, you can install [npm][] (for example, from [Gentoo's `nodejs` package][gentoo-nodejs]) and use it:
-
-```console
-$ npm install tap
-```
-
-Build the validation executables:
-
-```console
-$ make runtimetest validation-executables
-```
-
-Runtime validation currently [only supports](docs/runtime-compliance-testing.md) the [OCI Runtime Command Line Interface](docs/command-line-interface.md).
-If we add support for alternative APIs in the future, runtime validation will gain an option to select the desired runtime API.
-For the command line interface, the `RUNTIME` option selects the runtime command (`funC` in the [OCI Runtime Command Line Interface](docs/command-line-interface.md)).
-
-```
-$ sudo make RUNTIME=runc localvalidation
-RUNTIME=runc tap validation/pidfile.t validation/linux_cgroups_hugetlb.t validation/linux_cgroups_memory.t validation/linux_rootfs_propagation_shared.t validation/kill.t validation/create.t validation/poststart.t validation/linux_cgroups_network.t validation/poststop_fail.t validation/linux_readonly_paths.t validation/prestart_fail.t validation/hooks_stdin.t validation/default.t validation/linux_masked_paths.t validation/poststop.t validation/misc_props.t validation/prestart.t validation/poststart_fail.t validation/mounts.t validation/linux_cgroups_relative_pids.t validation/process_user.t validation/process.t validation/hooks.t validation/process_capabilities_fail.t validation/process_rlimits_fail.t validation/linux_cgroups_relative_cpus.t validation/process_rlimits.t validation/linux_cgroups_relative_blkio.t validation/linux_sysctl.t validation/linux_seccomp.t validation/linux_devices.t validation/start.t validation/linux_cgroups_pids.t validation/process_capabilities.t validation/process_oom_score_adj.t validation/linux_cgroups_relative_hugetlb.t validation/linux_cgroups_cpus.t validation/linux_cgroups_relative_memory.t validation/state.t validation/root_readonly_true.t validation/linux_cgroups_blkio.t validation/linux_rootfs_propagation_unbindable.t validation/delete.t validation/linux_cgroups_relative_network.t validation/hostname.t validation/killsig.t validation/linux_uid_mappings.t
-validation/pidfile.t .failed to create the container
-container_linux.go:348: starting container process caused "process_linux.go:402: container init caused \"process_linux.go:367: setting cgroup config for procHooks process caused \\\"failed to write 56892210544640 to hugetlb.1GB.limit_in_bytes: open /sys/fs/cgroup/hugetlb/cgrouptest/hugetlb.1GB.limit_in_bytes: permission denied\\\"\""
-exit status 1
-validation/pidfile.t .................................. 1/1 315ms
-validation/linux_cgroups_hugetlb.t .................... 0/1
- not ok validation/linux_cgroups_hugetlb.t
- timeout: 30000
- file: validation/linux_cgroups_hugetlb.t
- command: validation/linux_cgroups_hugetlb.t
- args: []
- stdio:
- - 0
- - pipe
- - 2
- cwd: /…/go/src/github.com/opencontainers/runtime-tools
- exitCode: 1
-
-validation/linux_cgroups_memory.t ..................... 9/9
-validation/linux_rootfs_propagation_shared.t ...... 252/282
- not ok shared root propagation exposes "/target348456609/mount892511628/example376408222"
-
- Skipped: 29
- /dev/null (default device) has unconfigured permissions
-…
-total ........................................... 4381/4962
-
-
- 4381 passing (1m)
- 567 pending
- 14 failing
-
-make: *** [Makefile:44: localvalidation] Error 1
-```
-
-You can also run an individual test executable directly:
-
-```console
-$ sudo RUNTIME=runc validation/default/default.t
-TAP version 13
-ok 1 - has expected hostname
- ---
- {
- "actual": "mrsdalloway",
- "expected": "mrsdalloway"
- }
- ...
-…
-ok 287 # SKIP linux.gidMappings not set
-1..287
-```
-
-If you cannot install node-tap, you can probably run the test suite with another [TAP consumer][tap-consumers].
-For example, with [`prove`][prove]:
-
-```console
-$ sudo make TAP='prove -Q -j9' RUNTIME=runc VALIDATION_TESTS=validation/pidfile/pidfile.t localvalidation
-RUNTIME=runc prove -Q -j9 validation/pidfile.t
-All tests successful.
-Files=1, Tests=1, 0 wallclock secs ( 0.01 usr 0.01 sys + 0.03 cusr 0.03 csys = 0.08 CPU)
-Result: PASS
-```
-
-[bundle]: https://github.com/opencontainers/runtime-spec/blob/master/bundle.md
-[config.json]: https://github.com/opencontainers/runtime-spec/blob/master/config.md
-[debian-node-tap]: https://packages.debian.org/stretch/node-tap
-[debian-nodejs]: https://packages.debian.org/stretch/nodejs
-[gentoo-nodejs]: https://packages.gentoo.org/packages/net-libs/nodejs
-[node-tap]: http://www.node-tap.org/
-[npm]: https://www.npmjs.com/
-[prove]: http://search.cpan.org/~leont/Test-Harness-3.39/bin/prove
-[runC]: https://github.com/opencontainers/runc
-[runtime-spec]: https://github.com/opencontainers/runtime-spec
-[tap-consumers]: https://testanything.org/consumers.html
-
-[generate.1]: man/oci-runtime-tool-generate.1.md
-[validate.1]: man/oci-runtime-tool-validate.1.md
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/config.go b/vendor/github.com/opencontainers/runtime-tools/generate/config.go
index 164fdf141..f68bdde37 100644
--- a/vendor/github.com/opencontainers/runtime-tools/generate/config.go
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/config.go
@@ -151,6 +151,13 @@ func (g *Generator) initConfigWindows() {
}
}
+func (g *Generator) initConfigWindowsNetwork() {
+ g.initConfigWindows()
+ if g.Config.Windows.Network == nil {
+ g.Config.Windows.Network = &rspec.WindowsNetwork{}
+ }
+}
+
func (g *Generator) initConfigWindowsHyperV() {
g.initConfigWindows()
if g.Config.Windows.HyperV == nil {
@@ -171,3 +178,31 @@ func (g *Generator) initConfigWindowsResourcesMemory() {
g.Config.Windows.Resources.Memory = &rspec.WindowsMemoryResources{}
}
}
+
+func (g *Generator) initConfigVM() {
+ g.initConfig()
+ if g.Config.VM == nil {
+ g.Config.VM = &rspec.VM{}
+ }
+}
+
+func (g *Generator) initConfigVMHypervisor() {
+ g.initConfigVM()
+ if &g.Config.VM.Hypervisor == nil {
+ g.Config.VM.Hypervisor = rspec.VMHypervisor{}
+ }
+}
+
+func (g *Generator) initConfigVMKernel() {
+ g.initConfigVM()
+ if &g.Config.VM.Kernel == nil {
+ g.Config.VM.Kernel = rspec.VMKernel{}
+ }
+}
+
+func (g *Generator) initConfigVMImage() {
+ g.initConfigVM()
+ if &g.Config.VM.Image == nil {
+ g.Config.VM.Image = rspec.VMImage{}
+ }
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
index 1eb44770f..6d3268902 100644
--- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
@@ -54,17 +54,8 @@ func New(os string) (generator Generator, err error) {
"cmd",
},
Cwd: `C:\`,
- ConsoleSize: &rspec.Box{
- Width: 80,
- Height: 20,
- },
- }
- config.Windows = &rspec.Windows{
- IgnoreFlushesDuringBoot: true,
- Network: &rspec.WindowsNetwork{
- AllowUnqualifiedDNSQuery: true,
- },
}
+ config.Windows = &rspec.Windows{}
} else {
config.Root = &rspec.Root{
Path: "rootfs",
@@ -368,6 +359,12 @@ func (g *Generator) SetHostname(s string) {
g.Config.Hostname = s
}
+// SetOCIVersion sets g.Config.Version.
+func (g *Generator) SetOCIVersion(s string) {
+ g.initConfig()
+ g.Config.Version = s
+}
+
// ClearAnnotations clears g.Config.Annotations.
func (g *Generator) ClearAnnotations() {
if g.Config == nil {
@@ -1074,6 +1071,69 @@ func (g *Generator) ClearProcessCapabilities() {
g.Config.Process.Capabilities.Ambient = []string{}
}
+// AddProcessCapability adds a process capability into all 5 capability sets.
+func (g *Generator) AddProcessCapability(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initConfigProcessCapabilities()
+
+ var foundAmbient, foundBounding, foundEffective, foundInheritable, foundPermitted bool
+ for _, cap := range g.Config.Process.Capabilities.Ambient {
+ if strings.ToUpper(cap) == cp {
+ foundAmbient = true
+ break
+ }
+ }
+ if !foundAmbient {
+ g.Config.Process.Capabilities.Ambient = append(g.Config.Process.Capabilities.Ambient, cp)
+ }
+
+ for _, cap := range g.Config.Process.Capabilities.Bounding {
+ if strings.ToUpper(cap) == cp {
+ foundBounding = true
+ break
+ }
+ }
+ if !foundBounding {
+ g.Config.Process.Capabilities.Bounding = append(g.Config.Process.Capabilities.Bounding, cp)
+ }
+
+ for _, cap := range g.Config.Process.Capabilities.Effective {
+ if strings.ToUpper(cap) == cp {
+ foundEffective = true
+ break
+ }
+ }
+ if !foundEffective {
+ g.Config.Process.Capabilities.Effective = append(g.Config.Process.Capabilities.Effective, cp)
+ }
+
+ for _, cap := range g.Config.Process.Capabilities.Inheritable {
+ if strings.ToUpper(cap) == cp {
+ foundInheritable = true
+ break
+ }
+ }
+ if !foundInheritable {
+ g.Config.Process.Capabilities.Inheritable = append(g.Config.Process.Capabilities.Inheritable, cp)
+ }
+
+ for _, cap := range g.Config.Process.Capabilities.Permitted {
+ if strings.ToUpper(cap) == cp {
+ foundPermitted = true
+ break
+ }
+ }
+ if !foundPermitted {
+ g.Config.Process.Capabilities.Permitted = append(g.Config.Process.Capabilities.Permitted, cp)
+ }
+
+ return nil
+}
+
// AddProcessCapabilityAmbient adds a process capability into g.Config.Process.Capabilities.Ambient.
func (g *Generator) AddProcessCapabilityAmbient(c string) error {
cp := strings.ToUpper(c)
@@ -1190,6 +1250,42 @@ func (g *Generator) AddProcessCapabilityPermitted(c string) error {
return nil
}
+// DropProcessCapability drops a process capability from all 5 capability sets.
+func (g *Generator) DropProcessCapability(c string) error {
+ if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil {
+ return nil
+ }
+
+ cp := strings.ToUpper(c)
+ for i, cap := range g.Config.Process.Capabilities.Ambient {
+ if strings.ToUpper(cap) == cp {
+ g.Config.Process.Capabilities.Ambient = removeFunc(g.Config.Process.Capabilities.Ambient, i)
+ }
+ }
+ for i, cap := range g.Config.Process.Capabilities.Bounding {
+ if strings.ToUpper(cap) == cp {
+ g.Config.Process.Capabilities.Bounding = removeFunc(g.Config.Process.Capabilities.Bounding, i)
+ }
+ }
+ for i, cap := range g.Config.Process.Capabilities.Effective {
+ if strings.ToUpper(cap) == cp {
+ g.Config.Process.Capabilities.Effective = removeFunc(g.Config.Process.Capabilities.Effective, i)
+ }
+ }
+ for i, cap := range g.Config.Process.Capabilities.Inheritable {
+ if strings.ToUpper(cap) == cp {
+ g.Config.Process.Capabilities.Inheritable = removeFunc(g.Config.Process.Capabilities.Inheritable, i)
+ }
+ }
+ for i, cap := range g.Config.Process.Capabilities.Permitted {
+ if strings.ToUpper(cap) == cp {
+ g.Config.Process.Capabilities.Permitted = removeFunc(g.Config.Process.Capabilities.Permitted, i)
+ }
+ }
+
+ return validate.CapValid(cp, false)
+}
+
// DropProcessCapabilityAmbient drops a process capability from g.Config.Process.Capabilities.Ambient.
func (g *Generator) DropProcessCapabilityAmbient(c string) error {
if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil {
@@ -1533,14 +1629,82 @@ func (g *Generator) SetSolarisMilestone(milestone string) {
g.Config.Solaris.Milestone = milestone
}
+// SetVMHypervisorPath sets g.Config.VM.Hypervisor.Path
+func (g *Generator) SetVMHypervisorPath(path string) error {
+ if !strings.HasPrefix(path, "/") {
+ return fmt.Errorf("hypervisorPath %v is not an absolute path", path)
+ }
+ g.initConfigVMHypervisor()
+ g.Config.VM.Hypervisor.Path = path
+ return nil
+}
+
+// SetVMHypervisorParameters sets g.Config.VM.Hypervisor.Parameters
+func (g *Generator) SetVMHypervisorParameters(parameters []string) {
+ g.initConfigVMHypervisor()
+ g.Config.VM.Hypervisor.Parameters = parameters
+}
+
+// SetVMKernelPath sets g.Config.VM.Kernel.Path
+func (g *Generator) SetVMKernelPath(path string) error {
+ if !strings.HasPrefix(path, "/") {
+ return fmt.Errorf("kernelPath %v is not an absolute path", path)
+ }
+ g.initConfigVMKernel()
+ g.Config.VM.Kernel.Path = path
+ return nil
+}
+
+// SetVMKernelParameters sets g.Config.VM.Kernel.Parameters
+func (g *Generator) SetVMKernelParameters(parameters []string) {
+ g.initConfigVMKernel()
+ g.Config.VM.Kernel.Parameters = parameters
+}
+
+// SetVMKernelInitRD sets g.Config.VM.Kernel.InitRD
+func (g *Generator) SetVMKernelInitRD(initrd string) error {
+ if !strings.HasPrefix(initrd, "/") {
+ return fmt.Errorf("kernelInitrd %v is not an absolute path", initrd)
+ }
+ g.initConfigVMKernel()
+ g.Config.VM.Kernel.InitRD = initrd
+ return nil
+}
+
+// SetVMImagePath sets g.Config.VM.Image.Path
+func (g *Generator) SetVMImagePath(path string) error {
+ if !strings.HasPrefix(path, "/") {
+ return fmt.Errorf("imagePath %v is not an absolute path", path)
+ }
+ g.initConfigVMImage()
+ g.Config.VM.Image.Path = path
+ return nil
+}
+
+// SetVMImageFormat sets g.Config.VM.Image.Format
+func (g *Generator) SetVMImageFormat(format string) error {
+ switch format {
+ case "raw", "qcow2", "vdi", "vmdk", "vhd":
+ // Recognized image formats; nothing further to do here.
+ default:
+ return fmt.Errorf("image format %q is not supported; commonly supported formats are: raw, qcow2, vdi, vmdk, vhd", format)
+ }
+ g.initConfigVMImage()
+ g.Config.VM.Image.Format = format
+ return nil
+}
+
// SetWindowsHypervUntilityVMPath sets g.Config.Windows.HyperV.UtilityVMPath.
func (g *Generator) SetWindowsHypervUntilityVMPath(path string) {
g.initConfigWindowsHyperV()
g.Config.Windows.HyperV.UtilityVMPath = path
}
-// SetWinodwsIgnoreFlushesDuringBoot sets g.Config.Winodws.IgnoreFlushesDuringBoot.
-func (g *Generator) SetWinodwsIgnoreFlushesDuringBoot(ignore bool) {
+// SetWindowsIgnoreFlushesDuringBoot sets g.Config.Windows.IgnoreFlushesDuringBoot.
+func (g *Generator) SetWindowsIgnoreFlushesDuringBoot(ignore bool) {
g.initConfigWindows()
g.Config.Windows.IgnoreFlushesDuringBoot = ignore
}
@@ -1551,12 +1715,45 @@ func (g *Generator) AddWindowsLayerFolders(folder string) {
g.Config.Windows.LayerFolders = append(g.Config.Windows.LayerFolders, folder)
}
+// AddWindowsDevices adds or sets g.Config.Windows.Devices
+func (g *Generator) AddWindowsDevices(id, idType string) error {
+ if idType != "class" {
+ return fmt.Errorf("Invalid idType value: %s. Windows only supports a value of class", idType)
+ }
+ device := rspec.WindowsDevice{
+ ID: id,
+ IDType: idType,
+ }
+
+ g.initConfigWindows()
+ for i, device := range g.Config.Windows.Devices {
+ if device.ID == id {
+ g.Config.Windows.Devices[i].IDType = idType
+ return nil
+ }
+ }
+ g.Config.Windows.Devices = append(g.Config.Windows.Devices, device)
+ return nil
+}
+
// SetWindowsNetwork sets g.Config.Windows.Network.
func (g *Generator) SetWindowsNetwork(network rspec.WindowsNetwork) {
g.initConfigWindows()
g.Config.Windows.Network = &network
}
+// SetWindowsNetworkAllowUnqualifiedDNSQuery sets g.Config.Windows.Network.AllowUnqualifiedDNSQuery
+func (g *Generator) SetWindowsNetworkAllowUnqualifiedDNSQuery(setting bool) {
+ g.initConfigWindowsNetwork()
+ g.Config.Windows.Network.AllowUnqualifiedDNSQuery = setting
+}
+
+// SetWindowsNetworkNamespace sets g.Config.Windows.Network.NetworkNamespace
+func (g *Generator) SetWindowsNetworkNamespace(path string) {
+ g.initConfigWindowsNetwork()
+ g.Config.Windows.Network.NetworkNamespace = path
+}
+
// SetWindowsResourcesCPU sets g.Config.Windows.Resources.CPU.
func (g *Generator) SetWindowsResourcesCPU(cpu rspec.WindowsCPUResources) {
g.initConfigWindowsResources()
@@ -1575,8 +1772,8 @@ func (g *Generator) SetWindowsResourcesStorage(storage rspec.WindowsStorageResou
g.Config.Windows.Resources.Storage = &storage
}
-// SetWinodwsServicing sets g.Config.Winodws.Servicing.
-func (g *Generator) SetWinodwsServicing(servicing bool) {
+// SetWindowsServicing sets g.Config.Windows.Servicing.
+func (g *Generator) SetWindowsServicing(servicing bool) {
g.initConfigWindows()
g.Config.Windows.Servicing = servicing
}
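
For orientation, here is a minimal usage sketch of the setters added above (AddProcessCapability, DropProcessCapability, and the VM-related setters). The capability name, file paths, and image format are illustrative assumptions, not values taken from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	// Start from the default Linux spec.
	g, err := generate.New("linux")
	if err != nil {
		log.Fatal(err)
	}

	// AddProcessCapability adds the capability to all five sets at once;
	// DropProcessCapability removes it from all five again.
	if err := g.AddProcessCapability("CAP_NET_ADMIN"); err != nil {
		log.Fatal(err)
	}
	if err := g.DropProcessCapability("CAP_NET_ADMIN"); err != nil {
		log.Fatal(err)
	}

	// The VM setters lazily initialize Config.VM via the initConfigVM* helpers.
	if err := g.SetVMHypervisorPath("/usr/bin/qemu-system-x86_64"); err != nil {
		log.Fatal(err)
	}
	if err := g.SetVMKernelPath("/boot/vmlinuz"); err != nil {
		log.Fatal(err)
	}
	if err := g.SetVMImagePath("/var/lib/images/guest.qcow2"); err != nil {
		log.Fatal(err)
	}
	if err := g.SetVMImageFormat("qcow2"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(g.Config.VM.Image.Path, g.Config.VM.Image.Format)
}
```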
diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go
index 8d452c209..dcefafae7 100644
--- a/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go
+++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go
@@ -16,6 +16,7 @@ import (
rspec "github.com/opencontainers/runtime-spec/specs-go"
osFilepath "github.com/opencontainers/runtime-tools/filepath"
"github.com/opencontainers/runtime-tools/specerror"
+ "github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
)
@@ -226,5 +227,11 @@ func (v *Validator) CheckLinux() (errs error) {
}
}
+ if v.spec.Linux.MountLabel != "" {
+ if err := label.Validate(v.spec.Linux.MountLabel); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("mountLabel %v is invalid", v.spec.Linux.MountLabel))
+ }
+ }
+
return
}
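
As a small, self-contained sketch, the same check that CheckLinux now performs on linux.mountLabel can be run directly against label.Validate; the label string below is only an assumed example:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// The validator above flags linux.mountLabel values that fail
	// label.Validate; the same check can be run in isolation.
	mountLabel := "system_u:object_r:container_file_t:s0:c1,c2"
	if err := label.Validate(mountLabel); err != nil {
		fmt.Printf("mountLabel %v is invalid: %v\n", mountLabel, err)
		return
	}
	fmt.Println("mountLabel is valid")
}
```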
diff --git a/vendor/github.com/opencontainers/selinux/README.md b/vendor/github.com/opencontainers/selinux/README.md
deleted file mode 100644
index 043a92937..000000000
--- a/vendor/github.com/opencontainers/selinux/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# selinux
-
-[![GoDoc](https://godoc.org/github.com/opencontainers/selinux?status.svg)](https://godoc.org/github.com/opencontainers/selinux) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/selinux)](https://goreportcard.com/report/github.com/opencontainers/selinux) [![Build Status](https://travis-ci.org/opencontainers/selinux.svg?branch=master)](https://travis-ci.org/opencontainers/selinux)
-
-Common SELinux package used across the container ecosystem.
-
-Please see the [godoc](https://godoc.org/github.com/opencontainers/selinux) for more information.
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 4e9a8c54f..e178568fd 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -37,6 +37,14 @@ func SocketLabel() (string, error) {
return "", nil
}
+// SetKeyLabel is a no-op on builds without SELinux support.
+func SetKeyLabel(processLabel string) error {
+ return nil
+}
+
+// KeyLabel returns an empty label on builds without SELinux support.
+func KeyLabel() (string, error) {
+ return "", nil
+}
+
func FileLabel(path string) (string, error) {
return "", nil
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index d4e26909d..1eb9a6bf2 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -115,6 +115,17 @@ func SocketLabel() (string, error) {
return selinux.SocketLabel()
}
+// SetKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created
+func SetKeyLabel(processLabel string) error {
+ return selinux.SetKeyLabel(processLabel)
+}
+
+// KeyLabel retrieves the current default kernel keyring label setting
+func KeyLabel() (string, error) {
+ return selinux.KeyLabel()
+}
+
// ProcessLabel returns the process label that the kernel will assign
// to the next program executed by the current process. If "" is returned
// this indicates that the default labeling will happen for the process.
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 5adafd317..d7786c33c 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -333,6 +333,11 @@ func writeCon(fpath string, val string) error {
if fpath == "" {
return ErrEmptyPath
}
+ if val == "" {
+ if !GetEnabled() {
+ return nil
+ }
+ }
out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
if err != nil {
@@ -398,6 +403,24 @@ func SocketLabel() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()))
}
+// SetKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created
+func SetKeyLabel(label string) error {
+ err := writeCon("/proc/self/attr/keycreate", label)
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if label == "" && os.IsPermission(err) && !GetEnabled() {
+ return nil
+ }
+ return err
+}
+
+// KeyLabel retrieves the current kernel keyring label setting
+func KeyLabel() (string, error) {
+ return readCon("/proc/self/attr/keycreate")
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
if c["level"] != "" {
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index 9497acbd0..79b005d19 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -109,6 +109,17 @@ func SocketLabel() (string, error) {
return "", nil
}
+// SetKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created
+func SetKeyLabel(label string) error {
+ return nil
+}
+
+// KeyLabel retrieves the current kernel keyring label setting
+func KeyLabel() (string, error) {
+ return "", nil
+}
+
// Get returns the Context as a string
func (c Context) Get() string {
return ""
diff --git a/vendor/github.com/openshift/imagebuilder/.gitignore b/vendor/github.com/openshift/imagebuilder/.gitignore
new file mode 100644
index 000000000..56e98ad8a
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/.gitignore
@@ -0,0 +1 @@
+/imagebuilder
diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml
new file mode 100644
index 000000000..97b530b4f
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - "1.9"
+ - "1.10"
+
+install:
+
+script:
+ - make build
+ - make test
+
+notifications:
+ irc: "chat.freenode.net#openshift-dev"
+
+sudo: false
diff --git a/vendor/github.com/openshift/imagebuilder/Makefile b/vendor/github.com/openshift/imagebuilder/Makefile
new file mode 100644
index 000000000..1cbb26eed
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/Makefile
@@ -0,0 +1,11 @@
+build:
+ go build ./cmd/imagebuilder
+.PHONY: build
+
+test:
+ go test $$(go list ./... | grep -v /vendor/)
+.PHONY: test
+
+test-conformance:
+ go test -v -tags conformance -timeout 10m ./dockerclient
+.PHONY: test-conformance
diff --git a/vendor/github.com/openshift/imagebuilder/OWNERS b/vendor/github.com/openshift/imagebuilder/OWNERS
new file mode 100644
index 000000000..6a900fd07
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/OWNERS
@@ -0,0 +1,8 @@
+reviewers:
+- bparees
+approvers:
+- TomSweeneyRedHat
+- mrunalp
+- nalind
+- rhatdan
+- smarterclayton
diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec
new file mode 100644
index 000000000..07c880166
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec
@@ -0,0 +1,63 @@
+
+#debuginfo not supported with Go
+%global debug_package %{nil}
+
+# modifying the Go binaries breaks the DWARF debugging
+%global __os_install_post %{_rpmconfigdir}/brp-compress
+
+%{!?commit: %global commit HEAD }
+
+#
+# Customize from here.
+#
+
+%global golang_version 1.8.1
+%{!?version: %global version 1.1-dev}
+%{!?release: %global release 1}
+%global package_name imagebuilder
+%global product_name Container Image Builder
+%global import_path github.com/openshift/imagebuilder
+
+Name: %{package_name}
+Version: %{version}
+Release: %{release}%{?dist}
+Summary: Builds Dockerfile using the Docker client
+License: ASL 2.0
+URL: https://%{import_path}
+
+Source0: https://%{import_path}/archive/%{commit}/%{name}-%{version}.tar.gz
+BuildRequires: golang >= %{golang_version}
+
+### AUTO-BUNDLED-GEN-ENTRY-POINT
+
+%description
+Builds Dockerfile using the Docker client
+
+%prep
+GOPATH=$RPM_BUILD_DIR/go
+rm -rf $GOPATH
+mkdir -p $GOPATH/{src/github.com/openshift,bin,pkg}
+%setup -q -c -n imagebuilder
+cd ..
+mv imagebuilder $GOPATH/src/github.com/openshift/imagebuilder
+ln -s $GOPATH/src/github.com/openshift/imagebuilder imagebuilder
+
+%build
+export GOPATH=$RPM_BUILD_DIR/go
+cd $GOPATH/src/github.com/openshift/imagebuilder
+go install ./cmd/imagebuilder
+
+%install
+
+install -d %{buildroot}%{_bindir}
+install -p -m 755 $RPM_BUILD_DIR/go/bin/imagebuilder %{buildroot}%{_bindir}/imagebuilder
+
+%files
+%doc README.md
+%license LICENSE
+%{_bindir}/imagebuilder
+
+%pre
+
+%changelog
+
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 000000000..c57100a59
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 000000000..8d5b75e41
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+ include:
+ - go: "1.11.x"
+ - go: "1.12.x"
+ - go: "tip"
+ env:
+ - LINT=true
+ - COVERAGE=true
+
+install:
+ - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+ - go get -u github.com/stretchr/testify/...
+
+script:
+ - make test
+ - go build ./...
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+ - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 000000000..7c14febe1
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,46 @@
+Changes by Version
+==================
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
+- Update license to Apache 2.0 (#181) <Andrea Kao>
+- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
+- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
+- Fix race condition in MockSpan.Context() (#170) <Brad>
+- Add PeerHostIPv4.SetString() (#155) <NeoCN>
+- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139) <Rustam Zagirov>
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments <Ben Sigelman>
+- Address race in nextMockID() (#123) <bill fumerola>
+- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
+- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
+- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
+- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 000000000..62abb63f5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+ go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+ go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ golint ./...
+ @# Run again with magic to exit non-zero if golint outputs anything.
+ @! (golint ./... | read dummy)
+ go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
index 05a62e70b..08c00c04e 100644
--- a/vendor/github.com/opentracing/opentracing-go/gocontext.go
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -41,11 +41,17 @@ func SpanFromContext(ctx context.Context) Span {
// ...
// }
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
- return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+ return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
}
-// startSpanFromContextWithTracer is factored out for testing purposes.
-func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If that doesn't exist
+// it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext except that it takes an explicit
+// tracer as opposed to using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
if parentSpan := SpanFromContext(ctx); parentSpan != nil {
opts = append(opts, ChildOf(parentSpan.Context()))
}
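
A minimal sketch of the newly exported StartSpanFromContextWithTracer; the operation name and the NoopTracer stand-in are illustrative assumptions:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// handleRequest uses the newly exported StartSpanFromContextWithTracer: it
// behaves like StartSpanFromContext, but takes an explicit tracer instead of
// relying on the global one.
func handleRequest(ctx context.Context, tracer opentracing.Tracer) {
	span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "handle-request")
	defer span.Finish()

	_ = ctx // pass ctx on to downstream calls; it now carries the span
}

func main() {
	// NoopTracer stands in for a real tracer implementation here.
	handleRequest(context.Background(), opentracing.NoopTracer{})
}
```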
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
index 7bca1f736..715f0cedf 100644
--- a/vendor/github.com/opentracing/opentracing-go/tracer.go
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -44,8 +44,7 @@ type Tracer interface {
// and each has an expected carrier type.
//
// Other packages may declare their own `format` values, much like the keys
- // used by `context.Context` (see
- // https://godoc.org/golang.org/x/net/context#WithValue).
+ // used by `context.Context` (see https://godoc.org/context#WithValue).
//
// Example usage (sans error handling):
//
diff --git a/vendor/github.com/ostreedev/ostree-go/README.md b/vendor/github.com/ostreedev/ostree-go/README.md
deleted file mode 100644
index c79010a0f..000000000
--- a/vendor/github.com/ostreedev/ostree-go/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-OSTree-Go
-=========
-
-Go bindings for OSTree. Find out more about OSTree [here](https://github.com/ostreedev/ostree)
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 000000000..d4b92663b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 000000000..a932eade0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml
new file mode 100644
index 000000000..1c9e6bb6b
--- /dev/null
+++ b/vendor/github.com/pkg/profile/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/profile
+go:
+ - 1.10.x
+ - 1.12.x
+ - tip
+
+script:
+ - go test github.com/pkg/profile
+ - go test -race github.com/pkg/profile
diff --git a/vendor/github.com/pkg/profile/AUTHORS b/vendor/github.com/pkg/profile/AUTHORS
new file mode 100644
index 000000000..00441d354
--- /dev/null
+++ b/vendor/github.com/pkg/profile/AUTHORS
@@ -0,0 +1 @@
+Dave Cheney <dave@cheney.net>
diff --git a/vendor/github.com/pkg/profile/profile.go b/vendor/github.com/pkg/profile/profile.go
index c44913a4c..20e285427 100644
--- a/vendor/github.com/pkg/profile/profile.go
+++ b/vendor/github.com/pkg/profile/profile.go
@@ -19,6 +19,7 @@ const (
mutexMode
blockMode
traceMode
+ threadCreateMode
)
// Profile represents an active profiling session.
@@ -83,17 +84,20 @@ func MemProfileRate(rate int) func(*Profile) {
// MutexProfile enables mutex profiling.
// It disables any previous profiling settings.
-//
-// Mutex profiling is a no-op before go1.8.
func MutexProfile(p *Profile) { p.mode = mutexMode }
// BlockProfile enables block (contention) profiling.
// It disables any previous profiling settings.
func BlockProfile(p *Profile) { p.mode = blockMode }
-// Trace profile controls if execution tracing will be enabled. It disables any previous profiling settings.
+// Trace profile enables execution tracing.
+// It disables any previous profiling settings.
func TraceProfile(p *Profile) { p.mode = traceMode }
+// ThreadcreationProfile enables thread creation profiling.
+// It disables any previous profiling settings.
+func ThreadcreationProfile(p *Profile) { p.mode = threadCreateMode }
+
// ProfilePath controls the base path where various profiling
// files are written. If blank, the base path will be generated
// by ioutil.TempDir.
@@ -211,6 +215,21 @@ func Start(options ...func(*Profile)) interface {
logf("profile: block profiling disabled, %s", fn)
}
+ case threadCreateMode:
+ fn := filepath.Join(path, "threadcreation.pprof")
+ f, err := os.Create(fn)
+ if err != nil {
+ log.Fatalf("profile: could not create thread creation profile %q: %v", fn, err)
+ }
+ logf("profile: thread creation profiling enabled, %s", fn)
+ prof.closer = func() {
+ if mp := pprof.Lookup("threadcreate"); mp != nil {
+ mp.WriteTo(f, 0)
+ }
+ f.Close()
+ logf("profile: thread creation profiling disabled, %s", fn)
+ }
+
case traceMode:
fn := filepath.Join(path, "trace.out")
f, err := os.Create(fn)
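
A minimal sketch of selecting the new thread-creation mode through ThreadcreationProfile; writing to the current directory via ProfilePath is an assumed choice:

```go
package main

import "github.com/pkg/profile"

func main() {
	// The new threadCreateMode is selected with ThreadcreationProfile; on
	// Stop it writes threadcreation.pprof into the directory given to
	// ProfilePath (here, the current directory).
	defer profile.Start(profile.ThreadcreationProfile, profile.ProfilePath(".")).Stop()

	// ... the workload whose OS-thread creation is being profiled ...
}
```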
diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md
deleted file mode 100644
index e87f307ed..000000000
--- a/vendor/github.com/pmezard/go-difflib/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-go-difflib
-==========
-
-[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib)
-[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib)
-
-Go-difflib is a partial port of the Python 3 difflib package. Its main goal
-was to make unified and context diff available in pure Go, mostly for
-testing purposes.
-
-The following class and functions (and related tests) have been ported:
-
-* `SequenceMatcher`
-* `unified_diff()`
-* `context_diff()`
-
-## Installation
-
-```bash
-$ go get github.com/pmezard/go-difflib/difflib
-```
-
-### Quick Start
-
-Diffs are configured with Unified (or ContextDiff) structures, and can
-be output to an io.Writer or returned as a string.
-
-```Go
-diff := UnifiedDiff{
- A: difflib.SplitLines("foo\nbar\n"),
- B: difflib.SplitLines("foo\nbaz\n"),
- FromFile: "Original",
- ToFile: "Current",
- Context: 3,
-}
-text, _ := GetUnifiedDiffString(diff)
-fmt.Printf(text)
-```
-
-would output:
-
-```
---- Original
-+++ Current
-@@ -1,3 +1,3 @@
- foo
--bar
-+baz
-```
-
diff --git a/vendor/github.com/pquerna/ffjson/README.md b/vendor/github.com/pquerna/ffjson/README.md
deleted file mode 100644
index 30b239f10..000000000
--- a/vendor/github.com/pquerna/ffjson/README.md
+++ /dev/null
@@ -1,232 +0,0 @@
-# ffjson: faster JSON for Go
-
-[![Build Status](https://travis-ci.org/pquerna/ffjson.svg?branch=master)](https://travis-ci.org/pquerna/ffjson)
-
-`ffjson` generates static `MarshalJSON` and `UnmarshalJSON` functions for structures in Go. The generated functions reduce the reliance upon runtime reflection to do serialization and are generally 2 to 3 times faster. In cases where `ffjson` doesn't understand a Type involved, it falls back to `encoding/json`, meaning it is a safe drop in replacement. By using `ffjson` your JSON serialization just gets faster with no additional code changes.
-
-When you change your `struct`, you will need to run `ffjson` again (or make it part of your build tools).
-
-## Blog Posts
-
-* 2014-03-31: [First Release and Background](https://journal.paul.querna.org/articles/2014/03/31/ffjson-faster-json-in-go/)
-
-## Getting Started
-
-If `myfile.go` contains the `struct` types you would like to be faster, and assuming `GOPATH` is set to a reasonable value for an existing project (meaning that in this particular example if `myfile.go` is in the `myproject` directory, the project should be under `$GOPATH/src/myproject`), you can just run:
-
- go get -u github.com/pquerna/ffjson
- ffjson myfile.go
- git add myfile_ffjson.go
-
-
-## Performance Status:
-
-* `MarshalJSON` is **2x to 3x** faster than `encoding/json`.
-* `UnmarshalJSON` is **2x to 3x** faster than `encoding/json`.
-
-## Features
-
-* **Unmarshal Support:** Since v0.9, `ffjson` supports Unmarshaling of structures.
-* **Drop in Replacement:** Because `ffjson` implements the interfaces already defined by `encoding/json` the performance enhancements are transparent to users of your structures.
-* **Supports all types:** `ffjson` has native support for most of Go's types -- for any type it doesn't support with fast paths, it falls back to using `encoding/json`. This means all structures should work out of the box. If they don't, [open an issue!](https://github.com/pquerna/ffjson/issues)
-* **ffjson: skip**: If you have a structure you want `ffjson` to ignore, add `ffjson: skip` to the doc string for this structure.
-* **Extensive Tests:** `ffjson` contains an extensive test suite including fuzz'ing against the JSON parser.
-
-
-# Using ffjson
-
-`ffjson` generates code based upon existing `struct` types. For example, `ffjson foo.go` will by default create a new file `foo_ffjson.go` that contains serialization functions for all structs found in `foo.go`.
-
-```
-Usage of ffjson:
-
- ffjson [options] [input_file]
-
-ffjson generates Go code for optimized JSON serialization.
-
- -go-cmd="": Path to go command; Useful for `goapp` support.
- -import-name="": Override import name in case it cannot be detected.
- -nodecoder: Do not generate decoder functions
- -noencoder: Do not generate encoder functions
- -w="": Write generate code to this path instead of ${input}_ffjson.go.
-```
-
-Your code must be in a compilable state for `ffjson` to work. If your code doesn't compile, ffjson will most likely exit with an error.
-
-## Disabling code generation for structs
-
-You might not want all your structs to have JSON code generated. To completely disable generation for a struct, add `ffjson: skip` to the struct comment. For example:
-
-```Go
-// ffjson: skip
-type Foo struct {
- Bar string
-}
-```
-
-You can also choose not to have either the decoder or encoder generated by including `ffjson: nodecoder` or `ffjson: noencoder` in your comment. For instance, this will only generate the encoder (marshal) part for this struct:
-
-```Go
-// ffjson: nodecoder
-type Foo struct {
- Bar string
-}
-```
-
-You can also disable encoders/decoders entirely for a file by using the `-noencoder`/`-nodecoder` commandline flags.
-
-## Using ffjson with `go generate`
-
-`ffjson` is a great fit with `go generate`. It allows you to specify the ffjson command inside your individual go files and run them all at once. This way you don't have to maintain a separate build file with the files you need to generate.
-
-Add this comment anywhere inside your go files:
-
-```Go
-//go:generate ffjson $GOFILE
-```
-
-To re-generate ffjson for all files with the tag in a folder, simply execute:
-
-```sh
-go generate
-```
-
-To generate for the current package and all sub-packages, use:
-
-```sh
-go generate ./...
-```
-This is most of what you need to know about go generate, but you can read more about [go generate on the golang blog](http://blog.golang.org/generate).
-
-## Should I include ffjson files in VCS?
-
-That question is really up to you. If you don't, you will have a more complex build process. If you do, you have to keep the generated files updated if you change the content of your structs.
-
-That said, ffjson operates deterministically, so it will generate the same code every time it is run; unless your code changes, the generated content should not change. Note however that this is only true if you are using the same ffjson version, so if you have several people working on a project, you might need to synchronize your ffjson versions.
-
-## Performance pitfalls
-
-`ffjson` has a few cases where it will fall back to using the runtime encoder/decoder. Notable cases are:
-
-* Interface struct members. Since it isn't possible to know the concrete type of these fields before runtime, ffjson has to use the reflect-based coder.
-* Structs with custom marshal/unmarshal.
-* Maps with a complex value. Simple types like `map[string]int` are fine though.
-* Inline struct definitions `type A struct{B struct{ X int} }` are handled by the encoder, but currently fall back in the decoder.
-* Slices of slices / slices of maps currently fall back when generating the decoder.
-
-## Reducing Garbage Collection
-
-`ffjson` already does a lot to reduce garbage generation. However, every call to json.Marshal hands you back a new byte slice. On very high-throughput servers this can lead to increased GC pressure.
-
-### Tip 1: Use ffjson.Marshal() / ffjson.Unmarshal()
-
-This is probably the easiest optimization for you. Instead of going through encoding/json, you can call ffjson. This will disable the checks that encoding/json does to the json when it receives it from struct functions.
-
-```Go
- import "github.com/pquerna/ffjson/ffjson"
-
- // BEFORE:
- buf, err := json.Marshal(&item)
-
- // AFTER:
- buf, err := ffjson.Marshal(&item)
-```
-This simple change is likely to double the speed of your encoding/decoding.
-
-
-[![GoDoc][1]][2]
-[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
-[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Marshal
-
-### Tip 2: Pooling the buffer
-
-On servers where you have a lot of concurrent encoding going on, you can hand back the byte buffer you get from json.Marshal once you are done using it. An example could look like this:
-```Go
-import "github.com/pquerna/ffjson/ffjson"
-
-func Encode(item interface{}, out io.Writer) {
- // Encode
- buf, err := ffjson.Marshal(&item)
-
- // Write the buffer
- _,_ = out.Write(buf)
-
- // We no longer need the buffer, so we pool it.
- ffjson.Pool(buf)
-}
-```
-Note that the buffers you put back in the pool can still be reclaimed by the garbage collector, so you won't risk your program building up high memory use by pooling the buffers.
-
-[![GoDoc][1]][2]
-[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
-[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Pool
-
-### Tip 3: Creating an Encoder
-
-There might be cases where you need to encode many objects at once. This could be a server backing up, writing a lot of entries to files, etc.
-
-To do this, there is an interface similar to `encoding/json` that allows you to create a re-usable encoder. Here is an example where we want to encode an array of the `Item` type, with a comma between entries:
-```Go
-import "github.com/pquerna/ffjson/ffjson"
-
-func EncodeItems(items []Item, out io.Writer) {
- // We create an encoder.
- enc := ffjson.NewEncoder(out)
-
- for i, item := range items {
- // Encode into the buffer
- err := enc.Encode(&item)
-
- // If err is nil, the content is written to out, so we can write to it as well.
- if i != len(items) -1 {
- _,_ = out.Write([]byte{','})
- }
- }
-}
-```
-
-
-Documentation: [![GoDoc][1]][2]
-[1]: https://godoc.org/github.com/pquerna/ffjson/ffjson?status.svg
-[2]: https://godoc.org/github.com/pquerna/ffjson/ffjson#Encoder
-
-## Tip 4: Avoid interfaces
-
-We don't want to dictate how you structure your data, but having interfaces in your code will make ffjson use the golang encoder for these. When ffjson has to do this, it may even become slower than using `json.Marshal` directly.
-
-To see where that happens, search the generated `_ffjson.go` file for the text `Falling back`, which will indicate where ffjson is unable to generate code for your data structure.
-
-## Tip 5: `ffjson` all the things!
-
-You should not only create ffjson code for your main struct, but also any structs that is included/used in your json code.
-
-So if your struct looks like this:
-```Go
-type Foo struct {
- V Bar
-}
-```
-You should also make sure that code is generated for `Bar` if it is placed in another file. Also note that currently it requires you to do this in order, since generating code for `Foo` will check if code for `Bar` exists. This is only an issue if `Foo` and `Bar` are placed in different files. We are currently working on allowing simultaneous generation of an entire package.
-
-
-## Improvements, bugs, adding features, and taking ffjson new directions!
-
-Please [open issues in Github](https://github.com/pquerna/ffjson/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :)
-
-## Similar projects
-
-* [go-codec](https://github.com/ugorji/go/tree/master/codec#readme). Very good project, that also allows streaming en/decoding, but requires you to call the library to use.
-* [megajson](https://github.com/benbjohnson/megajson). This has limited support, and development seems to have almost stopped at the time of writing.
-
-# Credits
-
-`ffjson` has received significant contributions from:
-
-* [Klaus Post](https://github.com/klauspost)
-* [Paul Querna](https://github.com/pquerna)
-* [Erik Dubbelboer](https://github.com/erikdubbelboer)
-
-## License
-
-`ffjson` is licensed under the [Apache License, Version 2.0](./LICENSE)
-
diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
index 261eeb9e9..261eeb9e9 100644
--- a/vendor/github.com/containerd/cgroups/LICENSE
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 000000000..dd878a30e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 000000000..3460f0346
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 000000000..44986bff0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
new file mode 100644
index 000000000..288f0e854
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+package prometheus
+
+import "runtime/debug"
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
+func readBuildInfo() (path, version, sum string) {
+ path, version, sum = "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
new file mode 100644
index 000000000..6609e2877
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.12
+
+package prometheus
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
+// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
+func readBuildInfo() (path, version, sum string) {
+ return "unknown", "unknown", "unknown"
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 000000000..1e839650d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation.
+ //
+ // It is valid if one and the same Collector sends duplicate
+ // descriptors. Those duplicates are simply ignored. However, two
+ // different Collectors must not send duplicate descriptors.
+ //
+ // Sending no descriptor at all marks the Collector as “unchecked”,
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ //
+ // This method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
+ //
+ // If a Collector encounters an error while executing this method, it
+ // must send an invalid descriptor (created with NewInvalidDesc) to
+ // signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by Describe
+ // (unless the Collector is unchecked, see above). Returned metrics that
+ // share the same descriptor must differ in their variable label
+ // values.
+ //
+ // This method may be called concurrently and must therefore be
+ // implemented in a concurrency safe way. Blocking occurs at the expense
+ // of total performance of rendering all registered metrics. Ideally,
+ // Collector implementations support concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+ metrics := make(chan Metric)
+ go func() {
+ c.Collect(metrics)
+ close(metrics)
+ }()
+ for m := range metrics {
+ descs <- m.Desc()
+ }
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
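
A minimal sketch of a custom Collector built on the interface and the DescribeByCollect helper shown above; the metric name, the constant value, and serving via promhttp are illustrative assumptions:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// queueCollector is a small custom Collector. It creates its metric on the
// fly in Collect and reuses DescribeByCollect for Describe, as the interface
// documentation above suggests.
type queueCollector struct {
	depth *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// A constant stands in for a real measurement.
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&queueCollector{
		depth: prometheus.NewDesc("demo_queue_depth", "Current queue depth.", nil, nil),
	})
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```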
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 000000000..d463e36d3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,277 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "math"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ // valBits contains the bits of the represented float64 value, while
+ // valInt stores values that are exact integers. Both have to go first
+ // in the struct to guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+ valInt uint64
+
+ selfCollector
+ desc *Desc
+
+ labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+ return c.desc
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ ival := uint64(v)
+ if float64(ival) == v {
+ atomic.AddUint64(&c.valInt, ival)
+ return
+ }
+
+ for {
+ oldBits := atomic.LoadUint64(&c.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (c *counter) Inc() {
+ atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+ ival := atomic.LoadUint64(&c.valInt)
+ val := fval + float64(ival)
+
+ return populateMetric(CounterValue, val, c.labelPairs, out)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+ *metricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ }
+ result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+ c, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
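+
+// Hedged usage sketch for the WithLabelValues shortcut; the metric and label
+// names are invented for illustration:
+//
+//    httpReqs := prometheus.NewCounterVec(
+//        prometheus.CounterOpts{
+//            Name: "http_requests_total",
+//            Help: "Number of HTTP requests, partitioned by code and method.",
+//        },
+//        []string{"code", "method"},
+//    )
+//    prometheus.MustRegister(httpReqs)
+//    httpReqs.WithLabelValues("404", "GET").Add(42)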
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+ c, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &CounterVec{vec}, err
+ }
+ return nil, err
+}
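+
+// Sketch of currying, assuming the httpReqs vector from the sketch above; not
+// authoritative:
+//
+//    getReqs, err := httpReqs.CurryWith(prometheus.Labels{"method": "GET"})
+//    if err != nil {
+//        // Handle inconsistent or unknown label names.
+//    }
+//    // Only the remaining "code" label has to be provided now.
+//    getReqs.WithLabelValues("200").Inc()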
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
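+
+// Minimal sketch of a CounterFunc mirroring an externally maintained,
+// monotonically increasing value; readBytesTotal is a hypothetical
+// concurrency-safe function assumed for the example:
+//
+//    bytesRead := prometheus.NewCounterFunc(
+//        prometheus.CounterOpts{
+//            Name: "bytes_read_total",
+//            Help: "Total number of bytes read from the data source.",
+//        },
+//        func() float64 { return readBytesTotal() },
+//    )
+//    prometheus.MustRegister(bytesRead)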
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 000000000..1d034f871
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Validate the const label values. They can't have a wrong cardinality, so
+ // use len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(labelPairSorter(d.constLabelPairs))
+ return d
+}
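+
+// Hedged example of constructing a Desc for use in a custom Collector; all
+// names and label values are invented for illustration:
+//
+//    queueLengthDesc := prometheus.NewDesc(
+//        "worker_queue_length",
+//        "Current number of items in the worker queue.",
+//        []string{"queue"},                  // variable labels
+//        prometheus.Labels{"shard": "eu-1"}, // const labels
+//    )
+//    // Typically used later in a Collect method, e.g.:
+//    //   prometheus.MustNewConstMetric(queueLengthDesc, prometheus.GaugeValue, 17, "inbound")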
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 000000000..01977de66
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,200 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow exposing the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already seen the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
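+// As a hedged, minimal sketch (all names are invented for illustration, and
+// currentQueueLength is a hypothetical function of the instrumented system),
+// such a custom Collector could look like this:
+//
+//    type queueCollector struct {
+//        lengthDesc *prometheus.Desc
+//    }
+//
+//    func (qc queueCollector) Describe(ch chan<- *prometheus.Desc) {
+//        ch <- qc.lengthDesc
+//    }
+//
+//    func (qc queueCollector) Collect(ch chan<- prometheus.Metric) {
+//        ch <- prometheus.MustNewConstMetric(
+//            qc.lengthDesc, prometheus.GaugeValue, float64(currentQueueLength()),
+//        )
+//    }
+//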
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is by far the most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties (see NewPedanticRegistry). You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 000000000..18a99d5fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
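+
+// Hedged usage sketch; it assumes the instrumented program already publishes a
+// numeric expvar, e.g. via expvar.NewInt("open_connections"):
+//
+//    expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
+//        "open_connections": prometheus.NewDesc(
+//            "expvar_open_connections",
+//            "Number of open connections, proxied from expvar.",
+//            nil, nil,
+//        ),
+//    })
+//    prometheus.MustRegister(expvarCollector)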
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..3d383a735
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..71d406bd9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
+func NewGauge(opts GaugeOpts) Gauge {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
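+
+// Illustrative sketch only; the metric name is an assumption:
+//
+//    queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
+//        Name: "worker_queue_depth",
+//        Help: "Current number of queued jobs.",
+//    })
+//    prometheus.MustRegister(queueDepth)
+//    queueDepth.Set(12) // Set is the fast path for this implementation.
+//    queueDepth.Inc()
+//    queueDepth.Sub(3)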
+
+type gauge struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+ return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+ atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+ g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+ g.Add(1)
+}
+
+func (g *gauge) Dec() {
+ g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&g.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (g *gauge) Sub(val float64) {
+ g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+ return populateMetric(GaugeValue, val, g.labelPairs, out)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *metricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ }
+ result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ g, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+ g, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &GaugeVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
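+
+// Minimal sketch of a GaugeFunc; runtime.NumGoroutine (standard library) is
+// used here only as a convenient concurrency-safe value source:
+//
+//    goroutines := prometheus.NewGaugeFunc(
+//        prometheus.GaugeOpts{
+//            Name: "app_goroutines",
+//            Help: "Number of goroutines currently running.",
+//        },
+//        func() float64 { return float64(runtime.NumGoroutine()) },
+//    )
+//    prometheus.MustRegister(goroutines)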
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 000000000..dc9247fed
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,396 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "runtime"
+ "runtime/debug"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+ goInfoDesc *Desc
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector returns a collector that exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This requires “stopping the world”, which usually only happens for
+// garbage collection (GC). Take the following implications into account when
+// deciding whether to use the Go collector:
+//
+// 1. The performance impact of stopping the world becomes more relevant the more
+// frequently metrics are collected. However, with Go1.9 or later the
+// stop-the-world time per metrics collection is very short (~25µs) so that the
+// performance impact will only matter in rare cases. However, with older Go
+// versions, the stop-the-world duration depends on the heap size and can be
+// quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
+//
+// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+// metrics collection happens to coincide with GC, it will only complete after
+// GC has finished. Usually, GC is fast enough to not cause problems. However,
+// with a very large heap, GC might take multiple seconds, which is enough to
+// cause scrape timeouts in common setups. To avoid this problem, the Go
+// collector will use the memstats from a previous collection if
+// runtime.ReadMemStats takes more than 1s. However, if there are no previously
+// collected memstats, or their collection is more than 5m ago, the collection
+// will block until runtime.ReadMemStats succeeds. (The problem might be solved
+// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
+// issue.)
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created.",
+ nil, nil),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
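+
+// Hedged sketch of using the Go collector with a custom registry (the default
+// registry already includes it, as described in the package documentation):
+//
+//    reg := prometheus.NewRegistry()
+//    reg.MustRegister(prometheus.NewGoCollector())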
+
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
+ ch <- c.gcDesc
+ ch <- c.goInfoDesc
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := readBuildInfo()
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
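+
+// Hedged usage sketch:
+//
+//    prometheus.MustRegister(prometheus.NewBuildInfoCollector())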
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 000000000..d7ea67bd2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,586 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
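+
+// Hedged examples of both bucket helpers; the resulting slices follow directly
+// from the definitions above:
+//
+//    prometheus.LinearBuckets(20, 5, 5)        // => []float64{20, 25, 30, 35, 40}
+//    prometheus.ExponentialBuckets(100, 2, 4)  // => []float64{100, 200, 400, 800}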
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make buckets
+ // for both counts:
+ h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogramCounts struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+ buckets []uint64
+}
+
+type histogram struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finishes by incrementing the count field in the respective
+ // histogramCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the histogram) swap the hot and cold counts under the writeMtx
+ // lock. A cooldown is awaited (while locked) by comparing the number of
+ // observations with the initiation count. Once they match, then the
+ // last observation on the now cool one has completed. All cool fields must
+ // be merged into the new hot before releasing writeMtx.
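+ //
+ // Concretely, for a value n read from this field, the hot index is
+ // n>>63 and the number of initiated observations is n&((1<<63)-1),
+ // which is how Observe and Write below decode it.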
+ //
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
+
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+
+ // We increment h.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1)
+ hotCounts := h.counts[n>>63]
+
+ if i < len(h.upperBounds) {
+ atomic.AddUint64(&hotCounts.buckets[i], 1)
+ }
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ h.writeMtx.Lock()
+ defer h.writeMtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := h.counts[n>>63]
+ coldCounts := h.counts[(^n)>>63]
+
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ his := &dto.Histogram{
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
+ var cumCount uint64
+ for i, upperBound := range h.upperBounds {
+ cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+ his.Bucket[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(cumCount),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ for i := range h.upperBounds {
+ atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+ atomic.StoreUint64(&coldCounts.buckets[i], 0)
+ }
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *metricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
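+//
+// For example (label names and values are illustrative):
+//
+//	reqDur := NewHistogramVec(HistogramOpts{
+//		Name: "http_request_duration_seconds",
+//		Help: "HTTP request latencies in seconds.",
+//	}, []string{"code", "method"})
+//	reqDur.WithLabelValues("200", "GET").Observe(0.021)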
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ h, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+ h, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
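+//
+// For example, continuing the illustrative reqDur vector from above
+// (labels are hypothetical):
+//
+//	getOnly := reqDur.MustCurryWith(Labels{"method": "GET"})
+//	getOnly.WithLabelValues("200").Observe(0.007)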
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &HistogramVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
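+//
+// For example (all numbers are illustrative):
+//
+//	desc := NewDesc("http_request_duration_seconds", "Request latencies.", nil, nil)
+//	m, err := NewConstHistogram(desc, 4711, 403.42,
+//		map[float64]uint64{0.05: 1121, 0.1: 2403, 0.5: 4233},
+//	)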
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 000000000..351c26e1a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sort"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 000000000..2744443ac
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,87 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+ return fmt.Errorf(
+ "%s: %q has %d variable labels named %q but %d values %q were provided",
+ errInconsistentCardinality, fqName,
+ len(labels), labels,
+ len(labelValues), labelValues,
+ )
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return fmt.Errorf(
+ "%s: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(labels), labels,
+ )
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ return fmt.Errorf(
+ "%s: expected %d label values but got %d in %#v",
+ errInconsistentCardinality, expectedNumberOfValues,
+ len(vals), vals,
+ )
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 000000000..55e6d86d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. Callers of Write should
+ // still make sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
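+//
+// For example, BuildFQName("myapp", "api", "requests_total") returns
+// "myapp_api_requests_total", while BuildFQName("", "", "up") returns "up".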
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
+
+func (s labelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s labelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s labelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+ Metric
+ t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+ e := m.Metric.Write(pb)
+ pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+ return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+ return timestampedMetric{Metric: m, t: t}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 000000000..5806cd09e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
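+//
+// For example, to use a Gauge g as the Observer of a Timer (a sketch with a
+// hypothetical gauge):
+//
+//	timer := NewTimer(ObserverFunc(g.Set))
+//	defer timer.ObserveDuration()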
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+ CurryWith(Labels) (ObserverVec, error)
+ MustCurryWith(Labels) ObserverVec
+
+ Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 000000000..9b8097942
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,151 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "os"
+)
+
+type processCollector struct {
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
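+//
+// A typical use, registering the collector for the current process with the
+// default registry:
+//
+//	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))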
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+ ns := ""
+ if len(opts.Namespace) > 0 {
+ ns = opts.Namespace + "_"
+ }
+
+ c := &processCollector{
+ reportErrors: opts.ReportErrors,
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ maxVsize: NewDesc(
+ ns+"process_virtual_memory_max_bytes",
+ "Maximum amount of virtual memory available in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ }
+
+ if opts.PidFn == nil {
+ pid := os.Getpid()
+ c.pidFn = func() (int, error) { return pid, nil }
+ } else {
+ c.pidFn = opts.PidFn
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if canCollectProcess() {
+ c.collectFn = c.processCollect
+ } else {
+ c.collectFn = func(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+ }
+ }
+
+ return c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+ if !c.reportErrors {
+ return
+ }
+ if desc == nil {
+ desc = NewInvalidDesc(err)
+ }
+ ch <- NewInvalidMetric(desc, err)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
new file mode 100644
index 000000000..3117461cd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -0,0 +1,65 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package prometheus
+
+import (
+ "github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+ _, err := procfs.NewDefaultFS()
+ return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ if stat, err := p.Stat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if limits, err := p.Limits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
new file mode 100644
index 000000000..e0b935d1f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -0,0 +1,112 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+ return true
+}
+
+var (
+ modpsapi = syscall.NewLazyDLL("psapi.dll")
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
+ procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+ // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+ _ uint32
+ PageFaultCount uint32
+ PeakWorkingSetSize uint64
+ WorkingSetSize uint64
+ QuotaPeakPagedPoolUsage uint64
+ QuotaPagedPoolUsage uint64
+ QuotaPeakNonPagedPoolUsage uint64
+ QuotaNonPagedPoolUsage uint64
+ PagefileUsage uint64
+ PeakPagefileUsage uint64
+ PrivateUsage uint64
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+ mem := processMemoryCounters{}
+ r1, _, err := procGetProcessMemoryInfo.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&mem)),
+ uintptr(unsafe.Sizeof(mem)),
+ )
+ if r1 != 1 {
+ return mem, err
+ }
+ return mem, nil
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+ var count uint32
+ r1, _, err := procGetProcessHandleCount.Call(
+ uintptr(handle),
+ uintptr(unsafe.Pointer(&count)),
+ )
+ if r1 != 1 {
+ return 0, err
+ }
+ return count, nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ h, err := windows.GetCurrentProcess()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ var startTime, exitTime, kernelTime, userTime windows.Filetime
+ err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+ mem, err := getProcessMemoryInfo(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+ handles, err := getProcessHandleCount(h)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+ return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 000000000..fa535684f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,357 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, pusherDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, pusherDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, pusherDelegator{d}, readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 000000000..cea5a90fd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,349 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+ New: func() interface{} {
+ return gzip.NewWriter(nil)
+ },
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
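+//
+// A typical use (the path and address are illustrative):
+//
+//	http.Handle("/metrics", Handler())
+//	log.Fatal(http.ListenAndServe(":8080", nil))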
+func Handler() http.Handler {
+ return InstrumentMetricHandler(
+ prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+ )
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ var (
+ inFlightSem chan struct{}
+ errCnt = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_errors_total",
+ Help: "Total number of internal errors encountered by the promhttp metric handler.",
+ },
+ []string{"cause"},
+ )
+ )
+
+ if opts.MaxRequestsInFlight > 0 {
+ inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+ }
+ if opts.Registry != nil {
+ // Initialize all possibilities that can occur below.
+ errCnt.WithLabelValues("gathering")
+ errCnt.WithLabelValues("encoding")
+ if err := opts.Registry.Register(errCnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+ }
+
+ h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if inFlightSem != nil {
+ select {
+ case inFlightSem <- struct{}{}: // All good, carry on.
+ defer func() { <-inFlightSem }()
+ default:
+ http.Error(rsp, fmt.Sprintf(
+ "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+ ), http.StatusServiceUnavailable)
+ return
+ }
+ }
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ errCnt.WithLabelValues("gathering").Inc()
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ // Still report the error if no metrics have been gathered.
+ httpError(rsp, err)
+ return
+ }
+ case HTTPErrorOnError:
+ httpError(rsp, err)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ header := rsp.Header()
+ header.Set(contentTypeHeader, string(contentType))
+
+ w := io.Writer(rsp)
+ if !opts.DisableCompression && gzipAccepted(req.Header) {
+ header.Set(contentEncodingHeader, "gzip")
+ gz := gzipPool.Get().(*gzip.Writer)
+ defer gzipPool.Put(gz)
+
+ gz.Reset(w)
+ defer gz.Close()
+
+ w = gz
+ }
+
+ enc := expfmt.NewEncoder(w, contentType)
+
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding and sending metric family:", err)
+ }
+ errCnt.WithLabelValues("encoding").Inc()
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ httpError(rsp, err)
+ return
+ }
+ }
+ }
+
+ if lastErr != nil {
+ httpError(rsp, lastErr)
+ }
+ })
+
+ if opts.Timeout <= 0 {
+ return h
+ }
+ return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+ "Exceeded configured timeout of %v.\n",
+ opts.Timeout,
+ ))
+}
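
A hedged sketch of HandlerFor with a custom registry and non-default
HandlerOpts; the option values, log prefix, and port are illustrative
assumptions:

    package main

    import (
        "log"
        "net/http"
        "os"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())

        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
            ErrorHandling:       promhttp.ContinueOnError,
            Registry:            reg, // also exposes promhttp_metric_handler_errors_total
            MaxRequestsInFlight: 3,
            Timeout:             10 * time.Second,
        }))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
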
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+ cnt := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_requests_total",
+ Help: "Total number of scrapes by HTTP status code.",
+ },
+ []string{"code"},
+ )
+ // Initialize the most likely HTTP status codes.
+ cnt.WithLabelValues("200")
+ cnt.WithLabelValues("500")
+ cnt.WithLabelValues("503")
+ if err := reg.Register(cnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ cnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ gge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "promhttp_metric_handler_requests_in_flight",
+ Help: "Current number of scrapes being served.",
+ })
+ if err := reg.Register(gge); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ gge = are.ExistingCollector.(prometheus.Gauge)
+ } else {
+ panic(err)
+ }
+ }
+
+ return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
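
A short fragment (imports as in the full example above) showing how to apply
the same kind of instrumentation as Handler, but for a dedicated registry:

    reg := prometheus.NewRegistry()
    handler := promhttp.InstrumentMetricHandler(
        reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
    )
    http.Handle("/metrics", handler)
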
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. In this case, it is highly
+ // recommended to provide other means of detecting errors: By setting an
+ // ErrorLog in HandlerOpts, the errors are logged. By providing a
+ // Registry in HandlerOpts, the exposed metrics include an error counter
+ // "promhttp_metric_handler_errors_total", which can be used for
+ // alerts.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
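
As a hedged illustration, any type with a matching Println method satisfies
Logger; the adapter type below is hypothetical, not part of this package
(fmt import assumed):

    // loggerFunc adapts a plain function to the Logger interface.
    type loggerFunc func(v ...interface{})

    func (f loggerFunc) Println(v ...interface{}) { f(v...) }

    // Usage: HandlerOpts{ErrorLog: loggerFunc(func(v ...interface{}) { fmt.Println(v...) })}
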
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If Registry is not nil, it is used to register a metric
+ // "promhttp_metric_handler_errors_total", partitioned by "cause". A
+ // failed registration causes a panic. Note that this error counter is
+ // different from the instrumentation you get from the various
+ // InstrumentHandler... helpers. It counts errors that don't necessarily
+ // result in a non-2xx HTTP status code. There are two typical cases:
+ // (1) Encoding errors that only happen after streaming of the HTTP body
+ // has already started (and the status code 200 has been sent). This
+ // should only happen with custom collectors. (2) Collection errors with
+ // no effect on the HTTP status code because ErrorHandling is set to
+ // ContinueOnError.
+ Registry prometheus.Registerer
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+ // The number of concurrent HTTP requests is limited to
+ // MaxRequestsInFlight. Additional requests are responded to with 503
+ // Service Unavailable and a suitable message in the body. If
+ // MaxRequestsInFlight is 0 or negative, no limit is applied.
+ MaxRequestsInFlight int
+ // If handling a request takes longer than Timeout, it is responded to
+ // with 503 Service Unavailable and a suitable message. No timeout is
+ // applied if Timeout is 0 or negative. Note that with the current
+ // implementation, reaching the timeout simply ends the HTTP request as
+ // described above (and even that only if sending of the body hasn't
+ // started yet), while the bulk work of gathering all the metrics keeps
+ // running in the background (with the eventual result to be thrown
+ // away). Until the implementation is improved, it is recommended to
+ // implement a separate timeout in potentially slow Collectors.
+ Timeout time.Duration
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+ a := header.Get(acceptEncodingHeader)
+ parts := strings.Split(a, ",")
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return true
+ }
+ }
+ return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+ rsp.Header().Del(contentEncodingHeader)
+ http.Error(
+ rsp,
+ "An error has occurred while serving metrics:\n\n"+err.Error(),
+ http.StatusInternalServerError,
+ )
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 000000000..83c49b66a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
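
A sketch chaining the client middlewares above onto http.DefaultTransport;
the metric names, buckets, label choices, and target URL are illustrative
assumptions:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "client_in_flight_requests",
            Help: "Outgoing requests currently in flight.",
        })
        counter := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "client_api_requests_total",
            Help: "Outgoing requests by status code and method.",
        }, []string{"code", "method"})
        duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name:    "client_request_duration_seconds",
            Help:    "Outgoing request latency.",
            Buckets: prometheus.DefBuckets,
        }, []string{"method"})
        prometheus.MustRegister(inFlight, counter, duration)

        client := &http.Client{
            Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
                promhttp.InstrumentRoundTripperCounter(counter,
                    promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
                ),
            ),
        }
        resp, err := client.Get("https://example.org")
        if err != nil {
            log.Println(err)
            return
        }
        resp.Body.Close()
    }
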
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the HTTP request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
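
A fragment (imports as in the client example above) wiring two of the trace
hooks into a histogram; the metric name, buckets, and "event" label are
illustrative assumptions:

    dnsLatency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "dns_duration_seconds",
        Help:    "Trace DNS latency, partitioned by event.",
        Buckets: []float64{.005, .01, .025, .05},
    }, []string{"event"})
    prometheus.MustRegister(dnsLatency)

    trace := &promhttp.InstrumentTrace{
        DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
        DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
    }
    client := &http.Client{
        Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
    }
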
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000..9db243805
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
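
A fragment (imports as in the earlier examples) chaining the server-side
middlewares defined so far around an application handler; metric names and
buckets are illustrative assumptions:

    inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "in_flight_requests",
        Help: "Requests currently being served.",
    })
    requests := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "api_requests_total",
        Help: "Requests served, by status code and method.",
    }, []string{"code", "method"})
    duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "request_duration_seconds",
        Help:    "Request latency, by method.",
        Buckets: prometheus.DefBuckets,
    }, []string{"method"})
    prometheus.MustRegister(inFlight, requests, duration)

    app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("OK"))
    })
    http.Handle("/", promhttp.InstrumentHandlerInFlight(inFlight,
        promhttp.InstrumentHandlerDuration(duration,
            promhttp.InstrumentHandlerCounter(requests, app),
        ),
    ))
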
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
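
A fragment (time and net/http imports assumed) observing time-to-first-header
with a histogram partitioned by status code; name and buckets are assumptions:

    ttwh := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "http_write_header_duration_seconds",
        Help:    "Time until response headers are written, by status code.",
        Buckets: prometheus.DefBuckets,
    }, []string{"code"})
    prometheus.MustRegister(ttwh)

    slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        time.Sleep(100 * time.Millisecond) // simulate work before the first byte
        w.WriteHeader(http.StatusOK)
    })
    http.Handle("/slow", promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, slow))
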
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
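
A fragment covering both size middlewares; metric names and byte buckets are
illustrative assumptions:

    reqSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "http_request_size_bytes",
        Help:    "Approximate size of incoming requests.",
        Buckets: []float64{200, 500, 1000, 5000, 10000},
    }, []string{})
    respSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "http_response_size_bytes",
        Help:    "Size of outgoing responses.",
        Buckets: []float64{200, 500, 1000, 5000, 10000},
    }, []string{})
    prometheus.MustRegister(reqSize, respSize)

    app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello"))
    })
    http.Handle("/", promhttp.InstrumentHandlerRequestSize(reqSize,
        promhttp.InstrumentHandlerResponseSize(respSize, app)))
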
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ m prometheus.Metric
+ pm dto.Metric
+ lvs []string
+ )
+
+ // Get the Desc from the Collector.
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ // Create a ConstMetric with the Desc. Since we don't know how many
+ // variable labels there are, try for as long as it needs.
+ for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+ m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+ }
+
+ // Write out the metric into a proto message and look at the labels.
+ // If the value is not the magicString, it is a constLabel, which doesn't interest us.
+ // If the label is curried, it doesn't interest us.
+ // In all other cases, only "code" or "method" is allowed.
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString || isLabelCurried(c, name) {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ }
+ return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+ // This is even hackier than the label test above.
+ // We essentially try to curry again and see if it works.
+ // But for that, we need to type-convert to the two
+ // types we use here, ObserverVec or *CounterVec.
+ switch v := c.(type) {
+ case *prometheus.CounterVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ case prometheus.ObserverVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ default:
+ panic("unsupported metric vec type")
+ }
+ return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 000000000..6c32516aa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,945 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach of keeping default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as the type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // A Collector whose Describe method does not yield any Desc is treated
+ // as unchecked. Registration will always succeed. No check for
+ // re-registering (see previous paragraph) is performed. Thus, the
+ // caller is responsible for not double-registering the same unchecked
+ // Collector, and for providing a Collector that will not cause
+ // inconsistent metrics on collection. (This would lead to scrape
+ // errors.)
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered. Note that an unchecked
+ // Collector cannot be unregistered (as its Describe method does not
+ // yield any descriptor).
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
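
A fragment using the convenience functions against the default registry; the
metric name and label value are illustrative assumptions:

    jobsProcessed := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "myapp_jobs_processed_total",
        Help: "Jobs processed, partitioned by outcome.",
    }, []string{"outcome"})

    prometheus.MustRegister(jobsProcessed)
    jobsProcessed.WithLabelValues("success").Inc()
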
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
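
A sketch of the switch-over pattern described above (the counter name is an
assumption):

    reqCount := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "myapp_requests_total",
        Help: "Total requests handled.",
    })
    if err := prometheus.Register(reqCount); err != nil {
        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
            // Keep using the Counter that was registered first.
            reqCount = are.ExistingCollector.(prometheus.Counter)
        } else {
            panic(err)
        }
    }
    reqCount.Inc()
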
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+ if err != nil {
+ *errs = append(*errs, err)
+ }
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ uncheckedCollectors []Collector
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // A Collector yielding no Desc at all is considered unchecked.
+ if len(newDescIDs) == 0 {
+ r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+ return nil
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ switch e := existing.(type) {
+ case *wrappingCollector:
+ return AlreadyRegisteredError{
+ ExistingCollector: e.unwrapRecursively(),
+ NewCollector: c,
+ }
+ default:
+ return AlreadyRegisteredError{
+ ExistingCollector: e,
+ NewCollector: c,
+ }
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ checkedMetricChan = make(chan Metric, capMetricChan)
+ uncheckedMetricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+ checkedCollectors := make(chan Collector, len(r.collectorsByID))
+ uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+ for _, collector := range r.collectorsByID {
+ checkedCollectors <- collector
+ }
+ for _, collector := range r.uncheckedCollectors {
+ uncheckedCollectors <- collector
+ }
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+ r.mtx.RUnlock()
+
+ wg.Add(goroutineBudget)
+
+ collectWorker := func() {
+ for {
+ select {
+ case collector := <-checkedCollectors:
+ collector.Collect(checkedMetricChan)
+ case collector := <-uncheckedCollectors:
+ collector.Collect(uncheckedMetricChan)
+ default:
+ return
+ }
+ wg.Done()
+ }
+ }
+
+ // Start the first worker now to make sure at least one is running.
+ go collectWorker()
+ goroutineBudget--
+
+ // Close checkedMetricChan and uncheckedMetricChan once all collectors
+ // are collected.
+ go func() {
+ wg.Wait()
+ close(checkedMetricChan)
+ close(uncheckedMetricChan)
+ }()
+
+ // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+ defer func() {
+ if checkedMetricChan != nil {
+ for range checkedMetricChan {
+ }
+ }
+ if uncheckedMetricChan != nil {
+ for range uncheckedMetricChan {
+ }
+ }
+ }()
+
+ // Copy the channel references so we can nil them out later to remove
+ // them from the select statements below.
+ cmc := checkedMetricChan
+ umc := uncheckedMetricChan
+
+ for {
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ default:
+ if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+ // All collectors are already being worked on or
+ // we have already started as many goroutines as
+ // there are collectors. Do the same as above,
+ // just without the default.
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ }
+ break
+ }
+ // Start more workers.
+ go collectWorker()
+ goroutineBudget--
+ runtime.Gosched()
+ }
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
+ // and drained, the contraption above will nil out cmc and umc,
+ // and then we can leave the collect loop here.
+ if cmc == nil && umc == nil {
+ break
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+ tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tmp.Name())
+
+ mfs, err := g.Gather()
+ if err != nil {
+ return err
+ }
+ for _, mf := range mfs {
+ if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+ return err
+ }
+ }
+ if err := tmp.Close(); err != nil {
+ return err
+ }
+
+ if err := os.Chmod(tmp.Name(), 0644); err != nil {
+ return err
+ }
+ return os.Rename(tmp.Name(), filename)
+}
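
A fragment (log import assumed) writing a registry's state for the node
exporter's textfile collector; the output path and metric name are illustrative
assumptions:

    reg := prometheus.NewRegistry()
    lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "myjob_last_run_timestamp_seconds",
        Help: "Unix time of the last completed run.",
    })
    reg.MustRegister(lastRun)
    lastRun.SetToCurrentTime()

    if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/myjob.prom", reg); err != nil {
        log.Fatal(err)
    }
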
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+ metric Metric,
+ metricFamiliesByName map[string]*dto.MetricFamily,
+ metricHashes map[uint64]struct{},
+ registeredDescIDs map[uint64]struct{},
+) error {
+ desc := metric.Desc()
+ // Wrapped metrics collected by an unchecked Collector can have an
+ // invalid Desc.
+ if desc.err != nil {
+ return desc.err
+ }
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok { // Existing name.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ )
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ )
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else { // New name.
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+ return err
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+ return err
+ }
+ if registeredDescIDs != nil {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+ var (
+ newName = mf.GetName()
+ newType = mf.GetType()
+ newNameWithoutSuffix = ""
+ )
+ switch {
+ case strings.HasSuffix(newName, "_count"):
+ newNameWithoutSuffix = newName[:len(newName)-6]
+ case strings.HasSuffix(newName, "_sum"):
+ newNameWithoutSuffix = newName[:len(newName)-4]
+ case strings.HasSuffix(newName, "_bucket"):
+ newNameWithoutSuffix = newName[:len(newName)-7]
+ }
+ if newNameWithoutSuffix != "" {
+ if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+ switch existingMF.GetType() {
+ case dto.MetricType_SUMMARY:
+ if !strings.HasSuffix(newName, "_bucket") {
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected summary named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected histogram named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ }
+ }
+ if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_count"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_count",
+ )
+ }
+ if _, ok := mfs[newName+"_sum"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_sum",
+ )
+ }
+ }
+ if newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_bucket"]; ok {
+ return fmt.Errorf(
+ "collected histogram named %q collides with previously collected metric named %q",
+ newName, newName+"_bucket",
+ )
+ }
+ }
+ return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+) error {
+ name := metricFamily.GetName()
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %q { %s} is not a %s",
+ name, dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ previousLabelName := ""
+ for _, labelPair := range dtoMetric.GetLabel() {
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label with an invalid name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
+ return fmt.Errorf(
+ "collected metric %q { %s} must not have an explicit %q label",
+ name, dtoMetric, quantileLabel,
+ )
+ }
+ if !utf8.ValidString(labelPair.GetValue()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+ name, dtoMetric, labelName, labelPair.GetValue())
+ }
+ previousLabelName = labelName
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same labels)?
+ h := hashNew()
+ h = hashAdd(h, name)
+ h = hashAddByte(h, separatorByte)
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+ // We cannot sort dtoMetric.Label in place as it is immutable by contract.
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+ copy(copiedLabels, dtoMetric.Label)
+ sort.Sort(labelPairSorter(copiedLabels))
+ dtoMetric.Label = copiedLabels
+ }
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetName())
+ h = hashAddByte(h, separatorByte)
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %q { %s} was collected before with the same name and label values",
+ name, dtoMetric,
+ )
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+ copy(lpsFromDesc, desc.constLabelPairs)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(labelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 000000000..c970fdee0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,736 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
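+
+// Illustrative sketch (not part of the upstream file): creating a Summary with
+// explicitly chosen rank estimations, as recommended above. The objective
+// values below are just an example choice.
+//
+//     latency := NewSummary(SummaryOpts{
+//         Name:       "request_duration_seconds",
+//         Help:       "Request latency distribution.",
+//         Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//     })
+//     latency.Observe(0.21)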
+
+var errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Due to the way a Summary is represented in the Prometheus text format
+ // and how it is handled by the Prometheus server internally, “quantile”
+ // is an illegal label name. Construction of a Summary or SummaryVec
+ // will panic if this label name is used in ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is an empty map, resulting in a summary without
+ // quantiles.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm: The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see the code up to commit
+// 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = map[float64]float64{}
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ if len(opts.Objectives) == 0 {
+ // Use the lock-free implementation of a Summary without objectives.
+ s := &noObjectivesSummary{
+ desc: desc,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+ }
+ s.init(s) // Init self-collection.
+ return s
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type summaryCounts struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+}
+
+type noObjectivesSummary struct {
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finishes by incrementing the count field in the respective
+ // summaryCounts, as a marker for completion.
+ //
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the summary) swap the hot and cold counts under the
+ // writeMtx lock. A cooldown is awaited (while locked) by comparing the
+ // number of observations with the initiation count. Once they match, the
+ // last observation on the now cold counts has completed. All cold fields
+ // must then be merged into the new hot counts before writeMtx is released.
+
+ // Fields with atomic access first! See alignment constraint:
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the summaryCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*summaryCounts
+
+ labelPairs []*dto.LabelPair
+}
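+
+// To illustrate the scheme above (explanatory note, not part of the upstream
+// file): if countAndHotIdx currently holds 5 with the most significant bit
+// unset, counts[0] is hot and 5 observations have been started. A Write call
+// then adds 1<<63, flipping the most significant bit, so counts[1] becomes hot
+// for new observations, while the lower 63 bits still read 5: the number of
+// observations that must have completed on counts[0] before it is safe to
+// read it as the cold counts.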
+
+func (s *noObjectivesSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+ // We increment s.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1)
+ hotCounts := s.counts[n>>63]
+
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
+ s.writeMtx.Lock()
+ defer s.writeMtx.Unlock()
+
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := s.counts[n>>63]
+ coldCounts := s.counts[(^n)>>63]
+
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ sum := &dto.Summary{
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ return nil
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to observe the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *metricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ for _, ln := range labelNames {
+ if ln == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ s, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+ s, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &SummaryVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
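+
+// Illustrative sketch (not part of the upstream file): currying a SummaryVec.
+// The vector and label names below are assumptions made for the example.
+//
+//     latency := NewSummaryVec(SummaryOpts{Name: "request_duration_seconds"},
+//         []string{"method", "code"})
+//     getLatency := latency.MustCurryWith(Labels{"method": "GET"})
+//     // Same as latency.WithLabelValues("GET", "200").Observe(0.31):
+//     getLatency.WithLabelValues("200").Observe(0.31)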
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
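+
+// Illustrative sketch (not part of the upstream file): emitting a const
+// summary from a custom Collector's Collect method. myCollector, myDesc, and
+// the literal values are assumptions standing in for data the collector
+// obtained elsewhere.
+//
+//     func (c *myCollector) Collect(ch chan<- Metric) {
+//         ch <- MustNewConstSummary(
+//             myDesc, 4711, 403.34,
+//             map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//         )
+//     }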
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..8d5f10523
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+ d := time.Since(t.begin)
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
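+
+// Illustrative sketch (not part of the upstream file): timing a request with a
+// per-label Observer taken from a vector. requestDuration is assumed to be a
+// *SummaryVec or *HistogramVec created elsewhere.
+//
+//     func handleGet() {
+//         timer := NewTimer(requestDuration.WithLabelValues("GET"))
+//         defer timer.ObserveDuration()
+//         // Handle the request.
+//     }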
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 000000000..0f9ce63f4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
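+
+// Illustrative sketch (not part of the upstream file): mirroring an external
+// value as an untyped metric. readExternalValue is a hypothetical,
+// concurrency-safe helper standing in for however the value is obtained.
+//
+//     uf := NewUntypedFunc(UntypedOpts{
+//         Name: "external_subsystem_value",
+//         Help: "Value mirrored from an external system.",
+//     }, func() float64 {
+//         return readExternalValue()
+//     })
+//     MustRegister(uf)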
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 000000000..eb248f108
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,162 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
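+
+// Illustrative sketch (not part of the upstream file): a custom Collector
+// emitting a throw-away gauge on every scrape. myCollector, myDesc, and
+// currentQueueLength are assumptions standing in for the collector's own
+// descriptor and data source.
+//
+//     func (c *myCollector) Collect(ch chan<- Metric) {
+//         ch <- MustNewConstMetric(myDesc, GaugeValue, currentQueueLength())
+//     }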
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ labelPairs = append(labelPairs, desc.constLabelPairs...)
+ sort.Sort(labelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 000000000..14ed9e856
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,472 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses metricMap internally.
+type metricVec struct {
+ *metricMap
+
+ curry []curriedLabelValue
+
+ // hashAdd and hashAddByte can be replaced for testing collision handling.
+ hashAdd func(h uint64, s string) uint64
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+ return &metricVec{
+ metricMap: &metricMap{
+ metrics: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ },
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+ var (
+ newCurry []curriedLabelValue
+ oldCurry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+ if ok {
+ return nil, fmt.Errorf("label name %q is already curried", label)
+ }
+ newCurry = append(newCurry, oldCurry[iCurry])
+ iCurry++
+ } else {
+ if !ok {
+ continue // Label stays uncurried.
+ }
+ newCurry = append(newCurry, curriedLabelValue{i, val})
+ }
+ }
+ if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+ return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+ }
+
+ return &metricVec{
+ metricMap: m.metricMap,
+ curry: newCurry,
+ hashAdd: m.hashAdd,
+ hashAddByte: m.hashAddByte,
+ }, nil
+}
+
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iVals, iCurry int
+ )
+ for i := 0; i < len(m.desc.variableLabels); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ h = m.hashAdd(h, vals[iVals])
+ iVals++
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if ok {
+ return 0, fmt.Errorf("label name %q is already curried", label)
+ }
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+ index int
+ value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64][]metricWithLabelValues
+ desc *Desc
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.metrics {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.metrics {
+ delete(m.metrics, h)
+ }
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+
+ i := findMetricWithLabelValues(metrics, lvs, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+ i := findMetricWithLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+ hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ if !ok {
+ inlinedLVs := inlineLabelValues(lvs, curry)
+ metric = m.newMetric(inlinedLVs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
+// creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+ hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+ if !ok {
+ lvs := extractLabelValues(m.desc, labels, curry)
+ metric = m.newMetric(lvs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+ metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabelValues(metric.values, lvs, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+ if len(values) != len(lvs)+len(curry) {
+ return false
+ }
+ var iLVs, iCurry int
+ for i, v := range values {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if v != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if v != lvs[iLVs] {
+ return false
+ }
+ iLVs++
+ }
+ return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ if len(values) != len(labels)+len(curry) {
+ return false
+ }
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if values[i] != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(labels)+len(curry))
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(lvs)+len(curry))
+ var iCurry, iLVs int
+ for i := range labelValues {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = lvs[iLVs]
+ iLVs++
+ }
+ return labelValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 000000000..e303eef6d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,200 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ labels: labels,
+ }
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer and
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ prefix: prefix,
+ }
+}
+
+type wrappingRegisterer struct {
+ wrappedRegisterer Registerer
+ prefix string
+ labels Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+ return r.wrappedRegisterer.Register(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+ return r.wrappedRegisterer.Unregister(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+type wrappingCollector struct {
+ wrappedCollector Collector
+ prefix string
+ labels Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+ wrappedCh := make(chan Metric)
+ go func() {
+ c.wrappedCollector.Collect(wrappedCh)
+ close(wrappedCh)
+ }()
+ for m := range wrappedCh {
+ ch <- &wrappingMetric{
+ wrappedMetric: m,
+ prefix: c.prefix,
+ labels: c.labels,
+ }
+ }
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+ wrappedCh := make(chan *Desc)
+ go func() {
+ c.wrappedCollector.Describe(wrappedCh)
+ close(wrappedCh)
+ }()
+ for desc := range wrappedCh {
+ ch <- wrapDesc(desc, c.prefix, c.labels)
+ }
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+ switch wc := c.wrappedCollector.(type) {
+ case *wrappingCollector:
+ return wc.unwrapRecursively()
+ default:
+ return wc
+ }
+}
+
+type wrappingMetric struct {
+ wrappedMetric Metric
+ prefix string
+ labels Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+ return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+ if err := m.wrappedMetric.Write(out); err != nil {
+ return err
+ }
+ if len(m.labels) == 0 {
+ // No wrapping labels.
+ return nil
+ }
+ for ln, lv := range m.labels {
+ out.Label = append(out.Label, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(lv),
+ })
+ }
+ sort.Sort(labelPairSorter(out.Label))
+ return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+ constLabels := Labels{}
+ for _, lp := range desc.constLabelPairs {
+ constLabels[*lp.Name] = *lp.Value
+ }
+ for ln, lv := range labels {
+ if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+ return &Desc{
+ fqName: desc.fqName,
+ help: desc.help,
+ variableLabels: desc.variableLabels,
+ constLabelPairs: desc.constLabelPairs,
+ err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+ }
+ }
+ constLabels[ln] = lv
+ }
+ // NewDesc will do remaining validations.
+ newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ // Propagate the error if there was any. This will override any error
+ // created by NewDesc above, i.e. earlier errors get precedence.
+ if desc.err != nil {
+ newDesc.err = desc.err
+ }
+ return newDesc
+}
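
A brief, hypothetical usage sketch for the wrappers defined above (metric and label names are illustrative; assumes the usual client_golang prometheus import): a name prefix and a fixed label are applied to everything registered through the wrapping Registerer, while the underlying registry is left untouched.

	reg := prometheus.NewRegistry()

	// Everything registered through sub is renamed by wrapDesc with the
	// "worker_" prefix and gains the shard="a" const label.
	sub := prometheus.WrapRegistererWithPrefix("worker_",
		prometheus.WrapRegistererWith(prometheus.Labels{"shard": "a"}, reg),
	)

	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Jobs processed by this worker.",
	})
	sub.MustRegister(jobs) // exposed as worker_jobs_processed_total{shard="a"}
	jobs.Inc()
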
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 000000000..20110e410
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 000000000..9805432c2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,629 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: metrics.proto
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+ return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+ return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+ return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+ return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+func (*Summary) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+ return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+ xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+ return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+ xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+ return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+ return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+ proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+ proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+ proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+ proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+ proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+ proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+ proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+ proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+ proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+ // 591 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+ 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+ 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+ 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+ 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+ 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+ 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+ 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+ 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+ 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+ 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+ 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+ 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+ 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+ 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+ 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+ 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+ 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+ 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+ 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+ 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+ 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+ 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+ 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+ 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+ 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+ 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+ 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+ 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+ 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+ 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+ 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+ 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+ 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+ 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+ 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+ 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
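
The generated types above are ordinary protobuf messages with pointer-valued fields, so scalars are set via the proto helper constructors and read via the nil-safe generated Get* accessors. A minimal sketch of building and reading a counter family by hand, assuming the golang/protobuf proto import used in this file and the dto alias used elsewhere in this diff:

	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}

	// The Get* accessors tolerate nil receivers and unset fields.
	fmt.Println(mf.GetName(), mf.GetType(), mf.GetMetric()[0].GetCounter().GetValue())
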
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 000000000..636a2c1a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 000000000..c092723e8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
+ }
+ return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f), nil
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f), nil
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f), nil
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f), nil
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f), nil
+ }
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
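
To illustrate what the summary/histogram extractors return, consider a hypothetical histogram family demo_request_seconds with explicit buckets 0.1 and 0.5 and no +Inf bucket. Following the code above, extractHistogram emits samples equivalent to the series below, each stamped with either the metric's own TimestampMs or the DecodeOptions timestamp:

    demo_request_seconds_bucket{le="0.1"}    cumulative count up to 0.1
    demo_request_seconds_bucket{le="0.5"}    cumulative count up to 0.5
    demo_request_seconds_sum                 sample sum
    demo_request_seconds_count               sample count
    demo_request_seconds_bucket{le="+Inf"}   synthesized; same value as _count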
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 000000000..11839ed65
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder that writes to w in the given format,
+// which is typically the result of content type negotiation via Negotiate.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
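
Negotiate and NewEncoder together cover the server side of exposition. A sketch of a handler wiring them up; gather() is a placeholder for whatever produces the metric families and is not part of this package:

package main

import (
    "net/http"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// gather is a hypothetical source of metric families.
func gather() []*dto.MetricFamily { return nil }

func metricsHandler(w http.ResponseWriter, r *http.Request) {
    format := expfmt.Negotiate(r.Header)
    w.Header().Set("Content-Type", string(format))
    enc := expfmt.NewEncoder(w, format)
    for _, mf := range gather() {
        if err := enc.Encode(mf); err != nil {
            // In a real handler you would log the error; headers may
            // already have been written at this point.
            return
        }
    }
}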
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 000000000..c71bcb981
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire protocols.
+const (
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 000000000..dc2eedeef
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..8e473d0fe
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,468 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialBufSize = 512
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bytes.Buffer from the sync.Pool and write its contents to out in
+	// one go at the end.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ w = b
+ defer func() {
+ bWritten, bErr := out.Write(b.Bytes())
+ written = bWritten
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var n int
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err = w.WriteString("# HELP ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ metricType := in.GetType()
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
+ written += n
+ if err != nil {
+ return
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Counter.GetValue(),
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Gauge.GetValue(),
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Untyped.GetValue(),
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
+ q.GetValue(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Summary.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Summary.GetSampleCount()),
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, b := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ if math.IsInf(b.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Histogram.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ return
+}
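
A self-contained sketch of driving MetricFamilyToText directly; the family name, label, and counter value below are invented for illustration:

package main

import (
    "os"

    "github.com/golang/protobuf/proto"
    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("demo_requests_total"),
        Help: proto.String("Requests handled by the demo server."),
        Type: dto.MetricType_COUNTER.Enum(),
        Metric: []*dto.Metric{{
            Label: []*dto.LabelPair{{
                Name:  proto.String("job"),
                Value: proto.String("demo"),
            }},
            Counter: &dto.Counter{Value: proto.Float64(42)},
        }},
    }
    // Writes the "# HELP", "# TYPE", and sample lines to stdout.
    if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
        panic(err)
    }
}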
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
+func writeSample(
+ w enhancedWriter,
+ name, suffix string,
+ metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ value float64,
+) (int, error) {
+ var written int
+ n, err := w.WriteString(name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if suffix != "" {
+ n, err = w.WriteString(suffix)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeLabelPairs(
+ w, metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+ w enhancedWriter,
+ in []*dto.LabelPair,
+ additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var (
+ written int
+ separator byte = '{'
+ )
+ for _, lp := range in {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+var (
+ escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+ if includeDoubleQuote {
+ return quotedEscaper.WriteString(w, v)
+ } else {
+ return escaper.WriteString(w, v)
+ }
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 000000000..ec3d86ba7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,757 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+	// These tell us if the currently processed line ends in '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing the
+	// sample count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
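
A short usage sketch of the parser on an in-memory exposition document; the metric name and label are made up:

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/common/expfmt"
)

func main() {
    const input = `# HELP demo_temperature_celsius Current temperature.
# TYPE demo_temperature_celsius gauge
demo_temperature_celsius{room="kitchen"} 21.5
`
    var parser expfmt.TextParser // Zero value is ready to use.
    fams, err := parser.TextToMetricFamilies(strings.NewReader(input))
    if err != nil {
        panic(err)
    }
    for name, mf := range fams {
        fmt.Println(name, mf.GetType(), len(mf.GetMetric()), "metric(s)")
    }
}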
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. If this is a summary or histogram,
+	// the metric might already exist, which we can only know after
+	// having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ if !model.LabelValue(p.currentToken.String()).IsValid() {
+ p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 000000000..7723656d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 000000000..26e92288c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
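
For orientation, a sketch of the call shape of ParseAccept and Negotiate. This vendored copy sits under internal/, so code outside prometheus/common reaches it only through wrappers such as expfmt.Negotiate; the import below only compiles from within that module:

package main

import (
    "fmt"

    "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
)

func main() {
    header := "text/html;q=0.8, application/json;q=0.9, */*;q=0.1"

    // Clauses come back sorted by quality and specificity.
    for _, clause := range goautoneg.ParseAccept(header) {
        fmt.Printf("%s/%s q=%.1f\n", clause.Type, clause.SubType, clause.Q)
    }

    // Pick the best match among the types this server can produce.
    ct := goautoneg.Negotiate(header, []string{"application/json", "text/plain"})
    fmt.Println("negotiated:", ct) // "application/json"
}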
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 000000000..35e739c7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent. It returns an error
+// if it is not.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
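// Editor's sketch (not part of the diff): constructing an Alert with the API
// added above and checking its lifecycle helpers. Label values and times are
// invented for illustration only.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{model.AlertNameLabel: "HighErrorRate", "job": "api"},
		StartsAt: time.Now().Add(-10 * time.Minute),
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	// EndsAt is zero, so the alert is still active.
	fmt.Println(a.Name(), a.Status()) // HighErrorRate firing
}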
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 000000000..fc4de4106
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
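// Editor's sketch (not part of the diff): round-tripping a Fingerprint through
// its hexadecimal string form and intersecting two FingerprintSets, using only
// the API defined above.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fp := model.Fingerprint(0x1234abcd)
	s := fp.String() // zero-padded to 16 hex digits
	back, err := model.ParseFingerprint(s)
	fmt.Println(s, back == fp, err) // 000000001234abcd true <nil>

	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}
	fmt.Println(len(a.Intersection(b))) // 2
}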
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 000000000..038fc1c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
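// Editor's sketch (not part of the diff): the helpers above are an inlined
// 64-bit FNV-1a. Because they are unexported, this check has to live in
// package model; it verifies that they agree with hash/fnv from the standard
// library for a given input.
package model

import "hash/fnv"

// fnvMatchesStdlib reports whether hashNew/hashAdd produce the same digest as
// the standard library's fnv.New64a over the bytes of s.
func fnvMatchesStdlib(s string) bool {
	h := fnv.New64a()
	h.Write([]byte(s))
	return hashAdd(hashNew(), s) == h.Sum64()
}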
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
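// Editor's sketch (not part of the diff): validating label names and sorting
// LabelNames with the types defined above. The example names are arbitrary.
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("job").IsValid())      // true
	fmt.Println(model.LabelName("0bad").IsValid())     // false: leading digit
	fmt.Println(model.LabelName("bad-name").IsValid()) // false: '-' not allowed

	names := model.LabelNames{"zone", "instance", "job"}
	sort.Sort(names)
	fmt.Println(names) // instance, job, zone
}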
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 000000000..6eda08a73
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
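// Editor's sketch (not part of the diff): merging and comparing LabelSets.
// Merge is non-destructive and the argument wins on conflicting names, as
// implemented above; the fingerprint helpers it relies on appear in
// signature.go later in this diff.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "env": "prod"}
	extra := model.LabelSet{"env": "staging", "region": "eu"}

	merged := base.Merge(extra)
	fmt.Println(merged)               // {env="staging", job="api", region="eu"}
	fmt.Println(base.Equal(merged))   // false
	fmt.Println(merged.Fingerprint()) // stable FNV-1a based fingerprint
}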
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 000000000..00804b7fe
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,102 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := make(Metric, len(m))
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
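// Editor's sketch (not part of the diff): a Metric is a LabelSet with a
// __name__ entry; String renders it in the familiar name{labels} notation.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}
	fmt.Println(m) // http_requests_total{code="200", method="GET"}

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx_rate"))            // false: leading digit
}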
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 000000000..a7b969170
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 000000000..8762b13c6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more susceptible
+// to hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
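// Editor's sketch (not part of the diff): the exported signature helpers above
// operate on plain label maps or Metrics; identical label content yields the
// same signature because names are sorted before hashing.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	sig := model.LabelsToSignature(map[string]string{"job": "api", "env": "prod"})
	same := model.LabelsToSignature(map[string]string{"env": "prod", "job": "api"})
	fmt.Println(sig == same) // true

	m := model.Metric{"job": "api", "env": "prod", "instance": "a:9090"}
	// Only the listed labels contribute to the signature; "instance" is ignored.
	fmt.Println(model.SignatureForLabels(m, "job", "env"))
}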
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..bb99889d2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how the value of a given label is matched.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns an error unless all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns an error unless all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
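// Editor's sketch (not part of the diff): a Silence must carry at least one
// valid Matcher plus a time range, creator, comment, and creation timestamp to
// pass Validate. All values below are made up for illustration.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	s := &model.Silence{
		Matchers: []*model.Matcher{
			{Name: "alertname", Value: "HighErrorRate"},
			{Name: "instance", Value: "web-.*", IsRegex: true},
		},
		StartsAt:  time.Now(),
		EndsAt:    time.Now().Add(2 * time.Hour),
		CreatedAt: time.Now(),
		CreatedBy: "oncall@example.org",
		Comment:   "rolling deploy",
	}
	fmt.Println(s.Validate()) // <nil>
}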
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..7b0064fdb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,270 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. It must divide
+	// time.Second evenly for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+		// If the value was something like -0.1, the sign is lost during parsing
+		// because of the leading zero; this check ensures that it is preserved.
+ if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+ *t = Time(v+va) * -1
+ } else {
+ *t = Time(v + va)
+ }
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+ var err error
+ *d, err = ParseDuration(s)
+ return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+ return "duration"
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ if ms == 0 {
+ return "0s"
+ }
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
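// Editor's sketch (not part of the diff): model.Time counts milliseconds since
// the epoch, and Duration understands the y/w/d/h/m/s/ms suffixes parsed above.
// The timestamps are arbitrary.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	t := model.TimeFromUnix(1700000000)
	fmt.Println(t)                              // 1700000000
	fmt.Println(t.Add(1500 * time.Millisecond)) // 1700000001.5

	d, err := model.ParseDuration("90m")
	fmt.Println(time.Duration(d), err) // 1h30m0s <nil>
	fmt.Println(d.String())            // 90m: largest unit that divides evenly
}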
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 000000000..c9d8fb1a2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,416 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which can appear in a real SamplePair and is thus not suitable to
+	// signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which can appear in a real Sample and is thus not suitable to signal
+	// a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+
+ return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
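// Editor's sketch (not part of the diff): with the marshalers defined above, a
// Sample serializes its value as a [timestamp, "value"] pair with the value as
// a quoted string, and round-trips through encoding/json.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{model.MetricNameLabel: "up", "job": "api"},
		Value:     1,
		Timestamp: model.TimeFromUnix(1700000000),
	}
	b, err := json.Marshal(s)
	fmt.Println(string(b), err)
	// {"metric":{"__name__":"up","job":"api"},"value":[1700000000,"1"]} <nil>

	var v model.Sample
	fmt.Println(json.Unmarshal(b, &v), v.Equal(&s)) // <nil> true
}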
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 000000000..25e3659ab
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1 @@
+/fixtures/
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
new file mode 100644
index 000000000..438ca92ec
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -0,0 +1,6 @@
+# Run only staticcheck and govet for now. Additional linters will be enabled one-by-one.
+linters:
+ enable:
+ - staticcheck
+ - govet
+ disable-all: true
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 000000000..40503edbf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 000000000..56ba67d3e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1,2 @@
+* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+* Paul Gier <pgier@redhat.com> @pgier
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 000000000..616a0d25e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,29 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include Makefile.common
+
+%/.unpacked: %.ttar
+ @echo ">> extracting fixtures"
+ ./ttar -C $(dir $*) -x -f $*.ttar
+ touch $@
+
+update_fixtures:
+ rm -vf fixtures/.unpacked
+ ./ttar -c -f fixtures.ttar fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
new file mode 100644
index 000000000..c7f9ea64f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -0,0 +1,272 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# A common Makefile that includes rules to be reused in different prometheus projects.
+# !!! Open PRs only against Makefile.common in the prometheus/prometheus repository!
+
+# Example usage:
+# Create the main Makefile in the root project directory.
+# include Makefile.common
+# customTarget:
+# @echo ">> Running customTarget"
+#
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO ?= go
+GOFMT ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS ?=
+GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
+
+GO_VERSION ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+GOVENDOR :=
+GO111MODULE :=
+ifeq (, $(PRE_GO_111))
+ ifneq (,$(wildcard go.mod))
+ # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
+ GO111MODULE := on
+
+ ifneq (,$(wildcard vendor))
+ # Always use the local vendor/ directory to satisfy the dependencies.
+ GOOPTS := $(GOOPTS) -mod=vendor
+ endif
+ endif
+else
+ ifneq (,$(wildcard go.mod))
+ ifneq (,$(wildcard vendor))
+$(warning This repository requires Go >= 1.11 because of Go modules)
+$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
+ endif
+ else
+ # This repository isn't using Go modules (yet).
+ GOVENDOR := $(FIRST_GOPATH)/bin/govendor
+ endif
+endif
+PROMU := $(FIRST_GOPATH)/bin/promu
+pkgs = ./...
+
+ifeq (arm, $(GOHOSTARCH))
+ GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+ GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+ GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
+
+PROMU_VERSION ?= 0.4.0
+PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v1.16.0
+# golangci-lint only supports the linux, darwin and windows platforms on i386/amd64.
+# windows isn't included here because its path separator is different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ endif
+endif
+
+PREFIX ?= $(shell pwd)
+BIN_DIR ?= $(shell pwd)
+DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKER_REPO ?= prom
+
+DOCKER_ARCHS ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+ifeq ($(GOHOSTARCH),amd64)
+ ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+ # Only supported on amd64
+ test-flags := -race
+ endif
+endif
+
+# This rule forwards a target like "build" to "common-build". It allows a
+# Makefile that includes this one to define its own "build" target and
+# override "common-build" without override warnings.
+%: common-% ;
+
+.PHONY: common-all
+common-all: precheck style check_license lint unused build test
+
+.PHONY: common-style
+common-style:
+ @echo ">> checking code style"
+ @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
+ if [ -n "$${fmtRes}" ]; then \
+ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
+ echo "Please ensure you are using $$($(GO) version) for formatting code."; \
+ exit 1; \
+ fi
+
+.PHONY: common-check_license
+common-check_license:
+ @echo ">> checking license header"
+ @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
+ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
+ done); \
+ if [ -n "$${licRes}" ]; then \
+ echo "license header checking failed:"; echo "$${licRes}"; \
+ exit 1; \
+ fi
+
+.PHONY: common-deps
+common-deps:
+ @echo ">> getting dependencies"
+ifdef GO111MODULE
+ GO111MODULE=$(GO111MODULE) $(GO) mod download
+else
+ $(GO) get $(GOOPTS) -t ./...
+endif
+
+.PHONY: common-test-short
+common-test-short:
+ @echo ">> running short tests"
+ GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
+
+.PHONY: common-test
+common-test:
+ @echo ">> running all tests"
+ GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
+
+.PHONY: common-format
+common-format:
+ @echo ">> formatting code"
+ GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+
+.PHONY: common-vet
+common-vet:
+ @echo ">> vetting code"
+ GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+
+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint"
+ifdef GO111MODULE
+# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
+# Otherwise staticcheck might fail randomly for some reason not yet explained.
+ GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+ GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
+else
+ $(GOLANGCI_LINT) run $(pkgs)
+endif
+endif
+
+# For backward-compatibility.
+.PHONY: common-staticcheck
+common-staticcheck: lint
+
+.PHONY: common-unused
+common-unused: $(GOVENDOR)
+ifdef GOVENDOR
+ @echo ">> running check for unused packages"
+ @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
+else
+ifdef GO111MODULE
+ @echo ">> running check for unused/missing packages in go.mod"
+ GO111MODULE=$(GO111MODULE) $(GO) mod tidy
+ifeq (,$(wildcard vendor))
+ @git diff --exit-code -- go.sum go.mod
+else
+ @echo ">> running check for unused packages in vendor/"
+ GO111MODULE=$(GO111MODULE) $(GO) mod vendor
+ @git diff --exit-code -- go.sum go.mod vendor/
+endif
+endif
+endif
+
+.PHONY: common-build
+common-build: promu
+ @echo ">> building binaries"
+ GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
+
+.PHONY: common-tarball
+common-tarball: promu
+ @echo ">> building release tarball"
+ $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ --build-arg ARCH="$*" \
+ --build-arg OS="linux" \
+ .
+
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+
+.PHONY: promu
+promu: $(PROMU)
+
+$(PROMU):
+ $(eval PROMU_TMP := $(shell mktemp -d))
+ curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+ mkdir -p $(FIRST_GOPATH)/bin
+ cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+ rm -r $(PROMU_TMP)
+
+.PHONY: proto
+proto:
+ @echo ">> generating code from proto files"
+ @./scripts/genproto.sh
+
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+ mkdir -p $(FIRST_GOPATH)/bin
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
+endif
+
+ifdef GOVENDOR
+.PHONY: $(GOVENDOR)
+$(GOVENDOR):
+ GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+ @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+ exit 1; \
+ fi
+endef
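+
+# Illustrative only (not part of the upstream file): a Makefile that includes
+# this one could enable a precheck for a required tool, for example to verify
+# that git is installed:
+#
+#   PRECHECK_OPTIONS_git = --version
+#   $(eval $(call PRECHECK_COMMAND_template,git))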
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 000000000..53c5e9aa1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 000000000..6f8850feb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,53 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystems /proc and /sys.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized into packages based on whether the gathered data comes from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either
+/proc, /sys, or both. For example, current CPU statistics are gathered from `/proc/stat` and
+are available via the root procfs package. First, the proc filesystem mount point is
+initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+ fs, err := blockdevice.NewFS("/proc", "/sys")
+ stats, err := fs.ProcDiskstats()
+```
+
+## Building and Testing
+
+The procfs library is normally built as part of another application. However, when making
+changes to the library, the `make test` command can be used to run the API test suite.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. Finally, verify the changes using
+`git diff fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 000000000..63d4229a4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data consists of an array of counts of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.proc.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
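+
+// Illustrative sketch (not part of the upstream file): with fs obtained from
+// procfs.NewFS("/proc"), the free memory represented by each bucket can be
+// estimated as count * 2^order * page size, e.g. assuming a 4 KiB page size:
+//
+//	info, err := fs.BuddyInfo()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	const pageSize = 4096 // assumed here; use os.Getpagesize() on a real system
+//	for _, bi := range info {
+//		for order, count := range bi.Sizes {
+//			free := count * float64(pageSize) * float64(1<<uint(order))
+//			fmt.Printf("node %s zone %s order %d: %.0f bytes free\n", bi.Node, bi.Zone, order, free)
+//		}
+//	}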
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 000000000..e2acd6d40
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
new file mode 100644
index 000000000..951d909af
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -0,0 +1,1808 @@
+# Archive created by ttar -c -f fixtures.ttar fixtures/
+Directory: fixtures
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26231
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/cmdline
+Lines: 1
+vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/comm
+Lines: 1
+vim
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/cwd
+SymlinkTo: /usr/bin
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/exe
+SymlinkTo: /usr/bin/vim
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26231/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/fd/10
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/io
+Lines: 7
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space 8589934592 unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/mountstats
+Lines: 20
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+ opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
+ age: 13968
+ caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+ nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+ sec: flavor=1,pseudoflavor=1
+ events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+ bytes: 1207640230 0 0 0 1210214218 0 295483 0
+ RPC iostats version: 1.0 p/v: 100003/4 (nfs)
+ xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+ per-op statistics
+ NULL: 0 0 0 0 0 0 0 0
+ READ: 1298 1298 0 207680 1210292152 6 79386 79407
+ WRITE: 0 0 0 0 0 0 0 0
+ ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717
+
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26231/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/net/dev
+Lines: 4
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26231/ns
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/ns/mnt
+SymlinkTo: mnt:[4026531840]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/ns/net
+SymlinkTo: net:[4026531993]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/root
+SymlinkTo: /
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/stat
+Lines: 1
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/status
+Lines: 53
+
+Name: prometheus
+Umask: 0022
+State: S (sleeping)
+Tgid: 1
+Ngid: 0
+Pid: 1
+PPid: 0
+TracerPid: 0
+Uid: 0 0 0 0
+Gid: 0 0 0 0
+FDSize: 128
+Groups:
+NStgid: 1
+NSpid: 1
+NSpgid: 1
+NSsid: 1
+VmPeak: 58472 kB
+VmSize: 58440 kB
+VmLck: 0 kB
+VmPin: 0 kB
+VmHWM: 8028 kB
+VmRSS: 6716 kB
+RssAnon: 2092 kB
+RssFile: 4624 kB
+RssShmem: 0 kB
+VmData: 2580 kB
+VmStk: 136 kB
+VmExe: 948 kB
+VmLib: 6816 kB
+VmPTE: 128 kB
+VmPMD: 12 kB
+VmSwap: 660 kB
+HugetlbPages: 0 kB
+Threads: 1
+SigQ: 8/63965
+SigPnd: 0000000000000000
+ShdPnd: 0000000000000000
+SigBlk: 7be3c0fe28014a03
+SigIgn: 0000000000001000
+SigCgt: 00000001800004ec
+CapInh: 0000000000000000
+CapPrm: 0000003fffffffff
+CapEff: 0000003fffffffff
+CapBnd: 0000003fffffffff
+CapAmb: 0000000000000000
+Seccomp: 0
+Cpus_allowed: ff
+Cpus_allowed_list: 0-7
+Mems_allowed: 00000000,00000001
+Mems_allowed_list: 0
+voluntary_ctxt_switches: 4742839
+nonvoluntary_ctxt_switches: 1727500
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26232
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/cmdline
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/comm
+Lines: 1
+ata_sff
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/cwd
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26232/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/fd/4
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 29436 29436 processes
+Max open files 1024 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 29436 29436 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/root
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26232/stat
+Lines: 1
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/26233
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26233/cmdline
+Lines: 1
+com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/584
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/584/stat
+Lines: 2
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/buddyinfo
+Lines: 3
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/diskstats
+Lines: 49
+ 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
+ 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
+ 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
+ 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0
+ 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0
+ 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0
+ 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0
+ 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0
+ 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0
+ 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0
+ 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0
+ 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0
+ 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0
+ 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0
+ 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0
+ 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0
+ 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0
+ 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0
+ 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0
+ 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0
+ 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0
+ 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0
+ 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
+ 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
+ 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804
+ 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36
+ 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32
+ 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60
+ 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428
+ 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256
+ 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84
+ 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416
+ 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104
+ 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44
+ 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632
+ 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156
+ 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24
+ 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68
+ 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80
+ 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228
+ 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720
+ 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992
+ 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0
+ 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546
+ 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16
+ 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970
+ 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
+ 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
+ 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/fs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/fs/xfs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/fs/xfs/stat
+Lines: 23
+extent_alloc 92447 97589 92448 93751
+abt 0 0 0 0
+blk_map 1767055 188820 184891 92447 92448 2140766 0
+bmbt 0 0 0 0
+dir 185039 92447 92444 136422
+trans 706 944304 0
+ig 185045 58807 0 126238 0 33637 22
+log 2883 113448 9 17360 739
+push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
+xstrat 92447 0
+rw 107739 94045
+attr 4 0 0 0
+icluster 8677 7849 135802
+vnodes 92601 0 0 0 92444 92444 92444 0
+buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
+abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
+abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
+bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
+fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+qm 0 0 0 0 0 0 0 0
+xpc 399724544 92823103 86219234
+debug 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/mdstat
+Lines: 26
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+ 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+
+md127 : active raid1 sdi2[0] sdj2[1]
+ 312319552 blocks [2/2] [UU]
+
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+ 248896 blocks [2/2] [UU]
+
+md4 : inactive raid1 sda3[0] sdb3[1]
+ 4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+ 195310144 blocks [2/1] [U_]
+ [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+ 195310144 blocks [2/2] [UU]
+ [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+ 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+ bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices: <none>
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/dev
+Lines: 6
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
+docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/ip_vs
+Lines: 21
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP C0A80016:0CEA wlc
+ -> C0A85216:0CEA Tunnel 100 248 2
+ -> C0A85318:0CEA Tunnel 100 248 2
+ -> C0A85315:0CEA Tunnel 100 248 1
+TCP C0A80039:0CEA wlc
+ -> C0A85416:0CEA Tunnel 0 0 0
+ -> C0A85215:0CEA Tunnel 100 1499 0
+ -> C0A83215:0CEA Tunnel 100 1498 0
+TCP C0A80037:0CEA wlc
+ -> C0A8321A:0CEA Tunnel 0 0 0
+ -> C0A83120:0CEA Tunnel 100 0 0
+TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
+ -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
+FWM 10001000 wlc
+ -> C0A8321A:0CEA Route 0 0 1
+ -> C0A83215:0CEA Route 0 0 2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/ip_vs_stats
+Lines: 6
+ Total Incoming Outgoing Incoming Outgoing
+ Conns Packets Packets Bytes Bytes
+ 16AA370 E33656E5 0 51D8C8883AB3 0
+
+ Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
+ 4 1FB3C 0 1282A8F 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/net/rpc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/rpc/nfs
+Lines: 5
+net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/rpc/nfsd
+Lines: 11
+rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/unix
+Lines: 6
+Num RefCount Protocol Flags Type St Inode Path
+0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
+0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
+0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 00000003 00000000 00000000 0001 03 5091797
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/unix_without_inode
+Lines: 6
+Num RefCount Protocol Flags Type St Path
+0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
+0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
+0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 00000003 00000000 00000000 0001 03
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/xfrm_stat
+Lines: 28
+XfrmInError 1
+XfrmInBufferError 2
+XfrmInHdrError 4
+XfrmInNoStates 3
+XfrmInStateProtoError 40
+XfrmInStateModeError 100
+XfrmInStateSeqError 6000
+XfrmInStateExpired 4
+XfrmInStateMismatch 23451
+XfrmInStateInvalid 55555
+XfrmInTmplMismatch 51
+XfrmInNoPols 65432
+XfrmInPolBlock 100
+XfrmInPolError 10000
+XfrmOutError 1000000
+XfrmOutBundleGenError 43321
+XfrmOutBundleCheckError 555
+XfrmOutNoStates 869
+XfrmOutStateProtoError 4542
+XfrmOutStateModeError 4
+XfrmOutStateSeqError 543
+XfrmOutStateExpired 565
+XfrmOutPolBlock 43456
+XfrmOutPolDead 7656
+XfrmOutPolError 1454
+XfrmFwdHdrError 6654
+XfrmOutStateInvalid 28765
+XfrmAcquireError 24532
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/pressure
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/pressure/cpu
+Lines: 1
+some avg10=0.10 avg60=2.00 avg300=3.85 total=15
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/pressure/io
+Lines: 2
+some avg10=0.10 avg60=2.00 avg300=3.85 total=15
+full avg10=0.20 avg60=3.00 avg300=4.95 total=25
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/pressure/memory
+Lines: 2
+some avg10=0.10 avg60=2.00 avg300=3.85 total=15
+full avg10=0.20 avg60=3.00 avg300=4.95 total=25
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/self
+SymlinkTo: 26231
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/stat
+Lines: 16
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 1
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/proc/symlinktargets
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/README
+Lines: 2
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/abc
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/def
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/ghi
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/uvw
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/symlinktargets/xyz
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/block
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/block/dm-0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/block/dm-0/stat
+Lines: 1
+6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/block/sda
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/block/sda/stat
+Lines: 1
+9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/net
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/net/eth0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/addr_assign_type
+Lines: 1
+3
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/addr_len
+Lines: 1
+6
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/address
+Lines: 1
+01:01:01:01:01:01
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/broadcast
+Lines: 1
+ff:ff:ff:ff:ff:ff
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/carrier
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/carrier_changes
+Lines: 1
+2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/carrier_down_count
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/carrier_up_count
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/dev_id
+Lines: 1
+0x20
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/dormant
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/duplex
+Lines: 1
+full
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/flags
+Lines: 1
+0x1303
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/ifalias
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/ifindex
+Lines: 1
+2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/iflink
+Lines: 1
+2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/link_mode
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/mtu
+Lines: 1
+1500
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/name_assign_type
+Lines: 1
+2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/netdev_group
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/operstate
+Lines: 1
+up
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/phys_port_id
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/phys_port_name
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/phys_switch_id
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/speed
+Lines: 1
+1000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/tx_queue_len
+Lines: 1
+1000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/net/eth0/type
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/power_supply
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/power_supply/AC
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/AC/online
+Lines: 1
+0
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/AC/type
+Lines: 1
+Mains
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/AC/uevent
+Lines: 2
+POWER_SUPPLY_NAME=AC
+POWER_SUPPLY_ONLINE=0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/power_supply/BAT0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/alarm
+Lines: 1
+2503000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/capacity
+Lines: 1
+98
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/capacity_level
+Lines: 1
+Normal
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold
+Lines: 1
+95
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/cycle_count
+Lines: 1
+0
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/energy_full
+Lines: 1
+50060000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/energy_full_design
+Lines: 1
+47520000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/energy_now
+Lines: 1
+49450000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/manufacturer
+Lines: 1
+LGC
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/model_name
+Lines: 1
+LNV-45N1
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/power_now
+Lines: 1
+4830000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/present
+Lines: 1
+1
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/serial_number
+Lines: 1
+38109
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/status
+Lines: 1
+Discharging
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/technology
+Lines: 1
+Li-ion
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/type
+Lines: 1
+Battery
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/uevent
+Lines: 16
+POWER_SUPPLY_NAME=BAT0
+POWER_SUPPLY_STATUS=Discharging
+POWER_SUPPLY_PRESENT=1
+POWER_SUPPLY_TECHNOLOGY=Li-ion
+POWER_SUPPLY_CYCLE_COUNT=0
+POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
+POWER_SUPPLY_VOLTAGE_NOW=12229000
+POWER_SUPPLY_POWER_NOW=4830000
+POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
+POWER_SUPPLY_ENERGY_FULL=50060000
+POWER_SUPPLY_ENERGY_NOW=49450000
+POWER_SUPPLY_CAPACITY=98
+POWER_SUPPLY_CAPACITY_LEVEL=Normal
+POWER_SUPPLY_MODEL_NAME=LNV-45N1
+POWER_SUPPLY_MANUFACTURER=LGC
+POWER_SUPPLY_SERIAL_NUMBER=38109
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/voltage_min_design
+Lines: 1
+10800000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/power_supply/BAT0/voltage_now
+Lines: 1
+12229000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal/thermal_zone0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/policy
+Lines: 1
+step_wise
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/temp
+Lines: 1
+49925
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/type
+Lines: 1
+bcm2835_thermal
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal/thermal_zone1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/mode
+Lines: 1
+enabled
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/passive
+Lines: 1
+0
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/policy
+Lines: 1
+step_wise
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/temp
+Lines: 1
+44000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/type
+Lines: 1
+acpitz
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits
+Lines: 1
+289
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits
+Lines: 1
+546
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written
+Lines: 1
+512
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats
+Lines: 5
+Unused: 99%
+Metadata: 0%
+Average: 10473
+Sectors per Q: 64
+Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/clocksource
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/clocksource/clocksource0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
+Lines: 1
+tsc hpet acpi_pm
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
+Lines: 1
+tsc
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpu0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
+SymlinkTo: ../cpufreq/policy0
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpu1
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
+Lines: 1
+1200195
+Mode: 400
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
+Lines: 1
+3300000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
+Lines: 1
+1200000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
+Lines: 1
+4294967295
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus
+Lines: 1
+1
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
+Lines: 1
+performance powersave
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver
+Lines: 1
+intel_pstate
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
+Lines: 1
+powersave
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
+Lines: 1
+3300000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
+Lines: 1
+1200000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
+Lines: 1
+<unsupported>
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpufreq
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus
+Lines: 1
+0
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
+Lines: 1
+2400000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
+Lines: 1
+800000
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
+Lines: 1
+0
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus
+Lines: 1
+0
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors
+Lines: 1
+performance powersave
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
+Lines: 1
+1219917
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver
+Lines: 1
+intel_pstate
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor
+Lines: 1
+powersave
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq
+Lines: 1
+2400000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
+Lines: 1
+800000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed
+Lines: 1
+<unsupported>
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0
+Mode: 777
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits
+Lines: 1
+289
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits
+Lines: 1
+546
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0
+Mode: 777
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written
+Lines: 1
+512
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats
+Lines: 5
+Unused: 99%
+Metadata: 0%
+Average: 10473
+Sectors per Q: 64
+Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries
+Lines: 1
+1
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us
+Lines: 1
+1305
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits
+Lines: 1
+289
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio
+Lines: 1
+100
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits
+Lines: 1
+546
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
+Lines: 1
+0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/xfs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/xfs/sda1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/xfs/sda1/stats
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/xfs/sda1/stats/stats
+Lines: 1
+extent_alloc 1 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/xfs/sdb1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/fs/xfs/sdb1/stats
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/fs/xfs/sdb1/stats/stats
+Lines: 1
+extent_alloc 2 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
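The fixture entries above mirror real sysfs files one-to-one (one value per file), so parsing code can be exercised directly against them. Below is a minimal sketch, assuming plain file reads rather than any procfs API; readValue and the runtime estimate are illustrative only:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// readValue is a hypothetical helper that reads a single decimal number from a sysfs-style file.
func readValue(path string) (float64, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseFloat(strings.TrimSpace(string(b)), 64)
}

func main() {
	base := "fixtures/sys/class/power_supply/BAT0"
	energy, err := readValue(base + "/energy_now") // 49450000 µWh in the fixture
	if err != nil {
		panic(err)
	}
	power, err := readValue(base + "/power_now") // 4830000 µW in the fixture
	if err != nil {
		panic(err)
	}
	fmt.Printf("estimated runtime: %.1f h\n", energy/power) // ~10.2 h at the fixture's discharge rate
}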
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 000000000..0102ab0fd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,43 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "github.com/prometheus/procfs/internal/fs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS struct {
+ proc fs.FS
+}
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = fs.DefaultProcMountPoint
+
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+ return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+ fs, err := fs.NewFS(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+ return FS{fs}, nil
+}
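A minimal sketch of how the constructors above are typically used; it assumes /proc is mounted and uses fs.IPVSStats, which is added later in this diff:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS stats /proc and errors if it is missing or not a directory.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// The FS value is then handed to accessors defined elsewhere in this package,
	// e.g. IPVSStats (added further down in this diff).
	stats, err := fs.IPVSStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("IPVS connections:", stats.Connections)
}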
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
new file mode 100644
index 000000000..8a1b839fd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/go.mod
@@ -0,0 +1,3 @@
+module github.com/prometheus/procfs
+
+require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum
new file mode 100644
index 000000000..7827dd3d5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 000000000..c66a1cf80
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+const (
+ // DefaultProcMountPoint is the common mount point of the proc filesystem.
+ DefaultProcMountPoint = "/proc"
+
+ // DefaultSysMountPoint is the common mount point of the sys filesystem.
+ DefaultSysMountPoint = "/sys"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+ return filepath.Join(append([]string{string(fs)}, p...)...)
+}
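Because this package lives under internal/, it can only be imported from within github.com/prometheus/procfs itself; the sketch below shows what NewFS and Path do and would have to live inside that module. exampleSysPath is a hypothetical name, not part of the library:

// Hypothetical helper inside the procfs module; internal/ cannot be imported from outside it.
package procfs

import "github.com/prometheus/procfs/internal/fs"

func exampleSysPath() (string, error) {
	sysFS, err := fs.NewFS(fs.DefaultSysMountPoint) // stats /sys; errors if unreadable or not a directory
	if err != nil {
		return "", err
	}
	// Path joins the mount point with the given elements:
	// FS("/sys").Path("class", "thermal", "thermal_zone0", "temp")
	//   -> "/sys/class/thermal/thermal_zone0/temp"
	return sysFS.Path("class", "thermal", "thermal_zone0", "temp"), nil
}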
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..2d6cb8d1c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,239 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packets processed.
+ IncomingPackets uint64
+ // Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The remote (real) port.
+ RemotePort uint16
+ // The local firewall mark.
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.proc.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localMark string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = ""
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ LocalMark: localMark,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ var (
+ ip net.IP
+ err error
+ )
+
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ }
+
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ }
+ port, err := strconv.ParseUint(portString, 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
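For reference, /proc/net/ip_vs encodes IPv4 endpoints as eight hex digits, a colon and four hex digits, which is the 13-character case parseIPPort handles above. A standalone sketch of the same decoding, using a made-up entry:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	// Made-up example entry: 192.168.0.22:3306 as it would appear in /proc/net/ip_vs.
	s := "C0A80016:0CEA"
	ipBytes, err := hex.DecodeString(s[0:8]) // first 8 hex digits are the IPv4 address
	if err != nil {
		panic(err)
	}
	port, err := strconv.ParseUint(s[len(s)-4:], 16, 16) // last 4 hex digits are the port
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v:%d\n", net.IP(ipBytes), port) // prints 192.168.0.22:3306
}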
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 000000000..71c106782
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,151 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // Activity state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+ data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+ }
+ mdstat, err := parseMDStat(data)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+ }
+ return mdstat, nil
+}
+
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdstatData []byte) ([]MDStat, error) {
+ mdStats := []MDStat{}
+ lines := strings.Split(string(mdstatData), "\n")
+ for i, l := range lines {
+ if strings.TrimSpace(l) == "" || l[0] == ' ' ||
+ strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ deviceFields := strings.Fields(l)
+ if len(deviceFields) < 3 {
+ return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l)
+ }
+ mdName := deviceFields[0]
+ activityState := deviceFields[2]
+
+ if len(lines) <= i+3 {
+ return mdStats, fmt.Errorf("missing lines for md device %s", mdName)
+ }
+
+ active, total, size, err := evalStatusLine(lines[i+1])
+ if err != nil {
+ return nil, err
+ }
+
+ syncLineIdx := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ syncLineIdx++
+ }
+
+ // If the device is recovering/syncing at the moment, get the number of currently
+ // synced blocks, otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") {
+ syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ mdStats = append(mdStats, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStats, nil
+}
+
+func evalStatusLine(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
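A minimal usage sketch for the MDStat accessor above; it assumes /proc/mdstat exists (hosts without md devices typically return an error here):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	devices, err := fs.MDStat()
	if err != nil {
		log.Fatal(err) // e.g. /proc/mdstat is absent when no md devices exist
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}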
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 000000000..35b2ef351
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10TCPLen = 10
+ fieldTransport10UDPLen = 7
+
+ fieldTransport11TCPLen = 13
+ fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+ // If available, additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The mount options of the NFS mount.
+ Opts map[string]string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueMilliseconds uint64
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseMilliseconds uint64
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestMilliseconds uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The transport protocol used for the NFS mount.
+ Protocol string
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTimeSeconds uint64
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+ // between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldOpts = "opts:"
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldOpts:
+ if stats.Opts == nil {
+ stats.Opts = map[string]string{}
+ }
+ for _, opt := range strings.Split(ss[1], ",") {
+ split := strings.Split(opt, "=")
+ if len(split) == 2 {
+ stats.Opts[split[0]] = split[1]
+ } else {
+ stats.Opts[opt] = ""
+ }
+ }
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+ // When encountering "per-operation statistics", we must break this
+ // loop and parse them separately to ensure we can terminate parsing
+ // before reaching another device entry; this is why the check below is an
+ // 'if' statement rather than another switch case.
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueMilliseconds: ns[5],
+ CumulativeTotalResponseMilliseconds: ns[6],
+ CumulativeTotalRequestMilliseconds: ns[7],
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ // Extract the protocol field. It is the only string value in the line
+ protocol := ss[0]
+ ss = ss[1:]
+
+ switch statVersion {
+ case statVersion10:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport10TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport10UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport11TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport11UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+ // the TCP length here.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11TCPLen)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ // The fields differ depending on the transport protocol (TCP or UDP)
+ // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+ //
+ // For the udp RPC transport there is no connection count, connect idle time,
+ // or idle time (fields #3, #4, and #5); all other fields are the same. So
+ // we set them to 0 here.
+ if protocol == "udp" {
+ ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ }
+
+ return &NFSTransportStats{
+ Protocol: protocol,
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTimeSeconds: ns[4],
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
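As a quick orientation for the mountstats API above, here is a minimal usage sketch. It is not part of the vendored diff; it assumes the package is imported as github.com/prometheus/procfs and uses Self() from proc.go further down in this changeset. Stats is only populated for NFSv3/v4 mounts, so a type assertion is used to pick those out.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// The assertion fails harmlessly for non-NFS mounts (Stats is nil there).
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: age=%s, read=%d bytes, RPC sends=%d\n",
				m.Device, m.Mount, nfs.Age, nfs.Bytes.ReadTotal, nfs.Transport.Sends)
		}
	}
}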
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 000000000..a0b7a0119
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,206 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+ Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
+ RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+ RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
+ RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+ RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
+ RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
+ TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
+ TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
+ TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
+ TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
+ TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+ TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
+ TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+ return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
+ return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return NetDev{}, err
+ }
+ defer f.Close()
+
+ netDev := NetDev{}
+ s := bufio.NewScanner(f)
+ for n := 0; s.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line, err := netDev.parseLine(s.Text())
+ if err != nil {
+ return netDev, err
+ }
+
+ netDev[line.Name] = *line
+ }
+
+ return netDev, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+ parts := strings.SplitN(rawLine, ":", 2)
+ if len(parts) != 2 {
+ return nil, errors.New("invalid net/dev line, missing colon")
+ }
+ fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+ var err error
+ line := &NetDevLine{}
+
+ // Interface Name
+ line.Name = strings.TrimSpace(parts[0])
+ if line.Name == "" {
+ return nil, errors.New("invalid net/dev line, empty interface name")
+ }
+
+ // RX
+ line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // TX
+ line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(netDev))
+ for _, ifc := range netDev {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+ total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
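A minimal sketch of how the NetDev parser above might be used (illustrative only, not part of the diff; it assumes NewFS and DefaultMountPoint, which are defined elsewhere in the package and referenced by proc.go below):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	netDev, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	// Total() aggregates counters across all interfaces.
	total := netDev.Total()
	fmt.Printf("interfaces: %s\n", total.Name)
	fmt.Printf("rx=%d bytes, tx=%d bytes\n", total.RxBytes, total.TxBytes)
}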
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 000000000..240340a83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,275 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+const (
+ netUnixKernelPtrIdx = iota
+ netUnixRefCountIdx
+ _
+ netUnixFlagsIdx
+ netUnixTypeIdx
+ netUnixStateIdx
+ netUnixInodeIdx
+
+ // Inode and Path are optional.
+ netUnixStaticFieldsCnt = 6
+)
+
+const (
+ netUnixTypeStream = 1
+ netUnixTypeDgram = 2
+ netUnixTypeSeqpacket = 5
+
+ netUnixFlagListen = 1 << 16
+
+ netUnixStateUnconnected = 1
+ netUnixStateConnecting = 2
+ netUnixStateConnected = 3
+ netUnixStateDisconnected = 4
+)
+
+var errInvalidKernelPtrFmt = errors.New("invalid Num (the kernel table slot number) format")
+
+// NetUnixType is the type of the type field.
+type NetUnixType uint64
+
+// NetUnixFlags is the type of the flags field.
+type NetUnixFlags uint64
+
+// NetUnixState is the type of the state field.
+type NetUnixState uint64
+
+// NetUnixLine represents a line of /proc/net/unix.
+type NetUnixLine struct {
+ KernelPtr string
+ RefCount uint64
+ Protocol uint64
+ Flags NetUnixFlags
+ Type NetUnixType
+ State NetUnixState
+ Inode uint64
+ Path string
+}
+
+// NetUnix holds the data read from /proc/net/unix.
+type NetUnix struct {
+ Rows []*NetUnixLine
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func NewNetUnix() (*NetUnix, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewNetUnix()
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func (fs FS) NewNetUnix() (*NetUnix, error) {
+ return NewNetUnixByPath(fs.proc.Path("net/unix"))
+}
+
+// NewNetUnixByPath returns data read from /proc/net/unix by file path.
+// It might return an error with partially parsed data if an error occurs after some data has been parsed.
+func NewNetUnixByPath(path string) (*NetUnix, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return NewNetUnixByReader(f)
+}
+
+// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
+// It might return an error with partially parsed data if an error occurs after some data has been parsed.
+func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
+ nu := &NetUnix{
+ Rows: make([]*NetUnixLine, 0, 32),
+ }
+ scanner := bufio.NewScanner(reader)
+ // Omit the header line.
+ scanner.Scan()
+ header := scanner.Text()
+ // The man page of proc(5) does not mention an Inode field,
+ // but it is present in practice.
+ // This code works for both cases.
+ hasInode := strings.Contains(header, "Inode")
+
+ minFieldsCnt := netUnixStaticFieldsCnt
+ if hasInode {
+ minFieldsCnt++
+ }
+ for scanner.Scan() {
+ line := scanner.Text()
+ item, err := nu.parseLine(line, hasInode, minFieldsCnt)
+ if err != nil {
+ return nu, err
+ }
+ nu.Rows = append(nu.Rows, item)
+ }
+
+ return nu, scanner.Err()
+}
+
+func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
+ fields := strings.Fields(line)
+ fieldsLen := len(fields)
+ if fieldsLen < minFieldsCnt {
+ return nil, fmt.Errorf(
+ "Parse Unix domain failed: expect at least %d fields but got %d",
+ minFieldsCnt, fieldsLen)
+ }
+ kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
+ }
+ users, err := u.parseUsers(fields[netUnixRefCountIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
+ }
+ flags, err := u.parseFlags(fields[netUnixFlagsIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
+ }
+ typ, err := u.parseType(fields[netUnixTypeIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
+ }
+ state, err := u.parseState(fields[netUnixStateIdx])
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
+ }
+ var inode uint64
+ if hasInode {
+ inodeStr := fields[netUnixInodeIdx]
+ inode, err = u.parseInode(inodeStr)
+ if err != nil {
+ return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
+ }
+ }
+
+ nuLine := &NetUnixLine{
+ KernelPtr: kernelPtr,
+ RefCount: users,
+ Type: typ,
+ Flags: flags,
+ State: state,
+ Inode: inode,
+ }
+
+ // Path field is optional.
+ if fieldsLen > minFieldsCnt {
+ pathIdx := netUnixInodeIdx + 1
+ if !hasInode {
+ pathIdx--
+ }
+ nuLine.Path = fields[pathIdx]
+ }
+
+ return nuLine, nil
+}
+
+func (u NetUnix) parseKernelPtr(str string) (string, error) {
+ if !strings.HasSuffix(str, ":") {
+ return "", errInvalidKernelPtrFmt
+ }
+ return str[:len(str)-1], nil
+}
+
+func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
+ return strconv.ParseUint(hexStr, 16, 32)
+}
+
+func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
+ return strconv.ParseUint(hexStr, 16, 32)
+}
+
+func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
+ typ, err := strconv.ParseUint(hexStr, 16, 16)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixType(typ), nil
+}
+
+func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
+ flags, err := strconv.ParseUint(hexStr, 16, 32)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixFlags(flags), nil
+}
+
+func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
+ st, err := strconv.ParseInt(hexStr, 16, 8)
+ if err != nil {
+ return 0, err
+ }
+ return NetUnixState(st), nil
+}
+
+func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
+ return strconv.ParseUint(inodeStr, 10, 64)
+}
+
+func (t NetUnixType) String() string {
+ switch t {
+ case netUnixTypeStream:
+ return "stream"
+ case netUnixTypeDgram:
+ return "dgram"
+ case netUnixTypeSeqpacket:
+ return "seqpacket"
+ }
+ return "unknown"
+}
+
+func (f NetUnixFlags) String() string {
+ switch f {
+ case netUnixFlagListen:
+ return "listen"
+ default:
+ return "default"
+ }
+}
+
+func (s NetUnixState) String() string {
+ switch s {
+ case netUnixStateUnconnected:
+ return "unconnected"
+ case netUnixStateConnecting:
+ return "connecting"
+ case netUnixStateConnected:
+ return "connected"
+ case netUnixStateDisconnected:
+ return "disconnected"
+ }
+ return "unknown"
+}
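A small usage sketch for the /proc/net/unix parser above (not part of the diff). NetUnixType and NetUnixState implement Stringer, so they print as readable names:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nu, err := procfs.NewNetUnix()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Path is empty for unnamed sockets.
		fmt.Printf("inode=%d type=%s state=%s path=%q\n",
			row.Inode, row.Type, row.State, row.Path)
	}
}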
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 000000000..8a8430147
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,267 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/fs"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs fs.FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Proc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.proc.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Proc(pid)
+}
+
+// NewProc returns a process for the given pid.
+//
+// Deprecated: use fs.Proc() instead
+func (fs FS) NewProc(pid int) (Proc, error) {
+ return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs.proc}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.proc.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs.proc})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+ wd, err := os.Readlink(p.path("cwd"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot)
+func (p Proc) RootDir() (string, error) {
+ rdir, err := os.Readlink(p.path("root"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..0ff89b1ce
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+ return pio, err
+}
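A short sketch of reading the I/O counters parsed above (not part of the diff; note that /proc/[pid]/io for other processes may require elevated privileges):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	pio, err := p.IO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read=%d bytes (%d syscalls), written=%d bytes (%d syscalls)\n",
		pio.ReadBytes, pio.SyscR, pio.WriteBytes, pio.SyscW)
}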
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..91ee24df8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int64
+ // Maximum size of files that the process may create.
+ FileSize int64
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int64
+ // Maximum size of the process stack in bytes.
+ StackSize int64
+ // Maximum size of a core file.
+ CoreFileSize int64
+ // Limit of the process's resident set in pages.
+ ResidentSet int64
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int64
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int64
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int64
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int64
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int64
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int64
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int64
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int64
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int64
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int64
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+//
+// Deprecated: use p.Limits() instead
+func (p Proc) NewLimits() (ProcLimits, error) {
+ return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return i, nil
+}
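A brief sketch for the limits parser above (not part of the diff). Since parseInt maps the string "unlimited" to -1, callers should treat negative values as "no limit":

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	l, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	if l.OpenFiles < 0 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", l.OpenFiles)
	}
}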
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 000000000..c66740ff7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+ Type string // Namespace type.
+ Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) Namespaces() (Namespaces, error) {
+ d, err := os.Open(p.path("ns"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ }
+
+ ns := make(Namespaces, len(names))
+ for _, name := range names {
+ target, err := os.Readlink(p.path("ns", name))
+ if err != nil {
+ return nil, err
+ }
+
+ fields := strings.SplitN(target, ":", 2)
+ if len(fields) != 2 {
+ return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ }
+
+ typ := fields[0]
+ inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ }
+
+ ns[name] = Namespace{typ, uint32(inode)}
+ }
+
+ return ns, nil
+}
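A usage sketch for the namespace parser above (not part of the diff); comparing Inode values across processes tells you whether they share a namespace:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	namespaces, err := p.Namespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name, ns := range namespaces {
		fmt.Printf("%s: type=%s inode=%d\n", name, ns.Type, ns.Inode)
	}
}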
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 000000000..46fe26626
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by /proc/pressure/*
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total entry is the cumulative stall time in microseconds.
+type PSILine struct {
+ Avg10 float64
+ Avg60 float64
+ Avg300 float64
+ Total uint64
+}
+
+// PSIStats represent pressure stall information from /proc/pressure/*
+// Some indicates the share of time in which at least some tasks are stalled
+// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
+type PSIStats struct {
+ Some *PSILine
+ Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
+ file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
+ if err != nil {
+ return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
+ }
+
+ defer file.Close()
+ return parsePSIStats(resource, file)
+}
+
+// parsePSIStats parses the specified file for pressure stall information
+func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
+ psiStats := PSIStats{}
+ stats, err := ioutil.ReadAll(file)
+ if err != nil {
+ return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
+ }
+
+ for _, l := range strings.Split(string(stats), "\n") {
+ prefix := strings.Split(l, " ")[0]
+ switch prefix {
+ case "some":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Some = &psi
+ case "full":
+ psi := PSILine{}
+ _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+ if err != nil {
+ return PSIStats{}, err
+ }
+ psiStats.Full = &psi
+ default:
+ // If we encounter a line with an unknown prefix, ignore it and move on.
+ // Should new measurement types be added in the future, we will simply
+ // ignore them instead of erroring on retrieval.
+ continue
+ }
+ }
+
+ return psiStats, nil
+}
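A sketch of querying PSI data via the parser above (not part of the diff). /proc/pressure/* only exists on recent kernels (4.20+) with PSI enabled, so the error path matters here:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.PSIStatsForResource("io")
	if err != nil {
		log.Fatal(err) // e.g. PSI not supported by this kernel
	}
	if stats.Some != nil {
		fmt.Printf("io some: avg10=%.2f%% total=%dus\n", stats.Some.Avg10, stats.Some.Total)
	}
	if stats.Full != nil {
		fmt.Printf("io full: avg10=%.2f%% total=%dus\n", stats.Full.Avg10, stats.Full.Total)
	}
}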
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 000000000..6ed98a8ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,198 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/prometheus/procfs/internal/fs"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize uint
+ // Resident set size in pages.
+ RSS int
+
+ proc fs.FS
+}
+
+// NewStat returns the current status information of the process.
+//
+// Deprecated: use p.Stat() instead
+func (p Proc) NewStat() (ProcStat, error) {
+ return p.Stat()
+}
+
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, proc: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() uint {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ fs := FS{proc: s.proc}
+ stat, err := fs.Stat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
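A minimal sketch of the stat accessors above (not part of the diff). StartTime needs the boot time from /proc/stat, which is why ProcStat keeps a reference to the proc filesystem:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	started, err := stat.StartTime()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("comm=%s state=%s cpu=%.2fs rss=%d bytes started=%.0f\n",
		stat.Comm, stat.State, stat.CPUTime(), stat.ResidentMemory(), started)
}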
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 000000000..6b4b61f71
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+ // The process ID.
+ PID int
+ // The process name.
+ Name string
+
+ // Peak virtual memory size.
+ VmPeak uint64
+ // Virtual memory size.
+ VmSize uint64
+ // Locked memory size.
+ VmLck uint64
+ // Pinned memory size.
+ VmPin uint64
+ // Peak resident set size.
+ VmHWM uint64
+ // Resident set size (sum of RssAnon, RssFile and RssShmem).
+ VmRSS uint64
+ // Size of resident anonymous memory.
+ RssAnon uint64
+ // Size of resident file mappings.
+ RssFile uint64
+ // Size of resident shared memory.
+ RssShmem uint64
+ // Size of data segments.
+ VmData uint64
+ // Size of stack segments.
+ VmStk uint64
+ // Size of text segments.
+ VmExe uint64
+ // Shared library code size.
+ VmLib uint64
+ // Page table entries size.
+ VmPTE uint64
+ // Size of second-level page tables.
+ VmPMD uint64
+ // Swapped-out virtual memory size of anonymous private pages.
+ VmSwap uint64
+ // Size of hugetlb memory portions
+ HugetlbPages uint64
+
+ // Number of voluntary context switches.
+ VoluntaryCtxtSwitches uint64
+ // Number of involuntary context switches.
+ NonVoluntaryCtxtSwitches uint64
+}
+
+// NewStatus returns the current status information of the process.
+func (p Proc) NewStatus() (ProcStatus, error) {
+ f, err := os.Open(p.path("status"))
+ if err != nil {
+ return ProcStatus{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStatus{}, err
+ }
+
+ s := ProcStatus{PID: p.PID}
+
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ if !bytes.Contains([]byte(line), []byte(":")) {
+ continue
+ }
+
+ kv := strings.SplitN(line, ":", 2)
+
+ // removes spaces
+ k := string(strings.TrimSpace(kv[0]))
+ v := string(strings.TrimSpace(kv[1]))
+ // removes "kB"
+ v = string(bytes.Trim([]byte(v), " kB"))
+
+ // convert the value to an integer when possible
+ // the error check can be skipped here because vKBytes is not used when the value is a string
+ vKBytes, _ := strconv.ParseUint(v, 10, 64)
+ // convert kB to B
+ vBytes := vKBytes * 1024
+
+ s.fillStatus(k, v, vKBytes, vBytes)
+ }
+
+ return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+ switch k {
+ case "Name":
+ s.Name = vString
+ case "VmPeak":
+ s.VmPeak = vUintBytes
+ case "VmSize":
+ s.VmSize = vUintBytes
+ case "VmLck":
+ s.VmLck = vUintBytes
+ case "VmPin":
+ s.VmPin = vUintBytes
+ case "VmHWM":
+ s.VmHWM = vUintBytes
+ case "VmRSS":
+ s.VmRSS = vUintBytes
+ case "RssAnon":
+ s.RssAnon = vUintBytes
+ case "RssFile":
+ s.RssFile = vUintBytes
+ case "RssShmem":
+ s.RssShmem = vUintBytes
+ case "VmData":
+ s.VmData = vUintBytes
+ case "VmStk":
+ s.VmStk = vUintBytes
+ case "VmExe":
+ s.VmExe = vUintBytes
+ case "VmLib":
+ s.VmLib = vUintBytes
+ case "VmPTE":
+ s.VmPTE = vUintBytes
+ case "VmPMD":
+ s.VmPMD = vUintBytes
+ case "VmSwap":
+ s.VmSwap = vUintBytes
+ case "HugetlbPages":
+ s.HugetlbPages = vUintBytes
+ case "voluntary_ctxt_switches":
+ s.VoluntaryCtxtSwitches = vUint
+ case "nonvoluntary_ctxt_switches":
+ s.NonVoluntaryCtxtSwitches = vUint
+ }
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+ return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
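Building on the sketch after proc_stat.go, reading /proc/[pid]/status through this API could look roughly like the following, reusing the fs and p handles assumed there:

    status, err := p.NewStatus()
    if err != nil {
        log.Fatal(err)
    }
    // VmRSS is reported in bytes (kB values are multiplied by 1024 during parsing).
    fmt.Printf("%s: VmRSS=%d bytes, context switches=%d\n",
        status.Name, status.VmRSS, status.TotalCtxtSwitches())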
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 000000000..6661ee03a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,244 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/fs"
+)
+
+// CPUStat shows how much time the CPU has spent in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-CPU stats by reading /proc/softirqs.
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU []CPUStat
+ // Number of times interrupts were handled, including both numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
+}
+
+// parseCPUStat parses a CPU statistics line and returns the CPUStat struct plus the CPU id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func NewStat() (Stat, error) {
+ fs, err := NewFS(fs.DefaultProcMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+ return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func (fs FS) NewStat() (Stat, error) {
+ return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+
+ f, err := os.Open(fs.proc.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ stat := Stat{}
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least <key> <value>
+ if len(parts) < 2 {
+ continue
+ }
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ for int64(len(stat.CPU)) <= cpuID {
+ stat.CPU = append(stat.CPU, CPUStat{})
+ }
+ stat.CPU[cpuID] = cpuStat
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return stat, nil
+}
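For the system-wide statistics above, a hedged usage sketch (again assuming the procfs.NewFS constructor, inside a main like the earlier sketch) might read:

    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    st, err := fs.Stat()
    if err != nil {
        log.Fatal(err)
    }
    // BootTime is seconds since the epoch; CPU times are already divided by userHZ
    // in parseCPUStat, so they are plain seconds here.
    fmt.Printf("boot=%d user=%.1fs ctxt=%d cpus=%d\n",
        st.BootTime, st.CPUTotal.User, st.ContextSwitches, len(st.CPU))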
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100644
index 000000000..19ef02b8d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,413 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+# - stores only filename, content, and mode
+# - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'EOF', r'\EOF', line)
+ line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+ line = re.sub('\x00', r'NULLBYTE', line)
+ sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+ line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+ line = re.sub(r'([^\\])EOF', r'\1', line)
+ line = re.sub(r'\\EOF', 'EOF', line)
+ sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+ if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+ echo "WARNING sed unable to handle null bytes, using Python (slow)."
+ if ! which python >/dev/null; then
+ echo "ERROR Python not found. Aborting."
+ exit 2
+ fi
+ USE_PYTHON=1
+ fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+ bname=$(basename "$0")
+ cat << USAGE
+Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+ $bname -t -f <ARCHIVE> (list archive contents)
+ $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
+
+Options:
+ -C <DIR> (change directory)
+ -v (verbose)
+ --recursive-unlink (recursively delete existing directory if path
+ collides with file or directory to extract)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+ $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+ if [ "${VERBOSE:-}" == "yes" ]; then
+ echo >&7 "$@"
+ fi
+}
+
+function set_cmd {
+ if [ -n "$CMD" ]; then
+ echo "ERROR: more than one command given"
+ echo
+ usage 2
+ fi
+ CMD=$1
+}
+
+unset VERBOSE
+unset RECURSIVE_UNLINK
+
+while getopts :cf:-:htxvC: opt; do
+ case $opt in
+ c)
+ set_cmd "create"
+ ;;
+ f)
+ ARCHIVE=$OPTARG
+ ;;
+ h)
+ usage 0
+ ;;
+ t)
+ set_cmd "list"
+ ;;
+ x)
+ set_cmd "extract"
+ ;;
+ v)
+ VERBOSE=yes
+ exec 7>&1
+ ;;
+ C)
+ CDIR=$OPTARG
+ ;;
+ -)
+ case $OPTARG in
+ recursive-unlink)
+ RECURSIVE_UNLINK="yes"
+ ;;
+ *)
+ echo -e "Error: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+ ;;
+ *)
+ echo >&2 "ERROR: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+ echo >&2 "ERROR: no command given"
+ echo
+ usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+ echo >&2 "ERROR: no archive name given"
+ echo
+ usage 1
+fi
+
+function list {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ $size -gt 0 ]; then
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ echo "$path"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ echo "$path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ echo "$path -> ${BASH_REMATCH[1]}"
+ fi
+ done < "$ttar_file"
+}
+
+function extract {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while IFS= read -r line; do
+ line_no=$(( line_no + 1 ))
+ local eof_without_newline
+ if [ "$size" -gt 0 ]; then
+ if [[ "$line" =~ [^\\]EOF ]]; then
+ # An EOF not preceded by a backslash indicates that the line
+ # does not end with a newline
+ eof_without_newline=1
+ else
+ eof_without_newline=0
+ fi
+ # Replace NULLBYTE with null byte if at beginning of line
+ # Replace NULLBYTE with null byte unless preceded by backslash
+ # Remove one backslash in front of NULLBYTE (if any)
+ # Remove EOF unless preceded by backslash
+ # Remove one backslash in front of EOF
+ if [ $USE_PYTHON -eq 1 ]; then
+ echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+ else
+ # The repeated pattern makes up for sed's lack of negative
+ # lookbehind assertions (for consecutive null bytes).
+ echo -n "$line" | \
+ sed -e 's/^NULLBYTE/\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\\NULLBYTE/NULLBYTE/g;
+ s/\([^\\]\)EOF/\1/g;
+ s/\\EOF/EOF/g;
+ ' >> "$path"
+ fi
+ if [[ "$eof_without_newline" -eq 0 ]]; then
+ echo >> "$path"
+ fi
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ if [ -L "$path" ]; then
+ rm "$path"
+ elif [ -d "$path" ]; then
+ if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+ rm -r "$path"
+ else
+ # Safe because symlinks to directories are dealt with above
+ rmdir "$path"
+ fi
+ elif [ -e "$path" ]; then
+ rm "$path"
+ fi
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ # Create file even if it is zero-length.
+ touch "$path"
+ vecho " $path"
+ elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+ mode=${BASH_REMATCH[1]}
+ chmod "$mode" "$path"
+ vecho "$mode"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ mkdir -p "$path"
+ vecho " $path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ ln -s "${BASH_REMATCH[1]}" "$path"
+ vecho " $path -> ${BASH_REMATCH[1]}"
+ elif [[ $line =~ ^# ]]; then
+ # Ignore comments between files
+ continue
+ else
+ echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+ exit 1
+ fi
+ done < "$ttar_file"
+}
+
+function div {
+ echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+ "- - - - - -"
+}
+
+function get_mode {
+ local mfile=$1
+ if [ -z "${STAT_OPTION:-}" ]; then
+ if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+ # GNU stat
+ STAT_OPTION='-c'
+ STAT_FORMAT='%a'
+ else
+ # BSD stat
+ STAT_OPTION='-f'
+ # Octal output, user/group/other (omit file type, sticky bit)
+ STAT_FORMAT='%OLp'
+ fi
+ fi
+ stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+ shopt -s nullglob
+ local mode
+ local eof_without_newline
+ while (( "$#" )); do
+ file=$1
+ if [ -L "$file" ]; then
+ echo "Path: $file"
+ symlinkTo=$(readlink "$file")
+ echo "SymlinkTo: $symlinkTo"
+ vecho " $file -> $symlinkTo"
+ div
+ elif [ -d "$file" ]; then
+ # Strip trailing slash (if there is one)
+ file=${file%/}
+ echo "Directory: $file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file/"
+ div
+ # Find all files and dirs, including hidden/dot files
+ for x in "$file/"{*,.[^.]*}; do
+ _create "$x"
+ done
+ elif [ -f "$file" ]; then
+ echo "Path: $file"
+ lines=$(wc -l "$file"|awk '{print $1}')
+ eof_without_newline=0
+ if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+ eof_without_newline=1
+ lines=$((lines+1))
+ fi
+ echo "Lines: $lines"
+ # Add backslash in front of EOF
+ # Add backslash in front of NULLBYTE
+ # Replace null byte with NULLBYTE
+ if [ $USE_PYTHON -eq 1 ]; then
+ < "$file" python -c "$PYTHON_CREATE_FILTER"
+ else
+ < "$file" \
+ sed 's/EOF/\\EOF/g;
+ s/NULLBYTE/\\NULLBYTE/g;
+ s/\x0/NULLBYTE/g;
+ '
+ fi
+ if [[ "$eof_without_newline" -eq 1 ]]; then
+ # Finish line with EOF to indicate that the original line did
+ # not end with a linefeed
+ echo "EOF"
+ fi
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file"
+ div
+ else
+ echo >&2 "ERROR: file not found ($file in $(pwd))"
+ exit 2
+ fi
+ shift
+ done
+}
+
+function create {
+ ttar_file=$1
+ shift
+ if [ -z "${1:-}" ]; then
+ echo >&2 "ERROR: missing arguments."
+ echo
+ usage 1
+ fi
+ if [ -e "$ttar_file" ]; then
+ rm "$ttar_file"
+ fi
+ exec > "$ttar_file"
+ echo "# Archive created by ttar $ARG_STRING"
+ _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+ if [[ "$ARCHIVE" != /* ]]; then
+ # Relative path: preserve the archive's location before changing
+ # directory
+ ARCHIVE="$(pwd)/$ARCHIVE"
+ fi
+ cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 000000000..30aa417d5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by others
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transformation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e. sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+ // Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+ "couldn't parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
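A brief sketch of how these counters might be consumed; the NewFS constructor and the DefaultMountPoint constant used by the package-level NewXfrmStat are assumed from the rest of the package:

    x, err := procfs.NewXfrmStat()
    if err != nil {
        log.Fatal(err)
    }
    // XfrmInNoStates counts inbound packets for which no IPsec state was found.
    if x.XfrmInNoStates > 0 {
        fmt.Printf("inbound packets with no matching state: %d\n", x.XfrmInNoStates)
    }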
diff --git a/vendor/github.com/seccomp/containers-golang/.gitignore b/vendor/github.com/seccomp/containers-golang/.gitignore
new file mode 100644
index 000000000..2cad96a16
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/.gitignore
@@ -0,0 +1,2 @@
+default.json
+fixtures
diff --git a/vendor/github.com/seccomp/containers-golang/Makefile b/vendor/github.com/seccomp/containers-golang/Makefile
new file mode 100644
index 000000000..88569e87b
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/Makefile
@@ -0,0 +1,18 @@
+TAGS ?= seccomp
+BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
+GO := go
+PACKAGE := github.com/seccomp/containers-golang
+
+sources := $(wildcard *.go)
+
+default.json: $(sources)
+ $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/generate.go
+ $(GO) build -compiler gc ./cmd/generate.go
+ $(GO) run ${BUILDFLAGS} cmd/generate.go
+
+all: default.json
+
+.PHONY: test-unit
+test-unit:
+ $(GO) test $(BUILDFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ $(GO) test $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json
new file mode 100644
index 000000000..fe9eda56a
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/seccomp.json
@@ -0,0 +1,773 @@
+{
+ "defaultAction": "SCMP_ACT_ERRNO",
+ "archMap": [
+ {
+ "architecture": "SCMP_ARCH_X86_64",
+ "subArchitectures": [
+ "SCMP_ARCH_X86",
+ "SCMP_ARCH_X32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_AARCH64",
+ "subArchitectures": [
+ "SCMP_ARCH_ARM"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_S390X",
+ "subArchitectures": [
+ "SCMP_ARCH_S390"
+ ]
+ }
+ ],
+ "syscalls": [
+ {
+ "names": [
+ "accept",
+ "accept4",
+ "access",
+ "adjtimex",
+ "alarm",
+ "bind",
+ "brk",
+ "capget",
+ "capset",
+ "chdir",
+ "chmod",
+ "chown",
+ "chown32",
+ "clock_getres",
+ "clock_gettime",
+ "clock_nanosleep",
+ "close",
+ "connect",
+ "copy_file_range",
+ "creat",
+ "dup",
+ "dup2",
+ "dup3",
+ "epoll_create",
+ "epoll_create1",
+ "epoll_ctl",
+ "epoll_ctl_old",
+ "epoll_pwait",
+ "epoll_wait",
+ "epoll_wait_old",
+ "eventfd",
+ "eventfd2",
+ "execve",
+ "execveat",
+ "exit",
+ "exit_group",
+ "faccessat",
+ "fadvise64",
+ "fadvise64_64",
+ "fallocate",
+ "fanotify_mark",
+ "fchdir",
+ "fchmod",
+ "fchmodat",
+ "fchown",
+ "fchown32",
+ "fchownat",
+ "fcntl",
+ "fcntl64",
+ "fdatasync",
+ "fgetxattr",
+ "flistxattr",
+ "flock",
+ "fork",
+ "fremovexattr",
+ "fsetxattr",
+ "fstat",
+ "fstat64",
+ "fstatat64",
+ "fstatfs",
+ "fstatfs64",
+ "fsync",
+ "ftruncate",
+ "ftruncate64",
+ "futex",
+ "futimesat",
+ "getcpu",
+ "getcwd",
+ "getdents",
+ "getdents64",
+ "getegid",
+ "getegid32",
+ "geteuid",
+ "geteuid32",
+ "getgid",
+ "getgid32",
+ "getgroups",
+ "getgroups32",
+ "getitimer",
+ "getpeername",
+ "getpgid",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getrandom",
+ "getresgid",
+ "getresgid32",
+ "getresuid",
+ "getresuid32",
+ "getrlimit",
+ "get_robust_list",
+ "getrusage",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "get_thread_area",
+ "gettid",
+ "gettimeofday",
+ "getuid",
+ "getuid32",
+ "getxattr",
+ "inotify_add_watch",
+ "inotify_init",
+ "inotify_init1",
+ "inotify_rm_watch",
+ "io_cancel",
+ "ioctl",
+ "io_destroy",
+ "io_getevents",
+ "ioprio_get",
+ "ioprio_set",
+ "io_setup",
+ "io_submit",
+ "ipc",
+ "kill",
+ "lchown",
+ "lchown32",
+ "lgetxattr",
+ "link",
+ "linkat",
+ "listen",
+ "listxattr",
+ "llistxattr",
+ "_llseek",
+ "lremovexattr",
+ "lseek",
+ "lsetxattr",
+ "lstat",
+ "lstat64",
+ "madvise",
+ "memfd_create",
+ "mincore",
+ "mkdir",
+ "mkdirat",
+ "mknod",
+ "mknodat",
+ "mlock",
+ "mlock2",
+ "mlockall",
+ "mmap",
+ "mmap2",
+ "mprotect",
+ "mq_getsetattr",
+ "mq_notify",
+ "mq_open",
+ "mq_timedreceive",
+ "mq_timedsend",
+ "mq_unlink",
+ "mremap",
+ "msgctl",
+ "msgget",
+ "msgrcv",
+ "msgsnd",
+ "msync",
+ "munlock",
+ "munlockall",
+ "munmap",
+ "nanosleep",
+ "newfstatat",
+ "_newselect",
+ "open",
+ "openat",
+ "pause",
+ "pipe",
+ "pipe2",
+ "poll",
+ "ppoll",
+ "prctl",
+ "pread64",
+ "preadv",
+ "preadv2",
+ "prlimit64",
+ "pselect6",
+ "pwrite64",
+ "pwritev",
+ "pwritev2",
+ "read",
+ "readahead",
+ "readlink",
+ "readlinkat",
+ "readv",
+ "recv",
+ "recvfrom",
+ "recvmmsg",
+ "recvmsg",
+ "remap_file_pages",
+ "removexattr",
+ "rename",
+ "renameat",
+ "renameat2",
+ "restart_syscall",
+ "rmdir",
+ "rt_sigaction",
+ "rt_sigpending",
+ "rt_sigprocmask",
+ "rt_sigqueueinfo",
+ "rt_sigreturn",
+ "rt_sigsuspend",
+ "rt_sigtimedwait",
+ "rt_tgsigqueueinfo",
+ "sched_getaffinity",
+ "sched_getattr",
+ "sched_getparam",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_getscheduler",
+ "sched_rr_get_interval",
+ "sched_setaffinity",
+ "sched_setattr",
+ "sched_setparam",
+ "sched_setscheduler",
+ "sched_yield",
+ "seccomp",
+ "select",
+ "semctl",
+ "semget",
+ "semop",
+ "semtimedop",
+ "send",
+ "sendfile",
+ "sendfile64",
+ "sendmmsg",
+ "sendmsg",
+ "sendto",
+ "setfsgid",
+ "setfsgid32",
+ "setfsuid",
+ "setfsuid32",
+ "setgid",
+ "setgid32",
+ "setgroups",
+ "setgroups32",
+ "setitimer",
+ "setpgid",
+ "setpriority",
+ "setregid",
+ "setregid32",
+ "setresgid",
+ "setresgid32",
+ "setresuid",
+ "setresuid32",
+ "setreuid",
+ "setreuid32",
+ "setrlimit",
+ "set_robust_list",
+ "setsid",
+ "setsockopt",
+ "set_thread_area",
+ "set_tid_address",
+ "setuid",
+ "setuid32",
+ "setxattr",
+ "shmat",
+ "shmctl",
+ "shmdt",
+ "shmget",
+ "shutdown",
+ "sigaltstack",
+ "signalfd",
+ "signalfd4",
+ "sigreturn",
+ "socket",
+ "socketcall",
+ "socketpair",
+ "splice",
+ "stat",
+ "stat64",
+ "statfs",
+ "statfs64",
+ "statx",
+ "symlink",
+ "symlinkat",
+ "sync",
+ "sync_file_range",
+ "syncfs",
+ "sysinfo",
+ "syslog",
+ "tee",
+ "tgkill",
+ "time",
+ "timer_create",
+ "timer_delete",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
+ "timer_getoverrun",
+ "timer_gettime",
+ "timer_settime",
+ "times",
+ "tkill",
+ "truncate",
+ "truncate64",
+ "ugetrlimit",
+ "umask",
+ "uname",
+ "unlink",
+ "unlinkat",
+ "utime",
+ "utimensat",
+ "utimes",
+ "vfork",
+ "vmsplice",
+ "wait4",
+ "waitid",
+ "waitpid",
+ "write",
+ "writev",
+ "mount",
+ "umount2",
+ "reboot",
+ "name_to_handle_at",
+ "unshare"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 0,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 8,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131072,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131080,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 4294967295,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "sync_file_range2"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "ppc64le"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arm_fadvise64_64",
+ "arm_sync_file_range",
+ "sync_file_range2",
+ "breakpoint",
+ "cacheflush",
+ "set_tls"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "arm",
+ "arm64"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arch_prctl"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "modify_ldt"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32",
+ "x86"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "s390_pci_mmio_read",
+ "s390_pci_mmio_write",
+ "s390_runtime_instr"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "open_by_handle_at"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_DAC_READ_SEARCH"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "bpf",
+ "clone",
+ "fanotify_init",
+ "lookup_dcookie",
+ "mount",
+ "name_to_handle_at",
+ "perf_event_open",
+ "quotactl",
+ "setdomainname",
+ "sethostname",
+ "setns",
+ "umount",
+ "umount2",
+ "unshare"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ],
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ }
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 1,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "s390 parameter ordering for clone is different",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ }
+ },
+ {
+ "names": [
+ "reboot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_BOOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "chroot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_CHROOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "delete_module",
+ "init_module",
+ "finit_module",
+ "query_module"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_MODULE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "get_mempolicy",
+ "mbind",
+ "name_to_handle_at",
+ "set_mempolicy"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_NICE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "acct"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PACCT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "kcmp",
+ "process_vm_readv",
+ "process_vm_writev",
+ "ptrace"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PTRACE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "iopl",
+ "ioperm"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_RAWIO"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "settimeofday",
+ "stime",
+ "clock_settime"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TIME"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "vhangup"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TTY_CONFIG"
+ ]
+ },
+ "excludes": {}
+ }
+ ]
+}
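To show how a conditional entry such as the personality rules above could map onto the Go bindings changed later in this diff, here is a hedged sketch; the AddRuleConditional method, the CompareEqualTo constant, and the other names used below are assumed from the libseccomp-golang API and are not defined in this hunk:

    import (
        seccomp "github.com/seccomp/libseccomp-golang"
    )

    // allowPersonality permits the personality syscall only for the given
    // argument value, mirroring {"index": 0, "value": ..., "op": "SCMP_CMP_EQ"}.
    func allowPersonality(value uint64) error {
        filter, err := seccomp.NewFilter(seccomp.ActErrno)
        if err != nil {
            return err
        }
        sc, err := seccomp.GetSyscallFromName("personality")
        if err != nil {
            return err
        }
        cond, err := seccomp.MakeCondition(0, seccomp.CompareEqualTo, value)
        if err != nil {
            return err
        }
        if err := filter.AddRuleConditional(sc, seccomp.ActAllow, []seccomp.ScmpCondition{cond}); err != nil {
            return err
        }
        return filter.Load()
    }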
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.gitignore b/vendor/github.com/seccomp/libseccomp-golang/.gitignore
new file mode 100644
index 000000000..b4826968b
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/.gitignore
@@ -0,0 +1,4 @@
+*~
+*.swp
+*.orig
+tags
diff --git a/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
new file mode 100644
index 000000000..a01d9a722
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG
@@ -0,0 +1,17 @@
+libseccomp-golang: Releases
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+* Version 0.9.1 - May 21, 2019
+- Minimum supported version of libseccomp bumped to v2.2.0
+- Use Libseccomp's `seccomp_version` API to retrieve library version
+- Unconditionally set TSync attribute for filters, due to Go's heavily threaded nature
+- Fix CVE-2017-18367 - Multiple syscall arguments were incorrectly combined with logical-OR, instead of logical-AND
+- Fix a failure to build on Debian-based distributions due to CGo code
+- Fix unit test failures on 32-bit architectures
+- Improve several errors to be more verbose about their causes
+- Add support for SCMP_ACT_LOG (with libseccomp versions 2.4.x and higher), permitting syscalls but logging their execution
+- Add support for SCMP_FLTATR_CTL_LOG (with libseccomp versions 2.4.x and higher), logging not-allowed actions when they are denied
+
+* Version 0.9.0 - January 5, 2017
+- Initial tagged release
diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile
new file mode 100644
index 000000000..1ff4cc898
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile
@@ -0,0 +1,26 @@
+# libseccomp-golang
+
+.PHONY: all check check-build check-syntax fix-syntax vet test lint
+
+all: check-build
+
+check: vet test
+
+check-build:
+ go build
+
+check-syntax:
+ gofmt -d .
+
+fix-syntax:
+ gofmt -w .
+
+vet:
+ go vet -v
+
+test:
+ go test -v
+
+lint:
+ @$(if $(shell which golint),true,$(error "install golint and include it in your PATH"))
+ golint -set_exit_status
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES b/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES
new file mode 100644
index 000000000..744e5cd64
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES
@@ -0,0 +1,112 @@
+How to Submit Patches to the libseccomp Project
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+This document is intended to act as a guide to help you contribute to the
+libseccomp project. It is not perfect, and there will always be exceptions
+to the rules described here, but by following the instructions below you
+should have a much easier time getting your work merged with the upstream
+project.
+
+* Test Your Code
+
+There are two possible tests you can run to verify your code. The first test
+checks the formatting and coding style of your changes; you can run
+the test with the following command:
+
+ # make check-syntax
+
+... if there are any problems with your changes a diff/patch will be shown
+which indicates the problems and how to fix them.
+
+The second possible test is used to ensure the sanity of your code changes
+and to test these changes against the included tests. You can run the test
+with the following command:
+
+ # make check
+
+... if there are any faults or errors they will be displayed.
+
+* Generate the Patch(es)
+
+Depending on how you decided to work with the libseccomp code base and what
+tools you are using there are different ways to generate your patch(es).
+However, regardless of what tools you use, you should always generate your
+patches using the "unified" diff/patch format and the patches should always
+apply to the libseccomp source tree using the following command from the top
+directory of the libseccomp sources:
+
+ # patch -p1 < changes.patch
+
+If you are not using git, stacked git (stgit), or some other tool which can
+generate patch files for you automatically, you may find the following command
+helpful in generating patches, where "libseccomp.orig/" is the unmodified
+source code directory and "libseccomp/" is the source code directory with your
+changes:
+
+ # diff -purN libseccomp-golang.orig/ libseccomp-golang/
+
+When in doubt please generate your patch and try applying it to an unmodified
+copy of the libseccomp sources; if it fails for you, it will fail for the rest
+of us.
+
+* Explain Your Work
+
+At the top of every patch you should include a description of the problem you
+are trying to solve, how you solved it, and why you chose the solution you
+implemented. If you are submitting a bug fix, it is also incredibly helpful
+if you can describe/include a reproducer for the problem in the description as
+well as instructions on how to test for the bug and verify that it has been
+fixed.
+
+* Sign Your Work
+
+The sign-off is a simple line at the end of the patch description, which
+certifies that you wrote it or otherwise have the right to pass it on as an
+open-source patch. The "Developer's Certificate of Origin" pledge is taken
+from the Linux Kernel and the rules are pretty simple:
+
+ Developer's Certificate of Origin 1.1
+
+ By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+... then you just add a line to the bottom of your patch description, with
+your real name, saying:
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+
+* Email Your Patch(es)
+
+Finally, you will need to email your patches to the mailing list so they can
+be reviewed and potentially merged into the main libseccomp-golang repository.
+When sending patches to the mailing list it is important to send your email in
+text form, no HTML mail please, and ensure that your email client does not
+mangle your patches. It should be possible to save your raw email to disk and
+apply it directly to the libseccomp source code; if that fails then you likely
+have a problem with your email client. When in doubt try a test first by
+sending yourself an email with your patch and attempting to apply the emailed
+patch to the libseccomp-golang repository; if it fails for you, it will fail
+for the rest of us trying to test your patch and include it in the main
+libseccomp-golang repository.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
index 53bcb024d..a3cc53822 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -43,7 +43,7 @@ func (e VersionError) Error() string {
if e.minimum != "" {
format += e.minimum + ": "
} else {
- format += "2.1.0: "
+ format += "2.2.0: "
}
format += "detected %d.%d.%d"
return fmt.Sprintf(format, verMajor, verMinor, verMicro)
@@ -76,8 +76,8 @@ type ScmpSyscall int32
const (
// Valid architectures recognized by libseccomp
- // ARM64 and all MIPS architectures are unsupported by versions of the
- // library before v2.2 and will return errors if used
+ // PowerPC and S390(x) architectures are unavailable below library version
+ // v2.3.0 and will return errors if used with incompatible libraries
// ArchInvalid is a placeholder to ensure uninitialized ScmpArch
// variables are invalid
@@ -137,6 +137,10 @@ const (
ActTrace ScmpAction = iota
// ActAllow permits the syscall to continue execution
ActAllow ScmpAction = iota
+ // ActLog permits the syscall to continue execution after logging it.
+ // This action is only usable when libseccomp API level 3 or higher is
+ // supported.
+ ActLog ScmpAction = iota
)
const (
@@ -211,7 +215,7 @@ func GetArchFromString(arch string) (ScmpArch, error) {
case "s390x":
return ArchS390X, nil
default:
- return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
+ return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch)
}
}
@@ -255,7 +259,7 @@ func (a ScmpArch) String() string {
case ArchInvalid:
return "Invalid architecture"
default:
- return "Unknown architecture"
+ return fmt.Sprintf("Unknown architecture %#x", uint(a))
}
}
@@ -279,7 +283,7 @@ func (a ScmpCompareOp) String() string {
case CompareInvalid:
return "Invalid comparison operator"
default:
- return "Unrecognized comparison operator"
+ return fmt.Sprintf("Unrecognized comparison operator %#x", uint(a))
}
}
@@ -295,10 +299,12 @@ func (a ScmpAction) String() string {
case ActTrace:
return fmt.Sprintf("Action: Notify tracing processes with code %d",
(a >> 16))
+ case ActLog:
+ return "Action: Log system call"
case ActAllow:
return "Action: Allow system call"
default:
- return "Unrecognized Action"
+ return fmt.Sprintf("Unrecognized Action %#x", uint(a))
}
}
@@ -324,10 +330,29 @@ func (a ScmpAction) GetReturnCode() int16 {
// GetLibraryVersion returns the version of the library the bindings are built
// against.
// The version is formatted as follows: Major.Minor.Micro
-func GetLibraryVersion() (major, minor, micro int) {
+func GetLibraryVersion() (major, minor, micro uint) {
return verMajor, verMinor, verMicro
}
+// GetApi returns the API level supported by the system.
+// Returns a positive int containing the API level, or 0 with an error if the
+// API level could not be detected due to the library being older than v2.4.0.
+// See the seccomp_api_get(3) man page for details on available API levels:
+// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
+func GetApi() (uint, error) {
+ return getApi()
+}
+
+// SetApi forcibly sets the API level. General use of this function is strongly
+// discouraged.
+// Returns an error if the API level could not be set. An error is always
+// returned if the library is older than v2.4.0
+// See the seccomp_api_get(3) man page for details on available API levels:
+// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
+func SetApi(api uint) error {
+ return setApi(api)
+}
+
// Syscall functions
// GetName retrieves the name of a syscall from its number.
@@ -350,7 +375,7 @@ func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
if cString == nil {
- return "", fmt.Errorf("could not resolve syscall name")
+ return "", fmt.Errorf("could not resolve syscall name for %#x", int32(s))
}
defer C.free(unsafe.Pointer(cString))
@@ -373,7 +398,7 @@ func GetSyscallFromName(name string) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name(cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall")
+ return 0, fmt.Errorf("could not resolve name to syscall: %q", name)
}
return ScmpSyscall(result), nil
@@ -397,7 +422,7 @@ func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall")
+ return 0, fmt.Errorf("could not resolve name to syscall: %q on %v", name, arch)
}
return ScmpSyscall(result), nil
@@ -426,9 +451,9 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo
if comparison == CompareInvalid {
return condStruct, fmt.Errorf("invalid comparison operator")
} else if arg > 5 {
- return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
+ return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg)
} else if len(values) > 2 {
- return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
+ return condStruct, fmt.Errorf("conditions can have at most 2 arguments (%d given)", len(values))
} else if len(values) == 0 {
return condStruct, fmt.Errorf("must provide at least one value to compare against")
}
@@ -494,6 +519,13 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
filter.valid = true
runtime.SetFinalizer(filter, filterFinalizer)
+ // Enable TSync so all goroutines will receive the same rules
+ // If the kernel does not support TSYNC, allow us to continue without error
+ if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP {
+ filter.Release()
+ return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err)
+ }
+
return filter, nil
}
@@ -550,7 +582,7 @@ func (f *ScmpFilter) Release() {
// The source filter src will be released as part of the process, and will no
// longer be usable or valid after this call.
// To be merged, filters must NOT share any architectures, and all their
-// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
+// attributes (Default Action, Bad Arch Action, and No New Privs bools)
// must match.
// The filter src will be merged into the filter this is called on.
// The architectures of the src filter not present in the destination, and all
@@ -723,24 +755,24 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
return true, nil
}
-// GetTsyncBit returns whether Thread Synchronization will be enabled on the
-// filter being loaded, or an error if an issue was encountered retrieving the
-// value.
-// Thread Sync ensures that all members of the thread group of the calling
-// process will share the same Seccomp filter set.
-// Tsync is a fairly recent addition to the Linux kernel and older kernels
-// lack support. If the running kernel does not support Tsync and it is
-// requested in a filter, Libseccomp will not enable TSync support and will
-// proceed as normal.
-// This function is unavailable before v2.2 of libseccomp and will return an
-// error.
-func (f *ScmpFilter) GetTsyncBit() (bool, error) {
- tSync, err := f.getFilterAttr(filterAttrTsync)
+// GetLogBit returns the current state the Log bit will be set to on the filter
+// being loaded, or an error if an issue was encountered retrieving the value.
+// The Log bit tells the kernel that all actions taken by the filter, with the
+// exception of ActAllow, should be logged.
+// The Log bit is only usable when libseccomp API level 3 or higher is
+// supported.
+func (f *ScmpFilter) GetLogBit() (bool, error) {
+ log, err := f.getFilterAttr(filterAttrLog)
if err != nil {
+ api, apiErr := getApi()
+ if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
+ return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ }
+
return false, err
}
- if tSync == 0 {
+ if log == 0 {
return false, nil
}
@@ -773,25 +805,26 @@ func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
return f.setFilterAttr(filterAttrNNP, toSet)
}
-// SetTsync sets whether Thread Synchronization will be enabled on the filter
-// being loaded. Returns an error if setting Tsync failed, or the filter is
-// invalid.
-// Thread Sync ensures that all members of the thread group of the calling
-// process will share the same Seccomp filter set.
-// Tsync is a fairly recent addition to the Linux kernel and older kernels
-// lack support. If the running kernel does not support Tsync and it is
-// requested in a filter, Libseccomp will not enable TSync support and will
-// proceed as normal.
-// This function is unavailable before v2.2 of libseccomp and will return an
-// error.
-func (f *ScmpFilter) SetTsync(enable bool) error {
+// SetLogBit sets the state of the Log bit, which will be applied on filter
+// load, or an error if an issue was encountered setting the value.
+// The Log bit is only usable when libseccomp API level 3 or higher is
+// supported.
+func (f *ScmpFilter) SetLogBit(state bool) error {
var toSet C.uint32_t = 0x0
- if enable {
+ if state {
toSet = 0x1
}
- return f.setFilterAttr(filterAttrTsync, toSet)
+ err := f.setFilterAttr(filterAttrLog, toSet)
+ if err != nil {
+ api, apiErr := getApi()
+ if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
+ return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ }
+ }
+
+ return err
}
// SetSyscallPriority sets a syscall's priority.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
index b0caac91b..4e36b27ae 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -16,47 +16,20 @@ import (
// #cgo pkg-config: libseccomp
/*
+#include <errno.h>
#include <stdlib.h>
#include <seccomp.h>
#if SCMP_VER_MAJOR < 2
-#error Minimum supported version of Libseccomp is v2.1.0
-#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
-#error Minimum supported version of Libseccomp is v2.1.0
+#error Minimum supported version of Libseccomp is v2.2.0
+#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
+#error Minimum supported version of Libseccomp is v2.2.0
#endif
#define ARCH_BAD ~0
const uint32_t C_ARCH_BAD = ARCH_BAD;
-#ifndef SCMP_ARCH_AARCH64
-#define SCMP_ARCH_AARCH64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS
-#define SCMP_ARCH_MIPS ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS64
-#define SCMP_ARCH_MIPS64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS64N32
-#define SCMP_ARCH_MIPS64N32 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL
-#define SCMP_ARCH_MIPSEL ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL64
-#define SCMP_ARCH_MIPSEL64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL64N32
-#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
-#endif
-
#ifndef SCMP_ARCH_PPC
#define SCMP_ARCH_PPC ARCH_BAD
#endif
@@ -95,22 +68,29 @@ const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE;
const uint32_t C_ARCH_S390 = SCMP_ARCH_S390;
const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
+#ifndef SCMP_ACT_LOG
+#define SCMP_ACT_LOG 0x7ffc0000U
+#endif
+
const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
const uint32_t C_ACT_TRAP = SCMP_ACT_TRAP;
const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
+const uint32_t C_ACT_LOG = SCMP_ACT_LOG;
const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
-// If TSync is not supported, make sure it doesn't map to a supported filter attribute
-// Don't worry about major version < 2, the minimum version checks should catch that case
-#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
-#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
+// The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was
+// added in v2.4.0
+#if (SCMP_VER_MAJOR < 2) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+#define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN
#endif
const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
const int C_CMP_NE = (int)SCMP_CMP_NE;
const int C_CMP_LT = (int)SCMP_CMP_LT;
@@ -124,25 +104,80 @@ const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
const int C_VERSION_MINOR = SCMP_VER_MINOR;
const int C_VERSION_MICRO = SCMP_VER_MICRO;
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR >= 3
+unsigned int get_major_version()
+{
+ return seccomp_version()->major;
+}
+
+unsigned int get_minor_version()
+{
+ return seccomp_version()->minor;
+}
+
+unsigned int get_micro_version()
+{
+ return seccomp_version()->micro;
+}
+#else
+unsigned int get_major_version()
+{
+ return (unsigned int)C_VERSION_MAJOR;
+}
+
+unsigned int get_minor_version()
+{
+ return (unsigned int)C_VERSION_MINOR;
+}
+
+unsigned int get_micro_version()
+{
+ return (unsigned int)C_VERSION_MICRO;
+}
+#endif
+
+// The libseccomp API level functions were added in v2.4.0
+#if (SCMP_VER_MAJOR < 2) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+const unsigned int seccomp_api_get(void)
+{
+ // libseccomp-golang requires libseccomp v2.2.0, at a minimum, which
+ // supported API level 2. However, the kernel may not support API level
+ // 2 constructs which are the seccomp() system call and the TSYNC
+ // filter flag. Return the "reserved" value of 0 here to indicate that
+ // proper API level support is not available in libseccomp.
+ return 0;
+}
+
+int seccomp_api_set(unsigned int level)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
typedef struct scmp_arg_cmp* scmp_cast_t;
-// Wrapper to create an scmp_arg_cmp struct
-void*
-make_struct_arg_cmp(
- unsigned int arg,
- int compare,
- uint64_t a,
- uint64_t b
- )
+void* make_arg_cmp_array(unsigned int length)
{
- struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));
+ return calloc(length, sizeof(struct scmp_arg_cmp));
+}
- s->arg = arg;
- s->op = compare;
- s->datum_a = a;
- s->datum_b = b;
+// Wrapper to add an scmp_arg_cmp struct to an existing arg_cmp array
+void add_struct_arg_cmp(
+ struct scmp_arg_cmp* arr,
+ unsigned int pos,
+ unsigned int arg,
+ int compare,
+ uint64_t a,
+ uint64_t b
+ )
+{
+ arr[pos].arg = arg;
+ arr[pos].op = compare;
+ arr[pos].datum_a = a;
+ arr[pos].datum_b = b;
- return s;
+ return;
}
*/
import "C"
@@ -157,6 +192,7 @@ const (
filterAttrActBadArch scmpFilterAttr = iota
filterAttrNNP scmpFilterAttr = iota
filterAttrTsync scmpFilterAttr = iota
+ filterAttrLog scmpFilterAttr = iota
)
const (
@@ -167,7 +203,7 @@ const (
archEnd ScmpArch = ArchS390X
// Comparison boundaries to check for action validity
actionStart ScmpAction = ActKill
- actionEnd ScmpAction = ActAllow
+ actionEnd ScmpAction = ActLog
// Comparison boundaries to check for comparison operator validity
compareOpStart ScmpCompareOp = CompareNotEqual
compareOpEnd ScmpCompareOp = CompareMaskedEqual
@@ -177,28 +213,51 @@ var (
// Error thrown on bad filter context
errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
// Constants representing library major, minor, and micro versions
- verMajor = int(C.C_VERSION_MAJOR)
- verMinor = int(C.C_VERSION_MINOR)
- verMicro = int(C.C_VERSION_MICRO)
+ verMajor = uint(C.get_major_version())
+ verMinor = uint(C.get_minor_version())
+ verMicro = uint(C.get_micro_version())
)
// Nonexported functions
// Check if library version is greater than or equal to the given one
-func checkVersionAbove(major, minor, micro int) bool {
+func checkVersionAbove(major, minor, micro uint) bool {
return (verMajor > major) ||
(verMajor == major && verMinor > minor) ||
(verMajor == major && verMinor == minor && verMicro >= micro)
}
-// Ensure that the library is supported, i.e. >= 2.1.0.
+// Ensure that the library is supported, i.e. >= 2.2.0.
func ensureSupportedVersion() error {
- if !checkVersionAbove(2, 1, 0) {
+ if !checkVersionAbove(2, 2, 0) {
return VersionError{}
}
return nil
}
+// Get the API level
+func getApi() (uint, error) {
+ api := C.seccomp_api_get()
+ if api == 0 {
+ return 0, fmt.Errorf("API level operations are not supported")
+ }
+
+ return uint(api), nil
+}
+
+// Set the API level
+func setApi(api uint) error {
+ if retCode := C.seccomp_api_set(C.uint(api)); retCode != 0 {
+ if syscall.Errno(-1*retCode) == syscall.EOPNOTSUPP {
+ return fmt.Errorf("API level operations are not supported")
+ }
+
+ return fmt.Errorf("could not set API level: %v", retCode)
+ }
+
+ return nil
+}
+
// Filter helpers
// Filter finalizer - ensure that kernel context for filters is freed
@@ -215,13 +274,6 @@ func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
return 0x0, errBadFilter
}
- if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
- return 0x0, VersionError{
- message: "thread synchronization attribute is not supported",
- minimum: "2.2.0",
- }
- }
-
var attribute C.uint32_t
retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
@@ -241,13 +293,6 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
return errBadFilter
}
- if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
- return VersionError{
- message: "thread synchronization attribute is not supported",
- minimum: "2.2.0",
- }
- }
-
retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
if retCode != 0 {
return syscall.Errno(-1 * retCode)
@@ -259,12 +304,9 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
// DOES NOT LOCK OR CHECK VALIDITY
// Assumes caller has already done this
// Wrapper for seccomp_rule_add_... functions
-func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
- var length C.uint
- if cond != nil {
- length = 1
- } else {
- length = 0
+func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, length C.uint, cond C.scmp_cast_t) error {
+ if length != 0 && cond == nil {
+ return fmt.Errorf("null conditions list, but length is nonzero")
}
var retCode C.int
@@ -275,9 +317,11 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b
}
if syscall.Errno(-1*retCode) == syscall.EFAULT {
- return fmt.Errorf("unrecognized syscall")
+ return fmt.Errorf("unrecognized syscall %#x", int32(call))
} else if syscall.Errno(-1*retCode) == syscall.EPERM {
return fmt.Errorf("requested action matches default action of filter")
+ } else if syscall.Errno(-1*retCode) == syscall.EINVAL {
+ return fmt.Errorf("two checks on same syscall argument")
} else if retCode != 0 {
return syscall.Errno(-1 * retCode)
}
@@ -295,7 +339,7 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
}
if len(conds) == 0 {
- if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
+ if err := f.addRuleWrapper(call, action, exact, 0, nil); err != nil {
return err
}
} else {
@@ -307,13 +351,20 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
}
}
- for _, cond := range conds {
- cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
- defer C.free(cmpStruct)
+ argsArr := C.make_arg_cmp_array(C.uint(len(conds)))
+ if argsArr == nil {
+ return fmt.Errorf("error allocating memory for conditions")
+ }
+ defer C.free(argsArr)
+
+ for i, cond := range conds {
+ C.add_struct_arg_cmp(C.scmp_cast_t(argsArr), C.uint(i),
+ C.uint(cond.Argument), cond.Op.toNative(),
+ C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
+ }
- if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
- return err
- }
+ if err := f.addRuleWrapper(call, action, exact, C.uint(len(conds)), C.scmp_cast_t(argsArr)); err != nil {
+ return err
}
}
@@ -325,11 +376,11 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
// Helper - Sanitize Arch token input
func sanitizeArch(in ScmpArch) error {
if in < archStart || in > archEnd {
- return fmt.Errorf("unrecognized architecture")
+ return fmt.Errorf("unrecognized architecture %#x", uint(in))
}
if in.toNative() == C.C_ARCH_BAD {
- return fmt.Errorf("architecture is not supported on this version of the library")
+ return fmt.Errorf("architecture %v is not supported on this version of the library", in)
}
return nil
@@ -338,7 +389,7 @@ func sanitizeArch(in ScmpArch) error {
func sanitizeAction(in ScmpAction) error {
inTmp := in & 0x0000FFFF
if inTmp < actionStart || inTmp > actionEnd {
- return fmt.Errorf("unrecognized action")
+ return fmt.Errorf("unrecognized action %#x", uint(inTmp))
}
if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
@@ -350,7 +401,7 @@ func sanitizeAction(in ScmpAction) error {
func sanitizeCompareOp(in ScmpCompareOp) error {
if in < compareOpStart || in > compareOpEnd {
- return fmt.Errorf("unrecognized comparison operator")
+ return fmt.Errorf("unrecognized comparison operator %#x", uint(in))
}
return nil
@@ -393,7 +444,7 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) {
case C.C_ARCH_S390X:
return ArchS390X, nil
default:
- return 0x0, fmt.Errorf("unrecognized architecture")
+ return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a))
}
}
@@ -472,10 +523,12 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
return ActErrno.SetReturnCode(int16(aTmp)), nil
case C.C_ACT_TRACE:
return ActTrace.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_LOG:
+ return ActLog, nil
case C.C_ACT_ALLOW:
return ActAllow, nil
default:
- return 0x0, fmt.Errorf("unrecognized action")
+ return 0x0, fmt.Errorf("unrecognized action %#x", uint32(a))
}
}
@@ -490,6 +543,8 @@ func (a ScmpAction) toNative() C.uint32_t {
return C.C_ACT_ERRNO | (C.uint32_t(a) >> 16)
case ActTrace:
return C.C_ACT_TRACE | (C.uint32_t(a) >> 16)
+ case ActLog:
+ return C.C_ACT_LOG
case ActAllow:
return C.C_ACT_ALLOW
default:
@@ -508,6 +563,8 @@ func (a scmpFilterAttr) toNative() uint32 {
return uint32(C.C_ATTRIBUTE_NNP)
case filterAttrTsync:
return uint32(C.C_ATTRIBUTE_TSYNC)
+ case filterAttrLog:
+ return uint32(C.C_ATTRIBUTE_LOG)
default:
return 0x0
}
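To illustrate what the rewritten internals buy at the public-API level, a hedged usage sketch (not part of the diff): all conditions of one rule are now marshalled into a single `scmp_arg_cmp` array before the `seccomp_rule_add_*_array` call, so a multi-condition rule like the one below no longer becomes one single-condition rule per comparison. The syscall name and operand values are made-up examples.

```go
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func addConditionalRule() error {
	filter, err := seccomp.NewFilter(seccomp.ActAllow)
	if err != nil {
		return err
	}
	defer filter.Release()

	call, err := seccomp.GetSyscallFromName("dup3")
	if err != nil {
		return err
	}

	// Two checks on different arguments; two checks on the *same* argument now
	// surface the new "two checks on same syscall argument" error (EINVAL).
	c1, err := seccomp.MakeCondition(0, seccomp.CompareEqual, 0)
	if err != nil {
		return err
	}
	c2, err := seccomp.MakeCondition(1, seccomp.CompareGreaterEqual, 100)
	if err != nil {
		return err
	}

	// Both conditions travel to libseccomp in one array via addRuleWrapper.
	return filter.AddRuleConditional(call, seccomp.ActErrno.SetReturnCode(1),
		[]seccomp.ScmpCondition{c1, c2})
}

func main() {
	if err := addConditionalRule(); err != nil {
		log.Fatal(err)
	}
}
```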
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 000000000..6b7d7d1e8
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 000000000..848938a6d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+ depth: 1
+env:
+ - GO111MODULE=on
+ - GO111MODULE=off
+go: [ 1.11.x, 1.12.x ]
+os: [ linux, osx ]
+matrix:
+ exclude:
+ - go: 1.12.x
+ env: GO111MODULE=off
+ - go: 1.11.x
+ os: osx
+install:
+ - ./travis/install.sh
+ - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi
+ - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
+script:
+ - ./travis/cross_build.sh
+ - export GOMAXPROCS=4
+ - export GORACE=halt_on_error=1
+ - go test -race -v ./...
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 000000000..51a7ab0ca
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,200 @@
+# 1.4.2
+ * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+ * Enhance TextFormatter to not print caller information when they are empty (#944)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+ * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+ * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+ * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+ * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+ * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+ * Fix infinite recursion on unknown `Level.String()` (#907)
+ * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+ * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+ * Building prometheus node_exporter on AIX (#840)
+ * Race condition in TextFormatter (#468)
+ * Travis CI import path (#868)
+ * Remove coloured output on Windows (#862)
+ * Pointer to func as field in JSONFormatter (#870)
+ * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+ * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+ * A new trace level named `Trace` whose level is below `Debug`
+ * A configurable exit function to be called upon a Fatal trace
+ * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+ * a fix for a race condition on entry formatting
+ * proper cleanup of previously used entries before putting them back in the pool
+ * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+ * IsLevelEnabled
+ * SetFormatter
+ * SetOutput
+ * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API, WithTime, which allows you to easily force the time of the log
+   entry; this is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the json formatter in order to put all the fields
+   in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the text formatter to configure the name of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index cbe8b6962..a4796eb07 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,22 +1,24 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
-yet stable (pre 1.0). Logrus itself is completely stable and has been used in
-many large deployments. The core API is unlikely to change much but please
-version control your Logrus to make sure you aren't fetching latest `master` on
-every build.**
-
-**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
-realize the consequences of renaming to lower-case. Due to the Go package
-environment, this caused issues. Regretfully, there's no turning back now.
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was chosen.
Everything using `logrus` will need to use the lower-case:
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
-I am terribly sorry for this inconvenience. Logrus strives hard for backwards
-compatibility, and the author failed to realize the cascading consequences of
-such a name-change. To fix Glide, see [these
+To fix Glide, see [these
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
@@ -54,9 +56,40 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-exit status 1
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+ log.SetFormatter(&log.TextFormatter{
+ DisableColors: true,
+ FullTimestamp: true,
+ })
```
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
#### Case-sensitivity
The organization's name was changed to lower-case--and this will not be changed
@@ -218,7 +251,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
@@ -239,59 +272,15 @@ func init() {
```
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-| Hook | Description |
-| ----- | ----------- |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
-| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
-| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
-| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
-| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
-| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
-| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
+A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks).
+
#### Level logging
-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
```go
+log.Trace("Something very low level.")
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
@@ -363,15 +352,20 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * When colors are enabled, levels are truncated to 4 characters by default. To disable
+ truncation set the `DisableLevelTruncation` field to `true`.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -449,13 +443,13 @@ Logrus has a built in facility for asserting the presence of log messages. This
```go
import(
"github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/null"
+ "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"testing"
)
func TestSomething(t*testing.T){
- logger, hook := null.NewNullLogger()
+ logger, hook := test.NewNullLogger()
logger.Error("Helloerror")
assert.Equal(t, 1, len(hook.Entries))
@@ -485,7 +479,7 @@ logrus.RegisterExitHandler(handler)
#### Thread safety
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
Situation when locking is not needed includes:
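Tying the updated README sections together, a minimal sketch (not part of the upstream README) that combines the non-TTY formatter settings, caller reporting, and the new Trace level; the field values are made up:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Plain, fully timestamped text output even on a TTY, plus the calling
	// method as a field (the README quotes 20-40% overhead for the latter).
	log.SetFormatter(&log.TextFormatter{
		DisableColors: true,
		FullTimestamp: true,
	})
	log.SetReportCaller(true)
	log.SetLevel(log.TraceLevel)

	log.Trace("Something very low level.")
	log.WithFields(log.Fields{"animal": "penguin"}).Info("a penguin swims by")
}
```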
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
index 8af90637a..8fd189e1c 100644
--- a/vendor/github.com/sirupsen/logrus/alt_exit.go
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -51,9 +51,9 @@ func Exit(code int) {
os.Exit(code)
}
-// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
-// all handlers. The handlers will also be invoked when any Fatal log entry is
-// made.
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be
@@ -62,3 +62,15 @@ func Exit(code int) {
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+ handlers = append([]func(){handler}, handlers...)
+}
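A short sketch (not part of the diff) of how the two exit-handler registration functions differ; the handler bodies are placeholders:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	// RegisterExitHandler appends, DeferExitHandler prepends; handlers run in
	// slice order, so the prepended handler fires first, mirroring defer.
	logrus.RegisterExitHandler(func() { fmt.Println("flush buffers") })
	logrus.DeferExitHandler(func() { fmt.Println("close database connections") })

	// logrus.Exit (or any Fatal log entry) invokes every handler before os.Exit.
	logrus.Exit(0)
}
```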
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 000000000..96c2ce15f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 320e5d5b8..63e25583c 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -2,13 +2,33 @@ package logrus
import (
"bytes"
+ "context"
"fmt"
"os"
+ "reflect"
+ "runtime"
+ "strings"
"sync"
"time"
)
-var bufferPool *sync.Pool
+var (
+ bufferPool *sync.Pool
+
+ // qualified package name, cached at first use
+ logrusPackage string
+
+ // Positions in the call stack when tracing to report the calling method
+ minimumCallerDepth int
+
+ // Used for caller information initialisation
+ callerInitOnce sync.Once
+)
+
+const (
+ maximumCallerDepth int = 25
+ knownLogrusFrames int = 4
+)
func init() {
bufferPool = &sync.Pool{
@@ -16,15 +36,18 @@ func init() {
return new(bytes.Buffer)
},
}
+
+ // start at the bottom of the stack before the package-name cache is primed
+ minimumCallerDepth = 1
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
-// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
-// passed around as much as you wish to avoid field duplication.
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
@@ -34,21 +57,31 @@ type Entry struct {
// Time at which the log entry was created
Time time.Time
- // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
- // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ // Calling method, with package name
+ Caller *runtime.Frame
+
+ // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
Message string
- // When formatter is called in entry.log(), an Buffer may be set to entry
+ // When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
+
+ // Contains the context set by the user. Useful for hook processing etc.
+ Context context.Context
+
+ // err may contain a field formatting error
+ err string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
- // Default is three fields, give a little extra room
- Data: make(Fields, 5),
+ // Default is three fields, plus one optional. Give a little extra room.
+ Data: make(Fields, 6),
}
}
@@ -68,6 +101,11 @@ func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+ return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
@@ -79,56 +117,164 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data {
data[k] = v
}
+ fieldErr := entry.err
for k, v := range fields {
- data[k] = v
+ isErrField := false
+ if t := reflect.TypeOf(v); t != nil {
+ switch t.Kind() {
+ case reflect.Func:
+ isErrField = true
+ case reflect.Ptr:
+ isErrField = t.Elem().Kind() == reflect.Func
+ }
+ }
+ if isErrField {
+ tmp := fmt.Sprintf("can not add field %q", k)
+ if fieldErr != "" {
+ fieldErr = entry.err + ", " + tmp
+ } else {
+ fieldErr = tmp
+ }
+ } else {
+ data[k] = v
+ }
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+ return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be a better way...
+func getPackageName(f string) string {
+ for {
+ lastPeriod := strings.LastIndex(f, ".")
+ lastSlash := strings.LastIndex(f, "/")
+ if lastPeriod > lastSlash {
+ f = f[:lastPeriod]
+ } else {
+ break
+ }
}
- return &Entry{Logger: entry.Logger, Data: data}
+
+ return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+
+ // cache this package's fully-qualified name
+ callerInitOnce.Do(func() {
+ pcs := make([]uintptr, 2)
+ _ = runtime.Callers(0, pcs)
+ logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
+
+ // now that we have the cache, we can skip a minimum count of known-logrus functions
+ // XXX this is dubious, the number of frames may vary
+ minimumCallerDepth = knownLogrusFrames
+ })
+
+ // Restrict the lookback frames to avoid runaway lookups
+ pcs := make([]uintptr, maximumCallerDepth)
+ depth := runtime.Callers(minimumCallerDepth, pcs)
+ frames := runtime.CallersFrames(pcs[:depth])
+
+ for f, again := frames.Next(); again; f, again = frames.Next() {
+ pkg := getPackageName(f.Function)
+
+ // If the caller isn't part of this package, we're done
+ if pkg != logrusPackage {
+ return &f
+ }
+ }
+
+ // if we got here, we failed to find the caller's context
+ return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+ return entry.Logger != nil &&
+ entry.Logger.ReportCaller &&
+ entry.Caller != nil
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
- entry.Time = time.Now()
+
+ // Default to now, but allow users to override if they want.
+ //
+ // We don't have to worry about polluting future calls to Entry#log()
+ // with this assignment because this function is declared with a
+ // non-pointer receiver.
+ if entry.Time.IsZero() {
+ entry.Time = time.Now()
+ }
+
entry.Level = level
entry.Message = msg
-
- if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
- entry.Logger.mu.Lock()
- fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
- entry.Logger.mu.Unlock()
+ if entry.Logger.ReportCaller {
+ entry.Caller = getCaller()
}
+
+ entry.fireHooks()
+
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
- serialized, err := entry.Logger.Formatter.Format(&entry)
+
+ entry.write()
+
entry.Buffer = nil
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) fireHooks() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ err := entry.Logger.Hooks.Fire(entry.Level, entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ }
+}
+
+func (entry *Entry) write() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
- entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- entry.Logger.mu.Unlock()
} else {
- entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
- entry.Logger.mu.Unlock()
}
+}
- // To avoid Entry#log() returning a value that only would make sense for
- // panic() to use in Entry#Panic(), we avoid the allocation by checking
- // directly here.
- if level <= PanicLevel {
- panic(&entry)
+func (entry *Entry) Log(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.log(level, fmt.Sprint(args...))
}
}
+func (entry *Entry) Trace(args ...interface{}) {
+ entry.Log(TraceLevel, args...)
+}
+
func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
- entry.log(DebugLevel, fmt.Sprint(args...))
- }
+ entry.Log(DebugLevel, args...)
}
func (entry *Entry) Print(args ...interface{}) {
@@ -136,15 +282,11 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
- entry.log(InfoLevel, fmt.Sprint(args...))
- }
+ entry.Log(InfoLevel, args...)
}
func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
- entry.log(WarnLevel, fmt.Sprint(args...))
- }
+ entry.Log(WarnLevel, args...)
}
func (entry *Entry) Warning(args ...interface{}) {
@@ -152,37 +294,37 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
- entry.log(ErrorLevel, fmt.Sprint(args...))
- }
+ entry.Log(ErrorLevel, args...)
}
func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
- entry.log(FatalLevel, fmt.Sprint(args...))
- }
- Exit(1)
+ entry.Log(FatalLevel, args...)
+ entry.Logger.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
- entry.log(PanicLevel, fmt.Sprint(args...))
- }
+ entry.Log(PanicLevel, args...)
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
-func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
- entry.Debug(fmt.Sprintf(format, args...))
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, fmt.Sprintf(format, args...))
}
}
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+ entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ entry.Logf(DebugLevel, format, args...)
+}
+
func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
- entry.Info(fmt.Sprintf(format, args...))
- }
+ entry.Logf(InfoLevel, format, args...)
}
func (entry *Entry) Printf(format string, args ...interface{}) {
@@ -190,9 +332,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
- entry.Warn(fmt.Sprintf(format, args...))
- }
+ entry.Logf(WarnLevel, format, args...)
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
@@ -200,36 +340,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
- entry.Error(fmt.Sprintf(format, args...))
- }
+ entry.Logf(ErrorLevel, format, args...)
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
- entry.Fatal(fmt.Sprintf(format, args...))
- }
- Exit(1)
+ entry.Logf(FatalLevel, format, args...)
+ entry.Logger.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
- entry.Panic(fmt.Sprintf(format, args...))
- }
+ entry.Logf(PanicLevel, format, args...)
}
// Entry Println family functions
-func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
- entry.Debug(entry.sprintlnn(args...))
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, entry.sprintlnn(args...))
}
}
+func (entry *Entry) Traceln(args ...interface{}) {
+ entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ entry.Logln(DebugLevel, args...)
+}
+
func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
- entry.Info(entry.sprintlnn(args...))
- }
+ entry.Logln(InfoLevel, args...)
}
func (entry *Entry) Println(args ...interface{}) {
@@ -237,9 +377,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
- entry.Warn(entry.sprintlnn(args...))
- }
+ entry.Logln(WarnLevel, args...)
}
func (entry *Entry) Warningln(args ...interface{}) {
@@ -247,22 +385,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
- entry.Error(entry.sprintlnn(args...))
- }
+ entry.Logln(ErrorLevel, args...)
}
func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
- entry.Fatal(entry.sprintlnn(args...))
- }
- Exit(1)
+ entry.Logln(FatalLevel, args...)
+ entry.Logger.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
- entry.Panic(entry.sprintlnn(args...))
- }
+ entry.Logln(PanicLevel, args...)
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how
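As an illustration of the reworked Entry API above (the new `Log`/`Logf` variants, `WithContext`, and the `WithTime` value now honoured by `entry.log()`), a hedged sketch that is not part of the diff; the timestamp and messages are invented:

```go
package main

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.SetLevel(logrus.TraceLevel)

	// WithContext attaches a context for hooks to inspect; WithTime pins the
	// timestamp, which entry.log() now keeps instead of always stamping Now().
	entry := logrus.NewEntry(logger).
		WithContext(context.Background()).
		WithTime(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))

	entry.Log(logrus.TraceLevel, "replayed event") // new level-taking variant
	entry.Tracef("replayed %d events", 1)          // new Trace* family
}
```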
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 1aeaa90ba..62fc2f219 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -1,7 +1,9 @@
package logrus
import (
+ "context"
"io"
+ "time"
)
var (
@@ -15,37 +17,38 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Out = out
+ std.SetOutput(out)
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Formatter = formatter
+ std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+ std.SetReportCaller(include)
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.setLevel(level)
+ std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
- std.mu.Lock()
- defer std.mu.Unlock()
- return std.level()
+ return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+ return std.IsLevelEnabled(level)
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Hooks.Add(hook)
+ std.AddHook(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
@@ -53,6 +56,11 @@ func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+ return std.WithContext(ctx)
+}
+
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
@@ -72,6 +80,20 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+ return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+ std.Trace(args...)
+}
+
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
@@ -107,11 +129,16 @@ func Panic(args ...interface{}) {
std.Panic(args...)
}
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+ std.Tracef(format, args...)
+}
+
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
@@ -147,11 +174,16 @@ func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+ std.Traceln(args...)
+}
+
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
@@ -187,7 +219,7 @@ func Panicln(args ...interface{}) {
std.Panicln(args...)
}
-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}
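For the new package-level helpers, a small usage sketch (not part of the diff); `expensiveDump` is a hypothetical stand-in for costly work:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

// expensiveDump stands in for work that is only worth doing at debug verbosity.
func expensiveDump() string { return "serialized state" }

func main() {
	log.SetLevel(log.InfoLevel)

	// IsLevelEnabled lets callers skip costly field construction when the
	// standard logger would drop the entry anyway.
	if log.IsLevelEnabled(log.DebugLevel) {
		log.WithField("state", expensiveDump()).Debug("dumping state")
	}

	log.Trace("suppressed at InfoLevel") // new package-level Trace helper
}
```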
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
index b5fbe934d..408883773 100644
--- a/vendor/github.com/sirupsen/logrus/formatter.go
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -2,7 +2,16 @@ package logrus
import "time"
-const DefaultTimestampFormat = time.RFC3339
+// Default key names for the default fields
+const (
+ defaultTimestampFormat = time.RFC3339
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+ FieldKeyLogrusError = "logrus_error"
+ FieldKeyFunc = "func"
+ FieldKeyFile = "file"
+)
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
@@ -18,7 +27,7 @@ type Formatter interface {
Format(*Entry) ([]byte, error)
}
-// This is to not silently overwrite `time`, `msg` and `level` fields when
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
@@ -30,16 +39,40 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
- if t, ok := data["time"]; ok {
- data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+ timeKey := fieldMap.resolve(FieldKeyTime)
+ if t, ok := data[timeKey]; ok {
+ data["fields."+timeKey] = t
+ delete(data, timeKey)
}
- if m, ok := data["msg"]; ok {
- data["fields.msg"] = m
+ msgKey := fieldMap.resolve(FieldKeyMsg)
+ if m, ok := data[msgKey]; ok {
+ data["fields."+msgKey] = m
+ delete(data, msgKey)
}
- if l, ok := data["level"]; ok {
- data["fields.level"] = l
+ levelKey := fieldMap.resolve(FieldKeyLevel)
+ if l, ok := data[levelKey]; ok {
+ data["fields."+levelKey] = l
+ delete(data, levelKey)
+ }
+
+ logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+ if l, ok := data[logrusErrKey]; ok {
+ data["fields."+logrusErrKey] = l
+ delete(data, logrusErrKey)
+ }
+
+ // If reportCaller is not set, 'func' will not conflict.
+ if reportCaller {
+ funcKey := fieldMap.resolve(FieldKeyFunc)
+ if l, ok := data[funcKey]; ok {
+ data["fields."+funcKey] = l
+ }
+ fileKey := fieldMap.resolve(FieldKeyFile)
+ if l, ok := data[fileKey]; ok {
+ data["fields."+fileKey] = l
+ }
}
}
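Since `prefixFieldClashes` now resolves key names through a `FieldMap`, here is a sketch (not part of the diff) of how a remapped key is protected; the `@`-prefixed names follow the example in the JSONFormatter comment further down, and the field value is made up:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Remap the default keys; prefixFieldClashes then guards the *resolved*
	// names, so a user-supplied "@timestamp" field becomes "fields.@timestamp".
	log.SetFormatter(&log.JSONFormatter{
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})

	log.WithField("@timestamp", "user supplied").Info("field clash example")
}
```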
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 000000000..12fdf9898
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,10 @@
+module github.com/sirupsen/logrus
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.1
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/objx v0.1.1 // indirect
+ github.com/stretchr/testify v1.2.2
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 000000000..596c318b9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,16 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md
index 92b391c17..1bbc0f72d 100644
--- a/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md
+++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md
@@ -6,12 +6,12 @@
import (
"log/syslog"
"github.com/sirupsen/logrus"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
@@ -25,12 +25,12 @@ If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "
import (
"log/syslog"
"github.com/sirupsen/logrus"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
- hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+ hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
index 204f0016d..02b8df380 100644
--- a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
+++ b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
@@ -1,12 +1,13 @@
// +build !windows,!nacl,!plan9
-package logrus_syslog
+package syslog
import (
"fmt"
- "github.com/sirupsen/logrus"
"log/syslog"
"os"
+
+ "github.com/sirupsen/logrus"
)
// SyslogHook to send logs via syslog.
@@ -42,7 +43,7 @@ func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
- case logrus.DebugLevel:
+ case logrus.DebugLevel, logrus.TraceLevel:
return hook.Writer.Debug(line)
default:
return nil
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index e787ea175..098a21a06 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -1,18 +1,16 @@
package logrus
import (
+ "bytes"
"encoding/json"
"fmt"
+ "runtime"
)
type fieldKey string
-type FieldMap map[fieldKey]string
-const (
- FieldKeyMsg = "msg"
- FieldKeyLevel = "level"
- FieldKeyTime = "time"
-)
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
@@ -22,6 +20,7 @@ func (f FieldMap) resolve(key fieldKey) string {
return string(key)
}
+// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
@@ -29,20 +28,34 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
- // FieldMap allows users to customize the names of keys for various fields.
+ // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+ DataKey string
+
+ // FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
+ // FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
- // FieldKeyMsg: "@message",
+ // FieldKeyMsg: "@message",
+ // FieldKeyFunc: "@caller",
// },
// }
FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the json data when ReportCaller is
+ // activated. If either of the returned values is the empty string the
+ // corresponding key will be removed from json fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ // PrettyPrint will indent all json logs
+ PrettyPrint bool
}
+// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
- data := make(Fields, len(entry.Data)+3)
+ data := make(Fields, len(entry.Data)+4)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
@@ -53,22 +66,56 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[k] = v
}
}
- prefixFieldClashes(data)
+
+ if f.DataKey != "" {
+ newData := make(Fields, 4)
+ newData[f.DataKey] = data
+ data = newData
+ }
+
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
+ timestampFormat = defaultTimestampFormat
}
+ if entry.err != "" {
+ data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+ }
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+ if entry.HasCaller() {
+ funcVal := entry.Caller.Function
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+ if funcVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+ }
+ if fileVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+ }
+ }
- serialized, err := json.Marshal(data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
}
- return append(serialized, '\n'), nil
+
+ encoder := json.NewEncoder(b)
+ if f.PrettyPrint {
+ encoder.SetIndent("", " ")
+ }
+ if err := encoder.Encode(data); err != nil {
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ }
+
+ return b.Bytes(), nil
}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 370fff5d1..c0c0b1e55 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -1,16 +1,18 @@
package logrus
import (
+ "context"
"io"
"os"
"sync"
"sync/atomic"
+ "time"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
- // something more adventorous, such as logging to Kafka.
+ // something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
@@ -23,16 +25,24 @@ type Logger struct {
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
+
+ // Flag for whether to log caller info (off by default)
+ ReportCaller bool
+
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
- // logged. `logrus.Debug` is useful in
+ // logged.
Level Level
// Used to sync writing to the log. Locking is enabled by Default
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
+ // Function to exit the application, defaults to `os.Exit()`
+ ExitFunc exitFunc
}
+type exitFunc func(int)
+
type MutexWrap struct {
lock sync.Mutex
disabled bool
@@ -68,10 +78,12 @@ func (mw *MutexWrap) Disable() {
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
- Out: os.Stderr,
- Formatter: new(TextFormatter),
- Hooks: make(LevelHooks),
- Level: InfoLevel,
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ ExitFunc: os.Exit,
+ ReportCaller: false,
}
}
@@ -84,11 +96,12 @@ func (logger *Logger) newEntry() *Entry {
}
func (logger *Logger) releaseEntry(entry *Entry) {
+ entry.Data = map[string]interface{}{}
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
@@ -112,20 +125,38 @@ func (logger *Logger) WithError(err error) *Entry {
return entry.WithError(err)
}
-func (logger *Logger) Debugf(format string, args ...interface{}) {
- if logger.level() >= DebugLevel {
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
- entry.Debugf(format, args...)
+ entry.Logf(level, format, args...)
logger.releaseEntry(entry)
}
}
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+ logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ logger.Logf(DebugLevel, format, args...)
+}
+
func (logger *Logger) Infof(format string, args ...interface{}) {
- if logger.level() >= InfoLevel {
- entry := logger.newEntry()
- entry.Infof(format, args...)
- logger.releaseEntry(entry)
- }
+ logger.Logf(InfoLevel, format, args...)
}
func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -135,123 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
+ logger.Logf(WarnLevel, format, args...)
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
+ logger.Warnf(format, args...)
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- if logger.level() >= ErrorLevel {
- entry := logger.newEntry()
- entry.Errorf(format, args...)
- logger.releaseEntry(entry)
- }
+ logger.Logf(ErrorLevel, format, args...)
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- if logger.level() >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatalf(format, args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
+ logger.Logf(FatalLevel, format, args...)
+ logger.Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- if logger.level() >= PanicLevel {
+ logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
- entry.Panicf(format, args...)
+ entry.Log(level, args...)
logger.releaseEntry(entry)
}
}
+func (logger *Logger) Trace(args ...interface{}) {
+ logger.Log(TraceLevel, args...)
+}
+
func (logger *Logger) Debug(args ...interface{}) {
- if logger.level() >= DebugLevel {
- entry := logger.newEntry()
- entry.Debug(args...)
- logger.releaseEntry(entry)
- }
+ logger.Log(DebugLevel, args...)
}
func (logger *Logger) Info(args ...interface{}) {
- if logger.level() >= InfoLevel {
- entry := logger.newEntry()
- entry.Info(args...)
- logger.releaseEntry(entry)
- }
+ logger.Log(InfoLevel, args...)
}
func (logger *Logger) Print(args ...interface{}) {
entry := logger.newEntry()
- entry.Info(args...)
+ entry.Print(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
+ logger.Log(WarnLevel, args...)
}
func (logger *Logger) Warning(args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
+ logger.Warn(args...)
}
func (logger *Logger) Error(args ...interface{}) {
- if logger.level() >= ErrorLevel {
- entry := logger.newEntry()
- entry.Error(args...)
- logger.releaseEntry(entry)
- }
+ logger.Log(ErrorLevel, args...)
}
func (logger *Logger) Fatal(args ...interface{}) {
- if logger.level() >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatal(args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
+ logger.Log(FatalLevel, args...)
+ logger.Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
- if logger.level() >= PanicLevel {
+ logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
- entry.Panic(args...)
+ entry.Logln(level, args...)
logger.releaseEntry(entry)
}
}
+func (logger *Logger) Traceln(args ...interface{}) {
+ logger.Logln(TraceLevel, args...)
+}
+
func (logger *Logger) Debugln(args ...interface{}) {
- if logger.level() >= DebugLevel {
- entry := logger.newEntry()
- entry.Debugln(args...)
- logger.releaseEntry(entry)
- }
+ logger.Logln(DebugLevel, args...)
}
func (logger *Logger) Infoln(args ...interface{}) {
- if logger.level() >= InfoLevel {
- entry := logger.newEntry()
- entry.Infoln(args...)
- logger.releaseEntry(entry)
- }
+ logger.Logln(InfoLevel, args...)
}
func (logger *Logger) Println(args ...interface{}) {
@@ -261,44 +260,32 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
+ logger.Logln(WarnLevel, args...)
}
func (logger *Logger) Warningln(args ...interface{}) {
- if logger.level() >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
+ logger.Warnln(args...)
}
func (logger *Logger) Errorln(args ...interface{}) {
- if logger.level() >= ErrorLevel {
- entry := logger.newEntry()
- entry.Errorln(args...)
- logger.releaseEntry(entry)
- }
+ logger.Logln(ErrorLevel, args...)
}
func (logger *Logger) Fatalln(args ...interface{}) {
- if logger.level() >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatalln(args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
+ logger.Logln(FatalLevel, args...)
+ logger.Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
- if logger.level() >= PanicLevel {
- entry := logger.newEntry()
- entry.Panicln(args...)
- logger.releaseEntry(entry)
+ logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+ runHandlers()
+ if logger.ExitFunc == nil {
+ logger.ExitFunc = os.Exit
}
+ logger.ExitFunc(code)
}
//When file is opened with appending mode, it's safe to
@@ -312,6 +299,53 @@ func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
-func (logger *Logger) setLevel(level Level) {
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+ return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+ return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+ logger.mu.Lock()
+ oldHooks := logger.Hooks
+ logger.Hooks = hooks
+ logger.mu.Unlock()
+ return oldHooks
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index dd3899974..8644761f7 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -14,22 +14,11 @@ type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
- switch level {
- case DebugLevel:
- return "debug"
- case InfoLevel:
- return "info"
- case WarnLevel:
- return "warning"
- case ErrorLevel:
- return "error"
- case FatalLevel:
- return "fatal"
- case PanicLevel:
- return "panic"
+ if b, err := level.MarshalText(); err == nil {
+ return string(b)
+ } else {
+ return "unknown"
}
-
- return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
@@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) {
return InfoLevel, nil
case "debug":
return DebugLevel, nil
+ case "trace":
+ return TraceLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+ l, err := ParseLevel(string(text))
+ if err != nil {
+ return err
+ }
+
+ *level = Level(l)
+
+ return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+ switch level {
+ case TraceLevel:
+ return []byte("trace"), nil
+ case DebugLevel:
+ return []byte("debug"), nil
+ case InfoLevel:
+ return []byte("info"), nil
+ case WarnLevel:
+ return []byte("warning"), nil
+ case ErrorLevel:
+ return []byte("error"), nil
+ case FatalLevel:
+ return []byte("fatal"), nil
+ case PanicLevel:
+ return []byte("panic"), nil
+ }
+
+ return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
@@ -61,6 +85,7 @@ var AllLevels = []Level{
WarnLevel,
InfoLevel,
DebugLevel,
+ TraceLevel,
}
// These are the different logging levels. You can set the logging level to log
@@ -69,7 +94,7 @@ const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
- // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
@@ -82,6 +107,8 @@ const (
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
+ // TraceLevel level. Designates finer-grained informational events than the Debug.
+ TraceLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
@@ -140,4 +167,20 @@ type FieldLogger interface {
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
+
+ // IsDebugEnabled() bool
+ // IsInfoEnabled() bool
+ // IsWarnEnabled() bool
+ // IsErrorEnabled() bool
+ // IsFatalEnabled() bool
+ // IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+ FieldLogger
+ Tracef(format string, args ...interface{})
+ Trace(args ...interface{})
+ Traceln(args ...interface{})
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go
deleted file mode 100644
index e011a8694..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_appengine.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build appengine
-
-package logrus
-
-import "io"
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
- return true
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
deleted file mode 100644
index 5f6be4d3c..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 000000000..2403de981
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 000000000..3c4f43f91
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 000000000..97af92c68
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 000000000..3293fb3ca
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+ "io"
+ "os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return isTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 000000000..f6710b3bd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 000000000..355dc966f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 000000000..572889db2
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,34 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+ switch v := w.(type) {
+ case *os.File:
+ sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+ }
+}
+
+func checkIfTerminal(w io.Writer) bool {
+ var ret bool
+ switch v := w.(type) {
+ case *os.File:
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+ ret = (err == nil)
+ default:
+ ret = false
+ }
+ if ret {
+ initTerminal(w)
+ }
+ return ret
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
deleted file mode 100644
index 308160ca8..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_linux.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TCGETS
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
deleted file mode 100644
index 190297abf..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
- "io"
- "os"
- "syscall"
- "unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
- var termios Termios
- switch v := f.(type) {
- case *os.File:
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
- default:
- return false
- }
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go
deleted file mode 100644
index 3c86b1abe..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_solaris.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
- "io"
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
- switch v := f.(type) {
- case *os.File:
- _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
- return err == nil
- default:
- return false
- }
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
deleted file mode 100644
index 7a336307e..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_windows.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
- "bytes"
- "errors"
- "io"
- "os"
- "os/exec"
- "strconv"
- "strings"
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
-)
-
-const (
- enableProcessedOutput = 0x0001
- enableWrapAtEolOutput = 0x0002
- enableVirtualTerminalProcessing = 0x0004
-)
-
-func getVersion() (float64, error) {
- stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
- cmd := exec.Command("cmd", "ver")
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- err := cmd.Run()
- if err != nil {
- return -1, err
- }
-
- // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
- version := strings.Replace(stdout.String(), "\n", "", -1)
- version = strings.Replace(version, "\r\n", "", -1)
-
- x1 := strings.Index(version, "[Version")
-
- if x1 == -1 || strings.Index(version, "]") == -1 {
- return -1, errors.New("Can't determine Windows version")
- }
-
- return strconv.ParseFloat(version[x1+9:x1+13], 64)
-}
-
-func init() {
- ver, err := getVersion()
- if err != nil {
- return
- }
-
- // Activate Virtual Processing for Windows CMD
- // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
- if ver >= 10 {
- handle := syscall.Handle(os.Stderr.Fd())
- procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
- }
-}
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
- switch v := f.(type) {
- case *os.File:
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
- default:
- return false
- }
-}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index ba8885406..e01587c43 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -3,6 +3,8 @@ package logrus
import (
"bytes"
"fmt"
+ "os"
+ "runtime"
"sort"
"strings"
"sync"
@@ -10,22 +12,19 @@ import (
)
const (
- nocolor = 0
- red = 31
- green = 32
- yellow = 33
- blue = 34
- gray = 37
+ red = 31
+ yellow = 33
+ blue = 36
+ gray = 37
)
-var (
- baseTimestamp time.Time
-)
+var baseTimestamp time.Time
func init() {
baseTimestamp = time.Now()
}
+// TextFormatter formats logs into text
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
@@ -33,6 +32,9 @@ type TextFormatter struct {
// Force disabling colors.
DisableColors bool
+ // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+ EnvironmentOverrideColors bool
+
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
@@ -49,66 +51,151 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
+ // The keys sorting function; when uninitialized, sort.Strings is used.
+ SortingFunc func([]string)
+
+ // Disables the truncation of the level text to 4 characters.
+ DisableLevelTruncation bool
+
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
- // QuoteCharacter can be set to the override the default quoting character "
- // with something else. For example: ', or `.
- QuoteCharacter string
-
// Whether the logger's out is to a terminal
isTerminal bool
- sync.Once
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &TextFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message"}}
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the data when ReportCaller is
+ // activated. If either of the returned values is the empty string the
+ // corresponding key will be removed from fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ terminalInitOnce sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
- if len(f.QuoteCharacter) == 0 {
- f.QuoteCharacter = "\""
- }
if entry.Logger != nil {
- f.isTerminal = IsTerminal(entry.Logger.Out)
+ f.isTerminal = checkIfTerminal(entry.Logger.Out)
}
}
+func (f *TextFormatter) isColored() bool {
+ isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+ if f.EnvironmentOverrideColors {
+ if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+ isColored = true
+ } else if ok && force == "0" {
+ isColored = false
+ } else if os.Getenv("CLICOLOR") == "0" {
+ isColored = false
+ }
+ }
+
+ return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
- var b *bytes.Buffer
- keys := make([]string, 0, len(entry.Data))
- for k := range entry.Data {
+ data := make(Fields)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+ keys := make([]string, 0, len(data))
+ for k := range data {
keys = append(keys, k)
}
+ var funcVal, fileVal string
+
+ fixedKeys := make([]string, 0, 4+len(data))
+ if !f.DisableTimestamp {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+ }
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+ if entry.Message != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+ }
+ if entry.err != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+ }
+ if entry.HasCaller() {
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ } else {
+ funcVal = entry.Caller.Function
+ fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ }
+
+ if funcVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+ }
+ if fileVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+ }
+ }
+
if !f.DisableSorting {
- sort.Strings(keys)
+ if f.SortingFunc == nil {
+ sort.Strings(keys)
+ fixedKeys = append(fixedKeys, keys...)
+ } else {
+ if !f.isColored() {
+ fixedKeys = append(fixedKeys, keys...)
+ f.SortingFunc(fixedKeys)
+ } else {
+ f.SortingFunc(keys)
+ }
+ }
+ } else {
+ fixedKeys = append(fixedKeys, keys...)
}
+
+ var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
- prefixFieldClashes(entry.Data)
-
- f.Do(func() { f.init(entry) })
-
- isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+ f.terminalInitOnce.Do(func() { f.init(entry) })
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
+ timestampFormat = defaultTimestampFormat
}
- if isColored {
- f.printColored(b, entry, keys, timestampFormat)
+ if f.isColored() {
+ f.printColored(b, entry, keys, data, timestampFormat)
} else {
- if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
- }
- f.appendKeyValue(b, "level", entry.Level.String())
- if entry.Message != "" {
- f.appendKeyValue(b, "msg", entry.Message)
- }
- for _, key := range keys {
- f.appendKeyValue(b, key, entry.Data[key])
+
+ for _, key := range fixedKeys {
+ var value interface{}
+ switch {
+ case key == f.FieldMap.resolve(FieldKeyTime):
+ value = entry.Time.Format(timestampFormat)
+ case key == f.FieldMap.resolve(FieldKeyLevel):
+ value = entry.Level.String()
+ case key == f.FieldMap.resolve(FieldKeyMsg):
+ value = entry.Message
+ case key == f.FieldMap.resolve(FieldKeyLogrusError):
+ value = entry.err
+ case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+ value = funcVal
+ case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+ value = fileVal
+ default:
+ value = data[key]
+ }
+ f.appendKeyValue(b, key, value)
}
}
@@ -116,10 +203,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
var levelColor int
switch entry.Level {
- case DebugLevel:
+ case DebugLevel, TraceLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
@@ -129,17 +216,42 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = blue
}
- levelText := strings.ToUpper(entry.Level.String())[0:4]
+ levelText := strings.ToUpper(entry.Level.String())
+ if !f.DisableLevelTruncation {
+ levelText = levelText[0:4]
+ }
+
+ // Remove a single newline if it already exists in the message to keep
+ // the behavior of logrus text_formatter the same as the stdlib log package
+ entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+ caller := ""
+ if entry.HasCaller() {
+ funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+
+ if fileVal == "" {
+ caller = funcVal
+ } else if funcVal == "" {
+ caller = fileVal
+ } else {
+ caller = fileVal + " " + funcVal
+ }
+ }
if f.DisableTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
} else if !f.FullTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
} else {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
}
for _, k := range keys {
- v := entry.Data[k]
+ v := data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
@@ -153,7 +265,7 @@ func (f *TextFormatter) needsQuoting(text string) bool {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
- ch == '-' || ch == '.') {
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
@@ -161,29 +273,23 @@ func (f *TextFormatter) needsQuoting(text string) bool {
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
- b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
- switch value := value.(type) {
- case string:
- if !f.needsQuoting(value) {
- b.WriteString(value)
- } else {
- fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
- }
- case error:
- errmsg := value.Error()
- if !f.needsQuoting(errmsg) {
- b.WriteString(errmsg)
- } else {
- fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
- }
- default:
- fmt.Fprint(b, value)
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
+ }
+
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
index 7bdebedc6..9e1f75135 100644
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
var printFunc func(args ...interface{})
switch level {
+ case TraceLevel:
+ printFunc = entry.Trace
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 000000000..3b053c59e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+
+cobra.test
+
+.idea/*
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 000000000..94ec53068
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia <steve.francia@gmail.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 000000000..38b85f499
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,31 @@
+language: go
+
+stages:
+ - diff
+ - test
+
+go:
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ include:
+ - stage: diff
+ go: 1.12.x
+ script: diff -u <(echo -n) <(gofmt -d -s .)
+
+before_install:
+ - mkdir -p bin
+ - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
+ - chmod +x bin/shellcheck
+ - go get -u github.com/kyoh86/richgo
+script:
+ - PATH=$PATH:$PWD/bin richgo test -v ./...
+ - go build
+ - if [ -z $NOVET ]; then
+ diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+ fi
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 851fcc087..60c5a425b 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -2,25 +2,29 @@
Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
-Many of the most widely used Go projects are built using Cobra including:
-
-* [Kubernetes](http://kubernetes.io/)
-* [Hugo](http://gohugo.io)
-* [rkt](https://github.com/coreos/rkt)
-* [etcd](https://github.com/coreos/etcd)
-* [Moby (former Docker)](https://github.com/moby/moby)
-* [Docker (distribution)](https://github.com/docker/distribution)
-* [OpenShift](https://www.openshift.com/)
-* [Delve](https://github.com/derekparker/delve)
-* [GopherJS](http://www.gopherjs.org/)
-* [CockroachDB](http://www.cockroachlabs.com/)
-* [Bleve](http://www.blevesearch.com/)
-* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
-* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
-* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
-* [rclone](http://rclone.org/)
-* [nehm](https://github.com/bogem/nehm)
-* [Pouch](https://github.com/alibaba/pouch)
+Many of the most widely used Go projects are built using Cobra, such as:
+[Kubernetes](http://kubernetes.io/),
+[Hugo](http://gohugo.io),
+[rkt](https://github.com/coreos/rkt),
+[etcd](https://github.com/coreos/etcd),
+[Moby (former Docker)](https://github.com/moby/moby),
+[Docker (distribution)](https://github.com/docker/distribution),
+[OpenShift](https://www.openshift.com/),
+[Delve](https://github.com/derekparker/delve),
+[GopherJS](http://www.gopherjs.org/),
+[CockroachDB](http://www.cockroachlabs.com/),
+[Bleve](http://www.blevesearch.com/),
+[ProjectAtomic (enterprise)](http://www.projectatomic.io/),
+[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl),
+[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack),
+[rclone](http://rclone.org/),
+[nehm](https://github.com/bogem/nehm),
+[Pouch](https://github.com/alibaba/pouch),
+[Istio](https://istio.io),
+[Prototool](https://github.com/uber/prototool),
+[mattermost-server](https://github.com/mattermost/mattermost-server),
+[Gardener](https://github.com/gardener/gardenctl),
+etc.
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
@@ -45,6 +49,7 @@ Many of the most widely used Go projects are built using Cobra including:
* [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
* [Generating documentation for your command](#generating-documentation-for-your-command)
* [Generating bash completions](#generating-bash-completions)
+ * [Generating zsh completions](#generating-zsh-completions)
- [Contributing](#contributing)
- [License](#license)
@@ -152,9 +157,6 @@ In a Cobra app, typically the main.go file is very bare. It serves one purpose:
package main
import (
- "fmt"
- "os"
-
"{pathToYourApp}/cmd"
)
@@ -265,9 +267,6 @@ In a Cobra app, typically the main.go file is very bare. It serves, one purpose,
package main
import (
- "fmt"
- "os"
-
"{pathToYourApp}/cmd"
)
@@ -339,7 +338,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out
A flag can also be assigned locally which will only apply to that specific command.
```go
-rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
```
### Local Flag on Parent Commands
@@ -395,6 +394,7 @@ The following validators are built in:
- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` (see the sketch after this list).
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
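+
+A rough sketch of wiring `ExactValidArgs` into a command (the command name and valid args below are purely illustrative):
+
+```go
+var colorCmd = &cobra.Command{
+  Use:       "color [red|green|blue]",
+  ValidArgs: []string{"red", "green", "blue"},
+  // Require exactly one positional arg and require it to appear in ValidArgs.
+  Args:      cobra.ExactValidArgs(1),
+  Run:       func(cmd *cobra.Command, args []string) {},
+}
+```
+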
An example of setting the custom validator:
@@ -404,7 +404,7 @@ var cmd = &cobra.Command{
Short: "hello",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
- return errors.New("requires at least one arg")
+ return errors.New("requires a color argument")
}
if myapp.IsValidColor(args[0]) {
return nil
@@ -464,7 +464,7 @@ Echo works a lot like print, except it has a child command.`,
}
var cmdTimes = &cobra.Command{
- Use: "times [# times] [string to echo]",
+ Use: "times [string to echo]",
Short: "Echo anything to the screen more times",
Long: `echo things multiple times back to the user by providing
a count and a string.`,
@@ -721,6 +721,11 @@ Cobra can generate documentation based on subcommands, flags, etc. in the follow
Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
+## Generating zsh completions
+
+Cobra can generate a zsh-completion file. Read more about it in
+[Zsh Completions](zsh_completions.md).
+
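+As a rough sketch (assuming a `rootCmd` variable as in the examples above), the completion script can be written to stdout, mirroring the bash pattern:
+
+```go
+// Write the zsh completion script for rootCmd to stdout;
+// GenZshCompletionFile writes it to a file instead.
+rootCmd.GenZshCompletion(os.Stdout)
+```
+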
# Contributing
1. Fork it
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index a5d8a9273..c4d820b85 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -78,6 +78,18 @@ func ExactArgs(n int) PositionalArgs {
}
}
+// ExactValidArgs returns an error if
+// there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+func ExactValidArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if err := ExactArgs(n)(cmd, args); err != nil {
+ return err
+ }
+ return OnlyValidArgs(cmd, args)
+ }
+}
+
// RangeArgs returns an error if the number of args is not within the expected range.
func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 8fa8f486f..57bb8e1b3 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -129,7 +129,13 @@ __%[1]s_handle_reply()
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
- declare -F __custom_func >/dev/null && __custom_func
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
fi
# available in bash-completion >= 2, not always present on macOS
@@ -193,7 +199,8 @@ __%[1]s_handle_flag()
fi
# skip the argument to a two word flag
- if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
@@ -373,6 +380,10 @@ func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ }
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
@@ -534,51 +545,3 @@ func (c *Command) GenBashCompletionFile(filename string) error {
return c.GenBashCompletion(outFile)
}
-
-// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
-// and causes your command to report an error if invoked without the flag.
-func (c *Command) MarkFlagRequired(name string) error {
- return MarkFlagRequired(c.Flags(), name)
-}
-
-// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
-// and causes your command to report an error if invoked without the flag.
-func (c *Command) MarkPersistentFlagRequired(name string) error {
- return MarkFlagRequired(c.PersistentFlags(), name)
-}
-
-// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
-// and causes your command to report an error if invoked without the flag.
-func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
- return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
-}
-
-// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
-// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
-func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
- return MarkFlagFilename(c.Flags(), name, extensions...)
-}
-
-// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
-// Generated bash autocompletion will call the bash function f for the flag.
-func (c *Command) MarkFlagCustom(name string, f string) error {
- return MarkFlagCustom(c.Flags(), name, f)
-}
-
-// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
-// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
-func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
- return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
-}
-
-// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
-// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
-func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
- return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
-}
-
-// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
-// Generated bash autocompletion will call the bash function f for the flag.
-func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
- return flags.SetAnnotation(name, BashCompCustom, []string{f})
-}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 000000000..4ac61ee13
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,256 @@
+# Generating Bash Completions For Your Own cobra.Command
+
+If you are using the generator you can create a completion command by running
+
+```bash
+cobra add completion
+```
+
+Update the help text to show how to install bash_completion on Linux; the [Kubectl docs show mac options](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion).
+
+Writing the shell script to stdout allows the most flexible use.
+
+```go
+// completionCmd represents the completion command
+var completionCmd = &cobra.Command{
+ Use: "completion",
+ Short: "Generates bash completion scripts",
+ Long: `To load completion run
+
+. <(bitbucket completion)
+
+To configure your bash shell to load completions for each session add to your bashrc
+
+# ~/.bashrc or ~/.profile
+. <(bitbucket completion)
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ rootCmd.GenBashCompletion(os.Stdout);
+ },
+}
+```
+
+**Note:** The cobra generator may include messages printed to stdout, for example if the config file is loaded; this will break the auto-complete script.
+
+
+## Example from kubectl
+
+Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ kubectl.GenBashCompletionFile("out.sh")
+}
+```
+
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+
+## Creating your own custom functions
+
+Some more actual code that works in kubernetes:
+
+```go
+const (
+ bash_completion_func = `__kubectl_parse_get()
+{
+ local kubectl_output out
+ if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+ out=($(echo "${kubectl_output}" | awk '{print $1}'))
+ COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+ fi
+}
+
+__kubectl_get_resource()
+{
+ if [[ ${#nouns[@]} -eq 0 ]]; then
+ return 1
+ fi
+ __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+ if [[ $? -eq 0 ]]; then
+ return 0
+ fi
+}
+
+__kubectl_custom_func() {
+ case ${last_command} in
+ kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+ __kubectl_get_resource
+ return
+ ;;
+ *)
+ ;;
+ esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+ Use: "kubectl",
+ Short: "kubectl controls the Kubernetes cluster manager",
+ Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+ Run: runHelp,
+ BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of kubernetes, a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]`, the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := RunGet(f, out, cmd, args)
+ util.CheckErr(err)
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab][tab]
+-c --container= -p --pod=
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
+
+```go
+ annotations := []string{"json", "yaml", "yml"}
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompFilenameExt] = annotations
+
+ flag := &pflag.Flag{
+ Name: "filename",
+ Shorthand: "f",
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f
+test/ example/ rpmbuild/
+hello.yml test.json
+```
+
+So while there are many other files in the current working directory, it only shows subdirectories and files with valid extensions.
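+
+If you prefer not to build the `pflag.Flag` yourself, the same annotation can also be set with the `MarkFlagFilename` helper; a minimal sketch, assuming the `--filename` flag is already defined on `cmd`:
+
+```go
+// Limit bash completion for the --filename flag to .json, .yaml and .yml
+// files (subdirectories are still offered so you can descend into them).
+cmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+```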
+
+## Specify custom flag completion
+
+Similar to the filename completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion function with `cobra.BashCompCustom`:
+
+```go
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+ flag := &pflag.Flag{
+ Name: "namespace",
+ Usage: usage,
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+ local template
+ template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+ local kubectl_out
+ if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+ fi
+}
+```
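+
+If you don't want to construct the `pflag.Flag` yourself, the same annotation can be attached with the `MarkFlagCustom` helper; a sketch, assuming the `--namespace` flag is already defined on `cmd`:
+
+```go
+// Have bash call __kubectl_get_namespaces to complete values for --namespace.
+cmd.MarkFlagCustom("namespace", "__kubectl_get_namespaces")
+```
+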
+## Using bash aliases for commands
+
+You can also configure bash aliases for your commands, and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$) aliasname <tab><tab>
+completion firstcommand secondcommand
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index 7010fd15b..6505c070b 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -23,6 +23,7 @@ import (
"strconv"
"strings"
"text/template"
+ "time"
"unicode"
)
@@ -56,6 +57,12 @@ var MousetrapHelpText string = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration time.Duration = 5 * time.Second
+
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 34d1bf367..c7e898303 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -177,8 +177,6 @@ type Command struct {
// that we can use on every pflag set and children commands
globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
- // output is an output writer defined by user.
- output io.Writer
// usageFunc is usage func defined by user.
usageFunc func(*Command) error
// usageTemplate is usage template defined by user.
@@ -195,6 +193,13 @@ type Command struct {
helpCommand *Command
// versionTemplate is the version template defined by user.
versionTemplate string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
}
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
@@ -205,8 +210,28 @@ func (c *Command) SetArgs(a []string) {
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
func (c *Command) SetOutput(output io.Writer) {
- c.output = output
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
}
// SetUsageFunc sets usage function. Usage can be defined by application.
@@ -267,9 +292,19 @@ func (c *Command) OutOrStderr() io.Writer {
return c.getOut(os.Stderr)
}
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
func (c *Command) getOut(def io.Writer) io.Writer {
- if c.output != nil {
- return c.output
+ if c.outWriter != nil {
+ return c.outWriter
}
if c.HasParent() {
return c.parent.getOut(def)
@@ -277,6 +312,26 @@ func (c *Command) getOut(def io.Writer) io.Writer {
return def
}
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
// UsageFunc returns either the function set by SetUsageFunc for this command
// or a parent, or it returns a default usage function.
func (c *Command) UsageFunc() (f func(*Command) error) {
@@ -329,13 +384,22 @@ func (c *Command) Help() error {
return nil
}
-// UsageString return usage string.
+// UsageString returns usage string.
func (c *Command) UsageString() string {
- tmpOutput := c.output
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
bb := new(bytes.Buffer)
- c.SetOutput(bb)
+ c.outWriter = bb
+ c.errWriter = bb
+
c.Usage()
- c.output = tmpOutput
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
return bb.String()
}
@@ -817,13 +881,11 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
// overriding
c.InitDefaultHelpCmd()
- var args []string
+ args := c.args
// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
args = os.Args[1:]
- } else {
- args = c.args
}
var flags []string
@@ -1070,6 +1132,21 @@ func (c *Command) Printf(format string, i ...interface{}) {
c.Print(fmt.Sprintf(format, i...))
}
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+	c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+	c.PrintErr(fmt.Sprintf(format, i...))
+}
+
// CommandPath returns the full path to this command.
func (c *Command) CommandPath() string {
if c.HasParent() {
@@ -1335,7 +1412,7 @@ func (c *Command) LocalFlags() *flag.FlagSet {
return c.lflags
}
-// InheritedFlags returns all flags which were inherited from parents commands.
+// InheritedFlags returns all flags which were inherited from parent commands.
func (c *Command) InheritedFlags() *flag.FlagSet {
c.mergePersistentFlags()
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
index edec728e4..8768b1736 100644
--- a/vendor/github.com/spf13/cobra/command_win.go
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -3,6 +3,7 @@
package cobra
import (
+ "fmt"
"os"
"time"
@@ -14,7 +15,12 @@ var preExecHookFn = preExecHook
func preExecHook(c *Command) {
if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
c.Print(MousetrapHelpText)
- time.Sleep(5 * time.Second)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
os.Exit(1)
}
}
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
new file mode 100644
index 000000000..9a9eb65a3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -0,0 +1,13 @@
+module github.com/spf13/cobra
+
+go 1.12
+
+require (
+ github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/cpuguy83/go-md2man v1.0.10
+ github.com/inconshreveable/mousetrap v1.0.0
+ github.com/mitchellh/go-homedir v1.1.0
+ github.com/spf13/pflag v1.0.3
+ github.com/spf13/viper v1.3.2
+ gopkg.in/yaml.v2 v2.2.2
+)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
new file mode 100644
index 000000000..9761f4d03
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -0,0 +1,51 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 000000000..756c61b9d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,100 @@
+// PowerShell completions are based on the amazing work from clap:
+// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs
+//
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+var powerShellCompletionTemplate = `using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock {
+ param($wordToComplete, $commandAst, $cursorPosition)
+ $commandElements = $commandAst.CommandElements
+ $command = @(
+ '%s'
+ for ($i = 1; $i -lt $commandElements.Count; $i++) {
+ $element = $commandElements[$i]
+ if ($element -isnot [StringConstantExpressionAst] -or
+ $element.StringConstantType -ne [StringConstantType]::BareWord -or
+ $element.Value.StartsWith('-')) {
+ break
+ }
+ $element.Value
+ }
+ ) -join ';'
+ $completions = @(switch ($command) {%s
+ })
+ $completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
+ Sort-Object -Property ListItemText
+}`
+
+func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) {
+ var cmdName string
+ if previousCommandName == "" {
+ cmdName = cmd.Name()
+ } else {
+ cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name())
+ }
+
+ fmt.Fprintf(out, "\n '%s' {", cmdName)
+
+ cmd.Flags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ usage := escapeStringForPowerShell(flag.Usage)
+ if len(flag.Shorthand) > 0 {
+ fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage)
+ }
+ fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage)
+ })
+
+ for _, subCmd := range cmd.Commands() {
+ usage := escapeStringForPowerShell(subCmd.Short)
+ fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage)
+ }
+
+ fmt.Fprint(out, "\n break\n }")
+
+ for _, subCmd := range cmd.Commands() {
+ generatePowerShellSubcommandCases(out, subCmd, cmdName)
+ }
+}
+
+func escapeStringForPowerShell(s string) string {
+ return strings.Replace(s, "'", "''", -1)
+}
+
+// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+
+ var subCommandCases bytes.Buffer
+ generatePowerShellSubcommandCases(&subCommandCases, c, "")
+ fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenPowerShellCompletionFile generates PowerShell completion file.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenPowerShellCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 000000000..afed80240
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,14 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$PROFILE` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
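+A minimal sketch of generating the script from Go, assuming `rootCmd` is your root `cobra.Command` and `myprog.ps1` is the file name you want:
+
+```go
+// Write the PowerShell completion script to a file the user can
+// dot-source from their PowerShell profile.
+if err := rootCmd.GenPowerShellCompletionFile("myprog.ps1"); err != nil {
+	fmt.Println(err)
+}
+```
+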
+# What's supported
+
+- Completion for subcommands using their `.Short` description
+- Completion for non-hidden flags using their `.Name` and `.Shorthand`
+
+# What's not yet supported
+
+- Command aliases
+- Required, filename or custom flags (they will work like normal flags)
+- Custom completion scripts
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 000000000..ba0af9cb5
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,85 @@
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for this persistent flag to the
+// specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for this flag to the specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the
+// given FlagSet, if it exists. Generated bash autocompletion will call the
+// bash function f for the flag.
+//
+// Shell Completion compatibility matrix: bash
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to complete only directories with this persistent named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this specified flag.
+//
+// Shell Completion compatibility matrix: zsh
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ zshPattern := "-(/)"
+ return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern})
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 889c22e27..12755482f 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -1,13 +1,102 @@
package cobra
import (
- "bytes"
+ "encoding/json"
"fmt"
"io"
"os"
+ "sort"
"strings"
+ "text/template"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation"
+ zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion"
+ zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion"
+ zshCompDirname = "cobra_annotations_zsh_dirname"
+)
+
+var (
+ zshCompFuncMap = template.FuncMap{
+ "genZshFuncName": zshCompGenFuncName,
+ "extractFlags": zshCompExtractFlag,
+ "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments,
+ "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering,
+ }
+ zshCompletionText = `
+{{/* should accept Command (that contains subcommands) as parameter */}}
+{{define "argumentsC" -}}
+{{ $cmdPath := genZshFuncName .}}
+function {{$cmdPath}} {
+ local -a commands
+
+ _arguments -C \{{- range extractFlags .}}
+ {{genFlagEntryForZshArguments .}} \{{- end}}
+ "1: :->cmnds" \
+ "*::arg:->args"
+
+ case $state in
+ cmnds)
+ commands=({{range .Commands}}{{if not .Hidden}}
+ "{{.Name}}:{{.Short}}"{{end}}{{end}}
+ )
+ _describe "command" commands
+ ;;
+ esac
+
+ case "$words[1]" in {{- range .Commands}}{{if not .Hidden}}
+ {{.Name}})
+ {{$cmdPath}}_{{.Name}}
+ ;;{{end}}{{end}}
+ esac
+}
+{{range .Commands}}{{if not .Hidden}}
+{{template "selectCmdTemplate" .}}
+{{- end}}{{end}}
+{{- end}}
+
+{{/* should accept Command without subcommands as parameter */}}
+{{define "arguments" -}}
+function {{genZshFuncName .}} {
+{{" _arguments"}}{{range extractFlags .}} \
+ {{genFlagEntryForZshArguments . -}}
+{{end}}{{range extractArgsCompletions .}} \
+ {{.}}{{end}}
+}
+{{end}}
+
+{{/* dispatcher for commands with or without subcommands */}}
+{{define "selectCmdTemplate" -}}
+{{if .Hidden}}{{/* ignore hidden*/}}{{else -}}
+{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}}
+{{- end}}
+{{- end}}
+
+{{/* template entry point */}}
+{{define "Main" -}}
+#compdef _{{.Name}} {{.Name}}
+
+{{template "selectCmdTemplate" .}}
+{{end}}
+`
)
+// zshCompArgsAnnotation is used to encode/decode zsh completion for
+// arguments to/from Command.Annotations.
+type zshCompArgsAnnotation map[int]zshCompArgHint
+
+type zshCompArgHint struct {
+ // Indicates the type of the completion to use. One of:
+ // zshCompArgumentFilenameComp or zshCompArgumentWordComp
+ Tipe string `json:"type"`
+
+ // A value for the type above (globs for file completion or words)
+ Options []string `json:"options"`
+}
+
// GenZshCompletionFile generates zsh completion file.
func (c *Command) GenZshCompletionFile(filename string) error {
outFile, err := os.Create(filename)
@@ -19,108 +108,229 @@ func (c *Command) GenZshCompletionFile(filename string) error {
return c.GenZshCompletion(outFile)
}
-// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+// GenZshCompletion generates a zsh completion file and writes to the passed
+// writer. The completion always runs on the root command regardless of the
+// command it was called from.
func (c *Command) GenZshCompletion(w io.Writer) error {
- buf := new(bytes.Buffer)
-
- writeHeader(buf, c)
- maxDepth := maxDepth(c)
- writeLevelMapping(buf, maxDepth)
- writeLevelCases(buf, maxDepth, c)
+ tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText)
+ if err != nil {
+ return fmt.Errorf("error creating zsh completion template: %v", err)
+ }
+ return tmpl.Execute(w, c.Root())
+}
- _, err := buf.WriteTo(w)
- return err
+// MarkZshCompPositionalArgumentFile marks the specified argument (first
+// argument is 1) as completed by file selection. patterns (e.g. "*.txt") are
+// optional - if not provided the completion will search for all files.
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ if argPosition < 1 {
+ return fmt.Errorf("Invalid argument position (%d)", argPosition)
+ }
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return err
+ }
+ if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+ return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+ }
+ annotation[argPosition] = zshCompArgHint{
+ Tipe: zshCompArgumentFilenameComp,
+ Options: patterns,
+ }
+ return c.zshCompSetArgsAnnotations(annotation)
}
-func writeHeader(w io.Writer, cmd *Command) {
- fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
+// MarkZshCompPositionalArgumentWords marks the specified positional argument
+// (first argument is 1) as completed by the provided words. At least one word
+// must be provided; words containing spaces will be offered for completion as
+// "word\ word".
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ if argPosition < 1 {
+ return fmt.Errorf("Invalid argument position (%d)", argPosition)
+ }
+ if len(words) == 0 {
+ return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition)
+ }
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return err
+ }
+ if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+ return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+ }
+ annotation[argPosition] = zshCompArgHint{
+ Tipe: zshCompArgumentWordComp,
+ Options: words,
+ }
+ return c.zshCompSetArgsAnnotations(annotation)
}
-func maxDepth(c *Command) int {
- if len(c.Commands()) == 0 {
- return 0
+func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) {
+ var result []string
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return nil, err
}
- maxDepthSub := 0
- for _, s := range c.Commands() {
- subDepth := maxDepth(s)
- if subDepth > maxDepthSub {
- maxDepthSub = subDepth
+ for k, v := range annotation {
+ s, err := zshCompRenderZshCompArgHint(k, v)
+ if err != nil {
+ return nil, err
}
+ result = append(result, s)
}
- return 1 + maxDepthSub
+ if len(c.ValidArgs) > 0 {
+ if _, positionOneExists := annotation[1]; !positionOneExists {
+ s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{
+ Tipe: zshCompArgumentWordComp,
+ Options: c.ValidArgs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, s)
+ }
+ }
+ sort.Strings(result)
+ return result, nil
}
-func writeLevelMapping(w io.Writer, numLevels int) {
- fmt.Fprintln(w, `_arguments \`)
- for i := 1; i <= numLevels; i++ {
- fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i)
- fmt.Fprintln(w)
+func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) {
+ switch t := z.Tipe; t {
+ case zshCompArgumentFilenameComp:
+ var globs []string
+ for _, g := range z.Options {
+ globs = append(globs, fmt.Sprintf(`-g "%s"`, g))
+ }
+ return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil
+ case zshCompArgumentWordComp:
+ var words []string
+ for _, w := range z.Options {
+ words = append(words, fmt.Sprintf("%q", w))
+ }
+ return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil
+ default:
+ return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t)
}
- fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files")
- fmt.Fprintln(w)
}
-func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
- fmt.Fprintln(w, "case $state in")
- defer fmt.Fprintln(w, "esac")
+func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool {
+ _, dup := annotation[position]
+ return dup
+}
- for i := 1; i <= maxDepth; i++ {
- fmt.Fprintf(w, " level%d)\n", i)
- writeLevel(w, root, i)
- fmt.Fprintln(w, " ;;")
+func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) {
+ annotation := make(zshCompArgsAnnotation)
+ annotationString, ok := c.Annotations[zshCompArgumentAnnotation]
+ if !ok {
+ return annotation, nil
+ }
+ err := json.Unmarshal([]byte(annotationString), &annotation)
+ if err != nil {
+ return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err)
}
- fmt.Fprintln(w, " *)")
- fmt.Fprintln(w, " _arguments '*: :_files'")
- fmt.Fprintln(w, " ;;")
+ return annotation, nil
}
-func writeLevel(w io.Writer, root *Command, i int) {
- fmt.Fprintf(w, " case $words[%d] in\n", i)
- defer fmt.Fprintln(w, " esac")
-
- commands := filterByLevel(root, i)
- byParent := groupByParent(commands)
+func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error {
+ jsn, err := json.Marshal(annotation)
+ if err != nil {
+ return fmt.Errorf("Error marshaling zsh argument annotation: %v", err)
+ }
+ if c.Annotations == nil {
+ c.Annotations = make(map[string]string)
+ }
+ c.Annotations[zshCompArgumentAnnotation] = string(jsn)
+ return nil
+}
- for p, c := range byParent {
- names := names(c)
- fmt.Fprintf(w, " %s)\n", p)
- fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
- fmt.Fprintln(w, " ;;")
+func zshCompGenFuncName(c *Command) string {
+ if c.HasParent() {
+ return zshCompGenFuncName(c.Parent()) + "_" + c.Name()
}
- fmt.Fprintln(w, " *)")
- fmt.Fprintln(w, " _arguments '*: :_files'")
- fmt.Fprintln(w, " ;;")
+ return "_" + c.Name()
+}
+func zshCompExtractFlag(c *Command) []*pflag.Flag {
+ var flags []*pflag.Flag
+ c.LocalFlags().VisitAll(func(f *pflag.Flag) {
+ if !f.Hidden {
+ flags = append(flags, f)
+ }
+ })
+ c.InheritedFlags().VisitAll(func(f *pflag.Flag) {
+ if !f.Hidden {
+ flags = append(flags, f)
+ }
+ })
+ return flags
}
-func filterByLevel(c *Command, l int) []*Command {
- cs := make([]*Command, 0)
- if l == 0 {
- cs = append(cs, c)
- return cs
+// zshCompGenFlagEntryForArguments returns an entry that matches _arguments
+// zsh-completion parameters. It's too complicated to generate in a template.
+func zshCompGenFlagEntryForArguments(f *pflag.Flag) string {
+ if f.Name == "" || f.Shorthand == "" {
+ return zshCompGenFlagEntryForSingleOptionFlag(f)
}
- for _, s := range c.Commands() {
- cs = append(cs, filterByLevel(s, l-1)...)
+ return zshCompGenFlagEntryForMultiOptionFlag(f)
+}
+
+func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string {
+ var option, multiMark, extras string
+
+ if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+ multiMark = "*"
}
- return cs
+
+ option = "--" + f.Name
+ if option == "--" {
+ option = "-" + f.Shorthand
+ }
+ extras = zshCompGenFlagEntryExtras(f)
+
+ return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras)
}
-func groupByParent(commands []*Command) map[string][]*Command {
- m := make(map[string][]*Command)
- for _, c := range commands {
- parent := c.Parent()
- if parent == nil {
- continue
- }
- m[parent.Name()] = append(m[parent.Name()], c)
+func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string {
+ var options, parenMultiMark, curlyMultiMark, extras string
+
+ if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+ parenMultiMark = "*"
+ curlyMultiMark = "\\*"
}
- return m
+
+ options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`,
+ parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name)
+ extras = zshCompGenFlagEntryExtras(f)
+
+ return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras)
}
-func names(commands []*Command) []string {
- ns := make([]string, len(commands))
- for i, c := range commands {
- ns[i] = c.Name()
+func zshCompGenFlagEntryExtras(f *pflag.Flag) string {
+ if f.NoOptDefVal != "" {
+ return ""
}
- return ns
+
+ extras := ":" // allow options for flag (even without assistance)
+ for key, values := range f.Annotations {
+ switch key {
+ case zshCompDirname:
+ extras = fmt.Sprintf(":filename:_files -g %q", values[0])
+ case BashCompFilenameExt:
+ extras = ":filename:_files"
+ for _, pattern := range values {
+ extras = extras + fmt.Sprintf(` -g "%s"`, pattern)
+ }
+ }
+ }
+
+ return extras
+}
+
+func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool {
+ return strings.Contains(f.Value.Type(), "Slice") ||
+ strings.Contains(f.Value.Type(), "Array")
+}
+
+func zshCompQuoteFlagDescription(s string) string {
+ return strings.Replace(s, "'", `'\''`, -1)
}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 000000000..df9c2eac9
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,39 @@
+## Generating Zsh Completion for your cobra.Command
+
+Cobra supports native Zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` named
+`_<YOUR COMMAND>`.
+
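+A minimal sketch, assuming `rootCmd` is your root `cobra.Command` and the binary is called `myapp` (so the script lands in `$fpath` as `_myapp`):
+
+```go
+// GenZshCompletionFile always renders the completion for the whole
+// command tree starting at the root command.
+if err := rootCmd.GenZshCompletionFile("_myapp"); err != nil {
+	fmt.Println(err)
+}
+```
+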
+### What's Supported
+
+* Completion for all non-hidden subcommands using their `.Short` description.
+* Completion for all non-hidden flags using the following rules:
+ * Filename completion works by marking the flag with `cmd.MarkFlagFilename...`
+ family of commands.
+  * Whether the flag requires an argument is decided by the `.NoOptDefVal`
+    flag value - if it's empty, completion will expect an argument.
+  * Flags of the various `*Array` and `*Slice` types support multiple
+    specifications (with or without an argument, depending on the specific type).
+* Completion of positional arguments using the following rules:
+ * Argument position for all options below starts at `1`. If argument position
+ `0` is requested it will raise an error.
+ * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob
+ patterns (e.g. `"*.log"`) are optional - if not specified it will offer to
+ complete all file types.
+  * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for
+    completion. At least one word is required (see the sketch after this list).
+  * It's possible to specify completion for some arguments and leave others
+    unspecified (e.g. offer words for the second argument but nothing for the
+    first). This results in no completion for the first argument and word
+    completion for the second.
+  * If no argument completion was specified for the 1st argument (even if one
+    was specified for the 2nd) and the command has `ValidArgs`, those are used
+    as the completion options for the 1st argument.
+  * Argument completions are only offered for commands with no subcommands.
+
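+A sketch of marking positional arguments, assuming a `getCmd` command whose first argument is one of a few resource words and whose second argument is a log file:
+
+```go
+// Offer fixed words for the first positional argument.
+getCmd.MarkZshCompPositionalArgumentWords(1, "pod", "node", "service")
+// Complete the second positional argument with *.log files.
+getCmd.MarkZshCompPositionalArgumentFile(2, "*.log")
+```
+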
+### What's not yet Supported
+
+* Custom completion scripts are not supported yet (we should probably create a
+  zsh-specific one; it doesn't make sense to re-use the bash one, as the
+  functions will be different).
+* Whatever other feature you're looking for that doesn't exist yet :)
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 000000000..c3da29013
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 000000000..f8a63b308
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.7.3
+ - 1.8.1
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get github.com/golang/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/stretchr/testify/README.md
deleted file mode 100644
index 11951d4a5..000000000
--- a/vendor/github.com/stretchr/testify/README.md
+++ /dev/null
@@ -1,340 +0,0 @@
-Testify - Thou Shalt Write Tests
-================================
-
-[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify)
-
-Go code (golang) set of packages that provide many tools for testifying that your code will behave as you intend.
-
-Features include:
-
- * [Easy assertions](#assert-package)
- * [Mocking](#mock-package)
- * [Testing suite interfaces and functions](#suite-package)
-
-Get started:
-
- * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date)
- * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing
- * Check out the API Documentation http://godoc.org/github.com/stretchr/testify
- * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc)
- * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development)
-
-
-
-[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package
--------------------------------------------------------------------------------------------
-
-The `assert` package provides some helpful methods that allow you to write better test code in Go.
-
- * Prints friendly, easy to read failure descriptions
- * Allows for very readable code
- * Optionally annotate each assertion with a message
-
-See it in action:
-
-```go
-package yours
-
-import (
- "testing"
- "github.com/stretchr/testify/assert"
-)
-
-func TestSomething(t *testing.T) {
-
- // assert equality
- assert.Equal(t, 123, 123, "they should be equal")
-
- // assert inequality
- assert.NotEqual(t, 123, 456, "they should not be equal")
-
- // assert for nil (good for errors)
- assert.Nil(t, object)
-
- // assert for not nil (good when you expect something)
- if assert.NotNil(t, object) {
-
- // now we know that object isn't nil, we are safe to make
- // further assertions without causing any errors
- assert.Equal(t, "Something", object.Value)
-
- }
-
-}
-```
-
- * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities.
- * Every assert func returns a bool indicating whether the assertion was successful or not, this is useful for if you want to go on making further assertions under certain conditions.
-
-if you assert many times, use the below:
-
-```go
-package yours
-
-import (
- "testing"
- "github.com/stretchr/testify/assert"
-)
-
-func TestSomething(t *testing.T) {
- assert := assert.New(t)
-
- // assert equality
- assert.Equal(123, 123, "they should be equal")
-
- // assert inequality
- assert.NotEqual(123, 456, "they should not be equal")
-
- // assert for nil (good for errors)
- assert.Nil(object)
-
- // assert for not nil (good when you expect something)
- if assert.NotNil(object) {
-
- // now we know that object isn't nil, we are safe to make
- // further assertions without causing any errors
- assert.Equal("Something", object.Value)
- }
-}
-```
-
-[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package
----------------------------------------------------------------------------------------------
-
-The `require` package provides same global functions as the `assert` package, but instead of returning a boolean result they terminate current test.
-
-See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.
-
-[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package
-----------------------------------------------------------------------------------------
-
-The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code.
-
-An example test function that tests a piece of code that relies on an external object `testObj`, can setup expectations (testify) and assert that they indeed happened:
-
-```go
-package yours
-
-import (
- "testing"
- "github.com/stretchr/testify/mock"
-)
-
-/*
- Test objects
-*/
-
-// MyMockedObject is a mocked object that implements an interface
-// that describes an object that the code I am testing relies on.
-type MyMockedObject struct{
- mock.Mock
-}
-
-// DoSomething is a method on MyMockedObject that implements some interface
-// and just records the activity, and returns what the Mock object tells it to.
-//
-// In the real object, this method would do something useful, but since this
-// is a mocked object - we're just going to stub it out.
-//
-// NOTE: This method is not being tested here, code that uses this object is.
-func (m *MyMockedObject) DoSomething(number int) (bool, error) {
-
- args := m.Called(number)
- return args.Bool(0), args.Error(1)
-
-}
-
-/*
- Actual test functions
-*/
-
-// TestSomething is an example of how to use our test object to
-// make assertions about some target code we are testing.
-func TestSomething(t *testing.T) {
-
- // create an instance of our test object
- testObj := new(MyMockedObject)
-
- // setup expectations
- testObj.On("DoSomething", 123).Return(true, nil)
-
- // call the code we are testing
- targetFuncThatDoesSomethingWithObj(testObj)
-
- // assert that the expectations were met
- testObj.AssertExpectations(t)
-
-
-}
-
-// TestSomethingElse is a second example of how to use our test object to
-// make assertions about some target code we are testing.
-// This time using a placeholder. Placeholders might be used when the
-// data being passed in is normally dynamically generated and cannot be
-// predicted beforehand (eg. containing hashes that are time sensitive)
-func TestSomethingElse(t *testing.T) {
-
- // create an instance of our test object
- testObj := new(MyMockedObject)
-
- // setup expectations with a placeholder in the argument list
- testObj.On("DoSomething", mock.Anything).Return(true, nil)
-
- // call the code we are testing
- targetFuncThatDoesSomethingWithObj(testObj)
-
- // assert that the expectations were met
- testObj.AssertExpectations(t)
-
-
-}
-```
-
-For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock).
-
-You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker.
-
-[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package
------------------------------------------------------------------------------------------
-
-The `suite` package provides functionality that you might be used to from more common object oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal.
-
-An example suite is shown below:
-
-```go
-// Basic imports
-import (
- "testing"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/suite"
-)
-
-// Define the suite, and absorb the built-in basic suite
-// functionality from testify - including a T() method which
-// returns the current testing context
-type ExampleTestSuite struct {
- suite.Suite
- VariableThatShouldStartAtFive int
-}
-
-// Make sure that VariableThatShouldStartAtFive is set to five
-// before each test
-func (suite *ExampleTestSuite) SetupTest() {
- suite.VariableThatShouldStartAtFive = 5
-}
-
-// All methods that begin with "Test" are run as tests within a
-// suite.
-func (suite *ExampleTestSuite) TestExample() {
- assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
-}
-
-// In order for 'go test' to run this suite, we need to create
-// a normal test function and pass our suite to suite.Run
-func TestExampleTestSuite(t *testing.T) {
- suite.Run(t, new(ExampleTestSuite))
-}
-```
-
-For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go)
-
-For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite).
-
-`Suite` object has assertion methods:
-
-```go
-// Basic imports
-import (
- "testing"
- "github.com/stretchr/testify/suite"
-)
-
-// Define the suite, and absorb the built-in basic suite
-// functionality from testify - including assertion methods.
-type ExampleTestSuite struct {
- suite.Suite
- VariableThatShouldStartAtFive int
-}
-
-// Make sure that VariableThatShouldStartAtFive is set to five
-// before each test
-func (suite *ExampleTestSuite) SetupTest() {
- suite.VariableThatShouldStartAtFive = 5
-}
-
-// All methods that begin with "Test" are run as tests within a
-// suite.
-func (suite *ExampleTestSuite) TestExample() {
- suite.Equal(suite.VariableThatShouldStartAtFive, 5)
-}
-
-// In order for 'go test' to run this suite, we need to create
-// a normal test function and pass our suite to suite.Run
-func TestExampleTestSuite(t *testing.T) {
- suite.Run(t, new(ExampleTestSuite))
-}
-```
-
-------
-
-Installation
-============
-
-To install Testify, use `go get`:
-
- go get github.com/stretchr/testify
-
-This will then make the following packages available to you:
-
- github.com/stretchr/testify/assert
- github.com/stretchr/testify/require
- github.com/stretchr/testify/mock
- github.com/stretchr/testify/suite
- github.com/stretchr/testify/http (deprecated)
-
-Import the `testify/assert` package into your code using this template:
-
-```go
-package yours
-
-import (
- "testing"
- "github.com/stretchr/testify/assert"
-)
-
-func TestSomething(t *testing.T) {
-
- assert.True(t, true, "True is true!")
-
-}
-```
-
-------
-
-Staying up to date
-==================
-
-To update Testify to the latest version, use `go get -u github.com/stretchr/testify`.
-
-------
-
-Supported go versions
-==================
-
-We support the three major Go versions, which are 1.9, 1.10, and 1.11 at the moment.
-
-------
-
-Contributing
-============
-
-Please feel free to submit issues, fork the repository and send pull requests!
-
-When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it.
-
-------
-
-License
-=======
-
-This project is licensed under the terms of the MIT license.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 000000000..d2bb0b817
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 000000000..188bb9e17
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/go.mod b/vendor/github.com/stretchr/testify/go.mod
deleted file mode 100644
index 90e5dbe25..000000000
--- a/vendor/github.com/stretchr/testify/go.mod
+++ /dev/null
@@ -1,7 +0,0 @@
-module github.com/stretchr/testify
-
-require (
- github.com/davecgh/go-spew v1.1.0
- github.com/pmezard/go-difflib v1.0.0
- github.com/stretchr/objx v0.1.0
-)
diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
new file mode 100644
index 000000000..6ffc751b5
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl
@@ -0,0 +1,6 @@
+{{.Comment}}
+func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
+ if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ t.FailNow()
+}
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
new file mode 100644
index 000000000..54124df1d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/tchap/go-patricia/AUTHORS b/vendor/github.com/tchap/go-patricia/AUTHORS
new file mode 100644
index 000000000..e640b0bf5
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/AUTHORS
@@ -0,0 +1,3 @@
+This is the complete list of go-patricia copyright holders:
+
+Ondřej Kupka <ondra.cap@gmail.com>
diff --git a/vendor/github.com/tchap/go-patricia/README.md b/vendor/github.com/tchap/go-patricia/README.md
deleted file mode 100644
index d9cbf624f..000000000
--- a/vendor/github.com/tchap/go-patricia/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# go-patricia #
-
-**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)<br />
-**Build Status**: [![Build
-Status](https://drone.io/github.com/tchap/go-patricia/status.png)](https://drone.io/github.com/tchap/go-patricia/latest)<br />
-**Test Coverage**: [![Coverage
-Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
-
-## About ##
-
-A generic patricia trie (also called radix tree) implemented in Go (Golang).
-
-The patricia trie as implemented in this library enables fast visiting of items
-in some particular ways:
-
-1. visit all items saved in the tree,
-2. visit all items matching particular prefix (visit subtree), or
-3. given a string, visit all items matching some prefix of that string.
-
-`[]byte` type is used for keys, `interface{}` for values.
-
-`Trie` is not thread safe. Synchronize the access yourself.
-
-### State of the Project ###
-
-Apparently some people are using this, so the API should not change often.
-Any ideas on how to make the library better are still welcome.
-
-More (unit) testing would be cool as well...
-
-## Usage ##
-
-Import the package from GitHub first.
-
-```go
-import "github.com/tchap/go-patricia/patricia"
-```
-
-You can as well use gopkg.in thingie:
-
-```go
-import "gopkg.in/tchap/go-patricia.v2/patricia"
-```
-
-Then you can start having fun.
-
-```go
-printItem := func(prefix patricia.Prefix, item patricia.Item) error {
- fmt.Printf("%q: %v\n", prefix, item)
- return nil
-}
-
-// Create a new default trie (using the default parameter values).
-trie := NewTrie()
-
-// Create a new custom trie.
-trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
-
-// Insert some items.
-trie.Insert(Prefix("Pepa Novak"), 1)
-trie.Insert(Prefix("Pepa Sindelar"), 2)
-trie.Insert(Prefix("Karel Macha"), 3)
-trie.Insert(Prefix("Karel Hynek Macha"), 4)
-
-// Just check if some things are present in the tree.
-key := Prefix("Pepa Novak")
-fmt.Printf("%q present? %v\n", key, trie.Match(key))
-// "Pepa Novak" present? true
-key = Prefix("Karel")
-fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
-// Anybody called "Karel" here? true
-
-// Walk the tree in alphabetical order.
-trie.Visit(printItem)
-// "Karel Hynek Macha": 4
-// "Karel Macha": 3
-// "Pepa Novak": 1
-// "Pepa Sindelar": 2
-
-// Walk a subtree.
-trie.VisitSubtree(Prefix("Pepa"), printItem)
-// "Pepa Novak": 1
-// "Pepa Sindelar": 2
-
-// Modify an item, then fetch it from the tree.
-trie.Set(Prefix("Karel Hynek Macha"), 10)
-key = Prefix("Karel Hynek Macha")
-fmt.Printf("%q: %v\n", key, trie.Get(key))
-// "Karel Hynek Macha": 10
-
-// Walk prefixes.
-prefix := Prefix("Karel Hynek Macha je kouzelnik")
-trie.VisitPrefixes(prefix, printItem)
-// "Karel Hynek Macha": 10
-
-// Delete some items.
-trie.Delete(Prefix("Pepa Novak"))
-trie.Delete(Prefix("Karel Macha"))
-
-// Walk again.
-trie.Visit(printItem)
-// "Karel Hynek Macha": 10
-// "Pepa Sindelar": 2
-
-// Delete a subtree.
-trie.DeleteSubtree(Prefix("Pepa"))
-
-// Print what is left.
-trie.Visit(printItem)
-// "Karel Hynek Macha": 10
-```
-
-## License ##
-
-MIT, check the `LICENSE` file.
-
-[![Gittip
-Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/
-"Gittip Badge")
-
-[![Bitdeli
-Badge](https://d2weczhvl823v0.cloudfront.net/tchap/go-patricia/trend.png)](https://bitdeli.com/free
-"Bitdeli Badge")
diff --git a/vendor/github.com/tchap/go-patricia/patricia/children.go b/vendor/github.com/tchap/go-patricia/patricia/children.go
index a5677c335..bcfd0a5dd 100644
--- a/vendor/github.com/tchap/go-patricia/patricia/children.go
+++ b/vendor/github.com/tchap/go-patricia/patricia/children.go
@@ -20,6 +20,7 @@ type childList interface {
next(b byte) *Trie
walk(prefix *Prefix, visitor VisitorFunc) error
print(w io.Writer, indent int)
+ clone() childList
total() int
}
@@ -143,6 +144,17 @@ func (list *sparseChildList) total() int {
return tot
}
+func (list *sparseChildList) clone() childList {
+ clones := make(tries, len(list.children), cap(list.children))
+ for i, child := range list.children {
+ clones[i] = child.Clone()
+ }
+
+ return &sparseChildList{
+ children: clones,
+ }
+}
+
func (list *sparseChildList) print(w io.Writer, indent int) {
for _, child := range list.children {
if child != nil {
@@ -314,6 +326,32 @@ func (list *denseChildList) print(w io.Writer, indent int) {
}
}
+func (list *denseChildList) clone() childList {
+ clones := make(tries, cap(list.children))
+
+ if list.numChildren != 0 {
+ clonedCount := 0
+ for i := list.headIndex; i < len(list.children); i++ {
+ child := list.children[i]
+ if child != nil {
+ clones[i] = child.Clone()
+ clonedCount++
+ if clonedCount == list.numChildren {
+ break
+ }
+ }
+ }
+ }
+
+ return &denseChildList{
+ min: list.min,
+ max: list.max,
+ numChildren: list.numChildren,
+ headIndex: list.headIndex,
+ children: clones,
+ }
+}
+
func (list *denseChildList) total() int {
tot := 0
for _, child := range list.children {
diff --git a/vendor/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
index a1fc53d5d..7b9975e38 100644
--- a/vendor/github.com/tchap/go-patricia/patricia/patricia.go
+++ b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
@@ -77,6 +77,18 @@ func MaxChildrenPerSparseNode(value int) Option {
}
}
+// Clone makes a copy of an existing trie.
+// Items stored in both tries become shared, since only the trie structure is copied.
+func (trie *Trie) Clone() *Trie {
+ return &Trie{
+ prefix: append(Prefix(nil), trie.prefix...),
+ item: trie.item,
+ maxPrefixPerNode: trie.maxPrefixPerNode,
+ maxChildrenPerSparseNode: trie.maxChildrenPerSparseNode,
+ children: trie.children.clone(),
+ }
+}
+
// Item returns the item stored in the root of this trie.
func (trie *Trie) Item() Item {
return trie.item
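
The exported `Clone` method above copies the trie structure node by node while leaving the stored items shared. A minimal usage sketch, reusing the `NewTrie`/`Insert`/`Match` API shown in the README example removed earlier in this patch:

```go
package main

import (
	"fmt"

	"github.com/tchap/go-patricia/patricia"
)

func main() {
	// Build a small trie, then clone it. Only the trie structure is
	// copied; items stored in the trie are shared by both copies.
	trie := patricia.NewTrie()
	trie.Insert(patricia.Prefix("Pepa Novak"), 1)
	trie.Insert(patricia.Prefix("Karel Macha"), 3)

	clone := trie.Clone()

	// Structural changes to the clone do not affect the original.
	clone.Delete(patricia.Prefix("Pepa Novak"))

	fmt.Printf("original: %v\n", trie.Match(patricia.Prefix("Pepa Novak")))  // true
	fmt.Printf("clone:    %v\n", clone.Match(patricia.Prefix("Pepa Novak"))) // false
}
```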
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
new file mode 100644
index 000000000..273490790
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitignore
@@ -0,0 +1,15 @@
+*.out
+*.test
+*.xml
+*.swp
+.idea/
+.tmp/
+*.iml
+*.cov
+*.html
+*.log
+gen/thrift/js
+gen/thrift/py
+vendor/
+crossdock-main
+crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
new file mode 100644
index 000000000..295ebcf62
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "idl"]
+ path = idl
+ url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
new file mode 100644
index 000000000..acdc2a010
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -0,0 +1,53 @@
+sudo: required
+
+language: go
+go_import_path: github.com/uber/jaeger-client-go
+
+dist: trusty
+
+matrix:
+ include:
+ - go: 1.12.x
+ env:
+ - TESTS=true
+ - COVERAGE=true
+ - go: 1.12.x
+ env:
+ - CROSSDOCK=true
+ - go: 1.12.x
+ env:
+ - TESTS=true
+ - USE_DEP=true
+ - go: 1.11.x
+ env:
+ - TESTS=true
+ - USE_DEP=true
+ - CI_SKIP_LINT=true
+
+services:
+ - docker
+
+env:
+ global:
+ - DOCKER_COMPOSE_VERSION=1.8.0
+ - GO15VENDOREXPERIMENT=1
+ - COMMIT=${TRAVIS_COMMIT::8}
+ # DOCKER_PASS
+ - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
+ # DOCKER_USER
+ - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
+
+install:
+ - make install-ci
+ - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
+
+script:
+ - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
+ - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
+
+after_success:
+ - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
+ - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
+
+after_failure:
+ - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
new file mode 100644
index 000000000..05216d5a8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -0,0 +1,210 @@
+Changes by Version
+==================
+
+2.16.0 (2019-03-24)
+-------------------
+
+- Add baggage to B3 codec (#319) <Pavol Loffay>
+- Add support for 128bit trace ids to zipkin thrift spans. (#378) <Douglas Reid>
+- Update zipkin propagation logic to support 128bit traceIDs (#373) <Douglas Reid>
+- Accept "true" for the x-b3-sampled header (#356) <Adrian Bogatu>
+
+- Allow setting of PoolSpans from Config object (#322) <Matthew Pound>
+- Make propagators public to allow wrapping (#379) <Ivan Babrou>
+- Change default metric namespace to use relevant separator for the metric backend (#364) <Gary Brown>
+- Change metrics prefix to jaeger_tracer and add descriptions (#346) <Gary Brown>
+- Bump OpenTracing to ^1.1.x (#383) <Yuri Shkuro>
+- Upgrade jaeger-lib to v2.0.0 (#359) <Gary Brown>
+- Avoid defer when generating random number (#358) <Gary Brown>
+- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) <Gary Brown>
+- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) <Eundoo Song>
+
+
+2.15.0 (2018-10-10)
+-------------------
+
+- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) <Zvi Cahana>
+- Make maximum annotation length configurable in tracer options (#318) <Eric Chang>
+- Support more environment variables in configuration (#323) <Daneyon Hansen>
+- Print error on Sampler Query failure (#328) <Goutham Veeramachaneni>
+- Add an HTTPOption to support custom http.RoundTripper (#333) <Michael Puncel>
+- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) <Michael Puncel>
+
+
+2.14.0 (2018-04-30)
+-------------------
+
+- Support throttling for debug traces (#274) <Isaac Hier>
+- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
+- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
+- Test with Go 1.9 (#298) <Yuri Shkuro>
+
+
+2.13.0 (2018-04-15)
+-------------------
+
+- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
+- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
+- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
+- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
+- Allow overriding sampler in the Config (#270) <Mike Kabischev>
+
+
+2.12.0 (2018-03-14)
+-------------------
+
+- Use lock when retrieving span.Context() (#268)
+- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
+
+
+2.11.2 (2018-01-12)
+-------------------
+
+- Add Gopkg.toml to allow using the lib with `dep`
+
+
+2.11.1 (2018-01-03)
+-------------------
+
+- Do not enqueue spans after Reporter is closed (#235, #245)
+- Change default flush interval to 1sec (#243)
+
+
+2.11.0 (2017-11-27)
+-------------------
+
+- Normalize metric names and tags to be compatible with Prometheus (#222)
+
+
+2.10.0 (2017-11-14)
+-------------------
+
+- Support custom tracing headers (#176)
+- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
+- Do not coerce baggage keys to lower case (#196)
+- Log span name when span cannot be reported (#198)
+- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
+
+
+2.9.0 (2017-07-29)
+------------------
+
+- Pin thrift <= 0.10 (#179)
+- Introduce a parallel interface ContribObserver (#159)
+
+
+2.8.0 (2017-07-05)
+------------------
+
+- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
+- Add options to set tracer tags
+
+
+2.7.0 (2017-06-21)
+------------------
+
+- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
+- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
+- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
+- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
+- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
+
+
+2.6.0 (2017-03-28)
+------------------
+
+- Add config option to initialize RPC Metrics feature
+
+
+2.5.0 (2017-03-23)
+------------------
+
+- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
+- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
+- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
+
+
+2.4.0 (2017-03-21)
+------------------
+
+- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
+- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
+
+
+2.3.0 (2017-03-20)
+------------------
+
+- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
+- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
+
+
+2.2.1 (2017-03-14)
+------------------
+
+- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
+
+
+2.2.0 (2017-03-10)
+------------------
+
+- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
+- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
+
+
+2.1.2 (2017-02-27)
+-------------------
+
+- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
+- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
+- Add tracer initialization godoc examples
+
+
+2.1.1 (2017-02-21)
+-------------------
+
+- Fix inefficient usage of zap.Logger
+
+
+2.1.0 (2017-02-17)
+-------------------
+
+- Add adapter for zap.Logger (https://github.com/uber-go/zap)
+- Move logging API to ./log/ package
+
+
+2.0.0 (2017-02-08)
+-------------------
+
+- Support Adaptive Sampling
+- Support 128bit Trace IDs
+- Change trace/span IDs from uint64 to strong types TraceID and SpanID
+- Add Zipkin HTTP B3 Propagation format support #72
+- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
+- Change API for tracer, reporter, sampler initialization
+
+
+1.6.0 (2016-10-14)
+-------------------
+
+- Add Zipkin HTTP transport
+- Support external baggage via jaeger-baggage header
+- Unpin Thrift version, keep to master
+
+
+1.5.1 (2016-09-27)
+-------------------
+
+- Relax dependency on opentracing to ^1
+
+
+1.5.0 (2016-09-27)
+-------------------
+
+- Upgrade to opentracing-go 1.0
+- Support KV logging for Spans
+
+
+1.4.0 (2016-09-14)
+-------------------
+
+- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
new file mode 100644
index 000000000..7cf014a51
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
@@ -0,0 +1,170 @@
+# How to Contribute to Jaeger
+
+We'd love your help!
+
+Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
+pull requests. This document outlines some of the conventions on development
+workflow, commit message formatting, contact points and other resources to make
+it easier to get your contribution accepted.
+
+We gratefully welcome improvements to documentation as well as to code.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the [Developer Certificate of
+Origin](https://developercertificate.org/) (DCO). This document was created
+by the Linux Kernel community and is a simple statement that you, as a
+contributor, have the legal right to make the contribution. See the [DCO](DCO)
+file for details.
+
+## Getting Started
+
+This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies.
+
+To get started, make sure you clone the Git repository into the correct location
+`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
+
+```
+mkdir -p $GOPATH/src/github.com/uber
+cd $GOPATH/src/github.com/uber
+git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
+cd jaeger-client-go
+```
+
+Then install dependencies and run the tests:
+
+```
+git submodule update --init --recursive
+glide install
+make test
+```
+
+## Imports grouping
+
+This project groups imports in Go files using the following pattern:
+ * imports from standard library
+ * imports from other projects
+ * imports from `jaeger-client-go` project
+
+For example:
+
+```go
+import (
+ "fmt"
+
+ "github.com/uber/jaeger-lib/metrics"
+ "go.uber.org/zap"
+
+ "github.com/uber/jaeger-client-go/config"
+)
+```
+
+## Making A Change
+
+*Before making any significant changes, please [open an
+issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
+changes ahead of time will make the contribution process smooth for everyone.
+
+Once we've discussed your changes and you've got your code ready, make sure
+that tests are passing (`make test` or `make cover`) and open your PR. Your
+pull request is most likely to be accepted if it:
+
+* Includes tests for new functionality.
+* Follows the guidelines in [Effective
+ Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
+ review comments](https://github.com/golang/go/wiki/CodeReviewComments).
+* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
+ * Separate subject from body with a blank line
+ * Limit the subject line to 50 characters
+ * Capitalize the subject line
+ * Do not end the subject line with a period
+ * Use the imperative mood in the subject line
+ * Wrap the body at 72 characters
+ * Use the body to explain _what_ and _why_ instead of _how_
+* Each commit must be signed by the author ([see below](#sign-your-work)).
+
+## License
+
+By contributing your code, you agree to license your contribution under the terms
+of the [Apache License](LICENSE).
+
+If you are adding a new file, it should have a header like the one below. The
+easiest way to add such a header is to run `make fmt`.
+
+```
+// Copyright (c) 2017 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+```
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe@gmail.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+If you want this to be automatic you can set up some aliases:
+
+```
+git config --add alias.amend "commit -s --amend"
+git config --add alias.c "commit -s"
+```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
new file mode 100644
index 000000000..068953d4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/DCO
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
new file mode 100644
index 000000000..55d9ac030
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -0,0 +1,223 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ pruneopts = "UT"
+ revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+
+[[projects]]
+ branch = "master"
+ digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
+ name = "github.com/codahale/hdrhistogram"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
+
+[[projects]]
+ branch = "master"
+ digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
+ name = "github.com/crossdock/crossdock-go"
+ packages = [
+ ".",
+ "assert",
+ "require",
+ ]
+ pruneopts = "UT"
+ revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
+
+[[projects]]
+ digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ pruneopts = "UT"
+ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+ version = "v1.1.1"
+
+[[projects]]
+ digest = "1:318f1c959a8a740366fce4b1e1eb2fd914036b4af58fbd0a003349b305f118ad"
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ pruneopts = "UT"
+ revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+ version = "v1.3.1"
+
+[[projects]]
+ digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ pruneopts = "UT"
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
+[[projects]]
+ digest = "1:727b8f567a30d0739d6c26b9472b3422b351c93cf62095164c845a54b16fc18e"
+ name = "github.com/opentracing/opentracing-go"
+ packages = [
+ ".",
+ "ext",
+ "harness",
+ "log",
+ ]
+ pruneopts = "UT"
+ revision = "659c90643e714681897ec2521c60567dd21da733"
+ version = "v1.1.0"
+
+[[projects]]
+ digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
+ name = "github.com/pkg/errors"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+ version = "v0.8.1"
+
+[[projects]]
+ digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ pruneopts = "UT"
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:b6221ec0f8903b556e127c449e7106b63e6867170c2d10a7c058623d086f2081"
+ name = "github.com/prometheus/client_golang"
+ packages = ["prometheus"]
+ pruneopts = "UT"
+ revision = "c5b7fccd204277076155f10851dad72b76a49317"
+ version = "v0.8.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ pruneopts = "UT"
+ revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
+
+[[projects]]
+ digest = "1:35cf6bdf68db765988baa9c4f10cc5d7dda1126a54bd62e252dbcd0b1fc8da90"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model",
+ ]
+ pruneopts = "UT"
+ revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
+ version = "v0.2.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:c31163bd62461e0c5f7ddc7363e39ef8d9e929693e77b5c11c709b05f9cb9219"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/util",
+ "iostats",
+ "nfs",
+ "xfs",
+ ]
+ pruneopts = "UT"
+ revision = "55ae3d9d557340b5bc24cd8aa5f6fa2c2ab31352"
+
+[[projects]]
+ digest = "1:8ff03ccc603abb0d7cce94d34b613f5f6251a9e1931eba1a3f9888a9029b055c"
+ name = "github.com/stretchr/testify"
+ packages = [
+ "assert",
+ "require",
+ "suite",
+ ]
+ pruneopts = "UT"
+ revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
+ version = "v1.3.0"
+
+[[projects]]
+ digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d"
+ name = "github.com/uber-go/atomic"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
+ version = "v1.3.2"
+
+[[projects]]
+ digest = "1:f5c5ad1e08141e18aee1b9c37729d93d06805840421ccfc9d407787ffe969ce6"
+ name = "github.com/uber/jaeger-lib"
+ packages = [
+ "metrics",
+ "metrics/metricstest",
+ "metrics/prometheus",
+ ]
+ pruneopts = "UT"
+ revision = "0e30338a695636fe5bcf7301e8030ce8dd2a8530"
+ version = "v2.0.0"
+
+[[projects]]
+ digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d"
+ name = "go.uber.org/atomic"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
+ version = "v1.3.2"
+
+[[projects]]
+ digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a"
+ name = "go.uber.org/multierr"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+ version = "v1.1.0"
+
+[[projects]]
+ digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b"
+ name = "go.uber.org/zap"
+ packages = [
+ ".",
+ "buffer",
+ "internal/bufferpool",
+ "internal/color",
+ "internal/exit",
+ "zapcore",
+ ]
+ pruneopts = "UT"
+ revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
+ version = "v1.9.1"
+
+[[projects]]
+ branch = "master"
+ digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "context/ctxhttp",
+ ]
+ pruneopts = "UT"
+ revision = "addf6b3196f61cd44ce5a76657913698c73479d0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/crossdock/crossdock-go",
+ "github.com/opentracing/opentracing-go",
+ "github.com/opentracing/opentracing-go/ext",
+ "github.com/opentracing/opentracing-go/harness",
+ "github.com/opentracing/opentracing-go/log",
+ "github.com/pkg/errors",
+ "github.com/prometheus/client_golang/prometheus",
+ "github.com/stretchr/testify/assert",
+ "github.com/stretchr/testify/require",
+ "github.com/stretchr/testify/suite",
+ "github.com/uber-go/atomic",
+ "github.com/uber/jaeger-lib/metrics",
+ "github.com/uber/jaeger-lib/metrics/metricstest",
+ "github.com/uber/jaeger-lib/metrics/prometheus",
+ "go.uber.org/zap",
+ "go.uber.org/zap/zapcore",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
new file mode 100644
index 000000000..067f15a92
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -0,0 +1,31 @@
+[[constraint]]
+ name = "github.com/crossdock/crossdock-go"
+ branch = "master"
+
+[[constraint]]
+ name = "github.com/opentracing/opentracing-go"
+ version = "^1.1"
+
+[[constraint]]
+ name = "github.com/prometheus/client_golang"
+ version = "0.8.0"
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "^1.1.3"
+
+[[constraint]]
+ name = "github.com/uber-go/atomic"
+ version = "^1"
+
+[[constraint]]
+ name = "github.com/uber/jaeger-lib"
+ version = "^2.0"
+
+[[constraint]]
+ name = "go.uber.org/zap"
+ version = "^1"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
new file mode 100644
index 000000000..1b10c0964
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -0,0 +1,123 @@
+PROJECT_ROOT=github.com/uber/jaeger-client-go
+PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...)
+# all .go files that don't exist in hidden directories
+ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
+ -e ".*/\..*" \
+ -e ".*/_.*" \
+ -e ".*/mocks.*")
+
+-include crossdock/rules.mk
+
+export GO15VENDOREXPERIMENT=1
+
+RACE=-race
+GOTEST=go test -v $(RACE)
+GOLINT=golint
+GOVET=go vet
+GOFMT=gofmt
+FMT_LOG=fmt.log
+LINT_LOG=lint.log
+
+THRIFT_VER=0.9.3
+THRIFT_IMG=thrift:$(THRIFT_VER)
+THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
+THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
+THRIFT_GEN_DIR=thrift-gen
+
+PASS=$(shell printf "\033[32mPASS\033[0m")
+FAIL=$(shell printf "\033[31mFAIL\033[0m")
+COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
+
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test fmt lint
+
+.PHONY: test
+test:
+ifeq ($(USE_DEP),true)
+ dep check
+endif
+ bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
+
+.PHONY: fmt
+fmt:
+ $(GOFMT) -e -s -l -w $(ALL_SRC)
+ ./scripts/updateLicenses.sh
+
+.PHONY: lint
+lint:
+ $(GOVET) $(PACKAGES)
+ @cat /dev/null > $(LINT_LOG)
+ @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
+ @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
+ @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
+ ./scripts/updateLicenses.sh >> $(FMT_LOG)
+ @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
+
+
+.PHONY: install
+install:
+ glide --version || go get github.com/Masterminds/glide
+ifeq ($(USE_DEP),true)
+ dep ensure
+else
+ glide install
+endif
+
+
+.PHONY: cover
+cover:
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
+ go tool cover -html=cover.out -o cover.html
+
+
+# This is not part of the regular test target because we don't want to slow it
+# down.
+.PHONY: test-examples
+test-examples:
+ make -C examples
+
+# TODO at the moment we're not generating tchan_*.go files
+thrift: idl-submodule thrift-image
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
+ sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
+ sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
+ sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
+ $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
+ rm -rf thrift-gen/*/*-remote
+ rm -rf crossdock/thrift/*/*-remote
+ rm -rf thrift-gen/jaeger/collector.go
+
+idl-submodule:
+ git submodule init
+ git submodule update
+
+thrift-image:
+ $(THRIFT) -version
+
+.PHONY: install-dep-ci
+install-dep-ci:
+ - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
+ - chmod +x $$GOPATH/bin/dep
+
+.PHONY: install-ci
+install-ci: install-dep-ci install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+ go get golang.org/x/lint/golint
+
+.PHONY: test-ci
+test-ci:
+ @./scripts/cover.sh $(shell go list $(PACKAGES))
+ifeq ($(CI_SKIP_LINT),true)
+ echo 'skipping lint'
+else
+ make lint
+endif
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
new file mode 100644
index 000000000..115e49ab8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -0,0 +1,11 @@
+# Release Process
+
+1. Create a PR "Preparing for release X.Y.Z" against master branch
+ * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
+ * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
+2. Create a release "Release X.Y.Z" on Github
+ * Create Tag `vX.Y.Z`
+ * Copy CHANGELOG.md into the release notes
+3. Create a PR "Back to development" against master branch
+ * Add `<next_version> (unreleased)` to CHANGELOG.md
+ * Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index 821333ddb..320125087 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -231,6 +231,7 @@ func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Clos
jaeger.TracerOptions.Logger(opts.logger),
jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
+ jaeger.TracerOptions.PoolSpans(opts.poolSpans),
jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
index 9eed0ec75..322691bea 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/options.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -33,6 +33,7 @@ type Options struct {
contribObservers []jaeger.ContribObserver
observers []jaeger.Observer
gen128Bit bool
+ poolSpans bool
zipkinSharedRPCSpan bool
maxTagValueLength int
tags []opentracing.Tag
@@ -93,6 +94,13 @@ func Gen128Bit(gen128Bit bool) Option {
}
}
+// PoolSpans creates an option that specifies whether to pool spans.
+func PoolSpans(poolSpans bool) Option {
+ return func(c *Options) {
+ c.poolSpans = poolSpans
+ }
+}
+
// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
// and server spans a la zipkin. If false, client and server spans will be assigned
// different IDs.
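
The new `PoolSpans` option is threaded from `config.Options` through to `jaeger.TracerOptions.PoolSpans` (see the `config.go` hunk above). A minimal sketch of enabling it, together with the existing `Gen128Bit` option, when building a tracer through the config package; the service name and the `ServiceName` field are illustrative assumptions, not part of this patch:

```go
package main

import (
	"io"
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// newTracer builds a tracer with span pooling and 128-bit trace IDs
// enabled via config options. "my-service" is an illustrative name;
// only PoolSpans comes from this change, Gen128Bit already existed.
func newTracer() (opentracing.Tracer, io.Closer, error) {
	cfg := jaegercfg.Configuration{
		ServiceName: "my-service",
	}
	return cfg.NewTracer(
		jaegercfg.PoolSpans(true), // option added in this change
		jaegercfg.Gen128Bit(true),
	)
}

func main() {
	tracer, closer, err := newTracer()
	if err != nil {
		log.Fatalf("cannot init tracer: %v", err)
	}
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)
}
```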
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index 5a4e18752..aa2d43fc8 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -16,7 +16,7 @@ package jaeger
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.15.1dev"
+ JaegerClientVersion = "Go-2.16.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go
index 8b06173d9..90045f4fc 100644
--- a/vendor/github.com/uber/jaeger-client-go/context.go
+++ b/vendor/github.com/uber/jaeger-client-go/context.go
@@ -198,7 +198,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
// extract method, but now it returns a dummy context with only debugID filled in.
//
// See JaegerDebugHeader in constants.go
-// See textMapPropagator#Extract
+// See TextMapPropagator#Extract
func (c *SpanContext) isDebugIDContainerOnly() bool {
return !c.traceID.IsValid() && c.debugID != ""
}
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
new file mode 100644
index 000000000..af659ca0e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.lock
@@ -0,0 +1,90 @@
+hash: 92cc8f956428fc65bee07d809a752f34376aece141c934eff02aefa08d450b72
+updated: 2019-03-23T18:26:09.960887-04:00
+imports:
+- name: github.com/beorn7/perks
+ version: 3a771d992973f24aa725d07868b467d1ddfceafb
+ subpackages:
+ - quantile
+- name: github.com/codahale/hdrhistogram
+ version: 3a0bb77429bd3a61596f5e8a3172445844342120
+- name: github.com/crossdock/crossdock-go
+ version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
+ subpackages:
+ - assert
+ - require
+- name: github.com/davecgh/go-spew
+ version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d
+ subpackages:
+ - spew
+- name: github.com/golang/protobuf
+ version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175
+ subpackages:
+ - proto
+- name: github.com/matttproud/golang_protobuf_extensions
+ version: c12348ce28de40eed0136aa2b644d0ee0650e56c
+ subpackages:
+ - pbutil
+- name: github.com/opentracing/opentracing-go
+ version: 659c90643e714681897ec2521c60567dd21da733
+ subpackages:
+ - ext
+ - harness
+ - log
+- name: github.com/pkg/errors
+ version: ba968bfe8b2f7e042a574c888954fccecfa385b4
+- name: github.com/pmezard/go-difflib
+ version: 792786c7400a136282c1664665ae0a8db921c6c2
+ subpackages:
+ - difflib
+- name: github.com/prometheus/client_golang
+ version: c5b7fccd204277076155f10851dad72b76a49317
+ subpackages:
+ - prometheus
+- name: github.com/prometheus/client_model
+ version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
+ subpackages:
+ - go
+- name: github.com/prometheus/common
+ version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a
+ subpackages:
+ - expfmt
+ - internal/bitbucket.org/ww/goautoneg
+ - model
+- name: github.com/prometheus/procfs
+ version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea
+ subpackages:
+ - internal/util
+ - nfs
+ - xfs
+- name: github.com/stretchr/testify
+ version: f35b8ab0b5a2cef36673838d662e249dd9c94686
+ subpackages:
+ - assert
+ - require
+ - suite
+- name: github.com/uber/jaeger-lib
+ version: 0e30338a695636fe5bcf7301e8030ce8dd2a8530
+ subpackages:
+ - metrics
+ - metrics/metricstest
+ - metrics/prometheus
+- name: go.uber.org/atomic
+ version: 1ea20fb1cbb1cc08cbd0d913a96dead89aa18289
+- name: go.uber.org/multierr
+ version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
+- name: go.uber.org/zap
+ version: ff33455a0e382e8a81d14dd7c922020b6b5e7982
+ subpackages:
+ - buffer
+ - internal/bufferpool
+ - internal/color
+ - internal/exit
+ - zapcore
+- name: golang.org/x/net
+ version: 49bb7cea24b1df9410e1712aa6433dae904ff66a
+ subpackages:
+ - context
+ - context/ctxhttp
+testImports:
+- name: github.com/uber-go/atomic
+ version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
new file mode 100644
index 000000000..b3e5b80bc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml
@@ -0,0 +1,22 @@
+package: github.com/uber/jaeger-client-go
+import:
+- package: github.com/opentracing/opentracing-go
+ version: ^1.1
+ subpackages:
+ - ext
+ - log
+- package: github.com/crossdock/crossdock-go
+- package: github.com/uber/jaeger-lib
+ version: ^2.0.0
+ subpackages:
+ - metrics
+- package: github.com/pkg/errors
+ version: ~0.8.0
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+ - suite
+- package: github.com/prometheus/client_golang
+ version: v0.8.0
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
index 19c2c055b..5da70351d 100644
--- a/vendor/github.com/uber/jaeger-client-go/header.go
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -38,7 +38,8 @@ type HeadersConfig struct {
TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
}
-func (c *HeadersConfig) applyDefaults() *HeadersConfig {
+// ApplyDefaults sets missing configuration keys to default values
+func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
if c.JaegerBaggageHeader == "" {
c.JaegerBaggageHeader = JaegerBaggageHeader
}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
index abca67a3c..5b50cfb71 100644
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -51,15 +51,17 @@ type Extractor interface {
Extract(carrier interface{}) (SpanContext, error)
}
-type textMapPropagator struct {
+// TextMapPropagator is a combined Injector and Extractor for TextMap format
+type TextMapPropagator struct {
headerKeys *HeadersConfig
metrics Metrics
encodeValue func(string) string
decodeValue func(string) string
}
-func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
- return &textMapPropagator{
+// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
+func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+ return &TextMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
@@ -71,8 +73,9 @@ func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPr
}
}
-func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
- return &textMapPropagator{
+// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
+func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+ return &TextMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
@@ -88,19 +91,22 @@ func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMa
}
}
-type binaryPropagator struct {
+// BinaryPropagator is a combined Injector and Extractor for Binary format
+type BinaryPropagator struct {
tracer *Tracer
buffers sync.Pool
}
-func newBinaryPropagator(tracer *Tracer) *binaryPropagator {
- return &binaryPropagator{
+// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
+func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
+ return &BinaryPropagator{
tracer: tracer,
buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
}
}
-func (p *textMapPropagator) Inject(
+// Inject implements Injector of TextMapPropagator
+func (p *TextMapPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
@@ -121,7 +127,8 @@ func (p *textMapPropagator) Inject(
return nil
}
-func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+// Extract implements Extractor of TextMapPropagator
+func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
@@ -166,7 +173,8 @@ func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, e
return ctx, nil
}
-func (p *binaryPropagator) Inject(
+// Inject implements Injector of BinaryPropagator
+func (p *BinaryPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
@@ -207,7 +215,8 @@ func (p *binaryPropagator) Inject(
return nil
}
-func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+// Extract implements Extractor of BinaryPropagator
+func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(io.Reader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
@@ -269,7 +278,7 @@ func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
// is converted to map[string]string { "key1" : "value1",
// "key2" : "value2",
// "key3" : "value3" }
-func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
baggage := make(map[string]string)
value, err := url.QueryUnescape(value)
if err != nil {
@@ -289,12 +298,12 @@ func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]stri
// Converts a baggage item key into an http header format,
// by prepending TraceBaggageHeaderPrefix and encoding the key string
-func (p *textMapPropagator) addBaggageKeyPrefix(key string) string {
+func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
// TODO encodeBaggageKeyAsHeader add caching and escaping
return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
}
-func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string {
+func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
// TODO decodeBaggageHeaderKey add caching and escaping
return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
}
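
With `TextMapPropagator` and its constructors now exported, callers can wrap the propagator to hook their own logic around `Inject`/`Extract`, as motivated by the "Make propagators public to allow wrapping" changelog entry above. A sketch under the assumption that `jaeger.NewNullMetrics` is available for a no-op `Metrics` value (it is not part of this patch); registering the wrapper with a tracer, for example via `jaeger.TracerOptions.Injector`/`Extractor`, is left out:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

// loggingPropagator wraps the now-exported TextMapPropagator and is a
// place to hook logging or metrics around Inject/Extract.
type loggingPropagator struct {
	wrapped *jaeger.TextMapPropagator
}

func (p *loggingPropagator) Inject(sc jaeger.SpanContext, carrier interface{}) error {
	// custom pre-injection logic would go here
	return p.wrapped.Inject(sc, carrier)
}

func (p *loggingPropagator) Extract(carrier interface{}) (jaeger.SpanContext, error) {
	// custom post-extraction logic would go here
	return p.wrapped.Extract(carrier)
}

func newLoggingPropagator() *loggingPropagator {
	// ApplyDefaults (exported by this change) fills in the standard header names.
	headers := (&jaeger.HeadersConfig{}).ApplyDefaults()
	return &loggingPropagator{
		// NewNullMetrics is assumed to exist in the jaeger package.
		wrapped: jaeger.NewTextMapPropagator(headers, *jaeger.NewNullMetrics()),
	}
}

func main() {
	_ = newLoggingPropagator()
}
```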
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
index f05144bfc..a53d46f0e 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
@@ -18,6 +18,8 @@ const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
+const MESSAGE_SEND = "ms"
+const MESSAGE_RECV = "mr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
@@ -27,6 +29,7 @@ const SERVER_RECV_FRAGMENT = "srf"
const LOCAL_COMPONENT = "lc"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"
+const MESSAGE_ADDR = "ma"
func init() {
}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
index 34b2b267e..2d49e1d5f 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
@@ -103,10 +103,12 @@ func (p *AnnotationType) UnmarshalText(text []byte) error {
// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
//
// Conventionally, when the service name isn't known, service_name = "unknown".
+// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
type Endpoint struct {
Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"`
Port int16 `thrift:"port,2" json:"port"`
ServiceName string `thrift:"service_name,3" json:"service_name"`
+ Ipv6 []byte `thrift:"ipv6,4" json:"ipv6,omitempty"`
}
func NewEndpoint() *Endpoint {
@@ -124,6 +126,16 @@ func (p *Endpoint) GetPort() int16 {
func (p *Endpoint) GetServiceName() string {
return p.ServiceName
}
+
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+ return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+ return p.Ipv6 != nil
+}
+
func (p *Endpoint) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -150,6 +162,10 @@ func (p *Endpoint) Read(iprot thrift.TProtocol) error {
if err := p.readField3(iprot); err != nil {
return err
}
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -192,6 +208,15 @@ func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
return nil
}
+func (p *Endpoint) readField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.Ipv6 = v
+ }
+ return nil
+}
+
func (p *Endpoint) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Endpoint"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -205,6 +230,9 @@ func (p *Endpoint) Write(oprot thrift.TProtocol) error {
if err := p.writeField3(oprot); err != nil {
return err
}
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
@@ -253,6 +281,21 @@ func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIpv6() {
+ if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
+ }
+ if err := oprot.WriteBinary(p.Ipv6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
+ }
+ }
+ return err
+}
+
func (p *Endpoint) String() string {
if p == nil {
return "<nil>"
@@ -707,6 +750,8 @@ func (p *BinaryAnnotation) String() string {
// this field non-atomically is implementation-specific.
//
// This field is i64 vs i32 to support spans longer than 35 minutes.
+// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
+// means the trace uses 128 bit traceIds instead of 64 bit.
type Span struct {
TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
// unused field # 2
@@ -719,6 +764,7 @@ type Span struct {
Debug bool `thrift:"debug,9" json:"debug,omitempty"`
Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"`
Duration *int64 `thrift:"duration,11" json:"duration,omitempty"`
+ TraceIDHigh *int64 `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"`
}
func NewSpan() *Span {
@@ -777,6 +823,15 @@ func (p *Span) GetDuration() int64 {
}
return *p.Duration
}
+
+var Span_TraceIDHigh_DEFAULT int64
+
+func (p *Span) GetTraceIDHigh() int64 {
+ if !p.IsSetTraceIDHigh() {
+ return Span_TraceIDHigh_DEFAULT
+ }
+ return *p.TraceIDHigh
+}
func (p *Span) IsSetParentID() bool {
return p.ParentID != nil
}
@@ -793,6 +848,10 @@ func (p *Span) IsSetDuration() bool {
return p.Duration != nil
}
+func (p *Span) IsSetTraceIDHigh() bool {
+ return p.TraceIDHigh != nil
+}
+
func (p *Span) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -843,6 +902,10 @@ func (p *Span) Read(iprot thrift.TProtocol) error {
if err := p.readField11(iprot); err != nil {
return err
}
+ case 12:
+ if err := p.readField12(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -961,6 +1024,15 @@ func (p *Span) readField11(iprot thrift.TProtocol) error {
return nil
}
+func (p *Span) readField12(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 12: ", err)
+ } else {
+ p.TraceIDHigh = &v
+ }
+ return nil
+}
+
func (p *Span) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Span"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -992,6 +1064,9 @@ func (p *Span) Write(oprot thrift.TProtocol) error {
if err := p.writeField11(oprot); err != nil {
return err
}
+ if err := p.writeField12(oprot); err != nil {
+ return err
+ }
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
@@ -1142,6 +1217,21 @@ func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTraceIDHigh() {
+ if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
+ }
+ }
+ return err
+}
+
func (p *Span) String() string {
if p == nil {
return "<nil>"
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
index 2c29ae8c9..d87fb10be 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -97,13 +97,13 @@ func NewTracer(
}
// register default injectors/extractors unless they are already provided via options
- textPropagator := newTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+ textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
- httpHeaderPropagator := newHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+ httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
- binaryPropagator := newBinaryPropagator(t)
+ binaryPropagator := NewBinaryPropagator(t)
t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
// TODO remove after TChannel supports OpenTracing
@@ -319,7 +319,11 @@ func (t *Tracer) Extract(
carrier interface{},
) (opentracing.SpanContext, error) {
if extractor, ok := t.extractors[format]; ok {
- return extractor.Extract(carrier)
+ spanCtx, err := extractor.Extract(carrier)
+ if err != nil {
+ return nil, err // ensure returned spanCtx is nil
+ }
+ return spanCtx, nil
}
return nil, opentracing.ErrUnsupportedFormat
}
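
The `Extract` change above guarantees that a nil `opentracing.SpanContext` is returned whenever the extractor reports an error, so callers should check the error before using the context. A minimal sketch using only the standard opentracing-go API; the handler and port are illustrative:

```go
package main

import (
	"log"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

// handler extracts the incoming trace context. With the change above,
// Extract returns a nil SpanContext whenever err != nil.
func handler(w http.ResponseWriter, r *http.Request) {
	tracer := opentracing.GlobalTracer()
	spanCtx, err := tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(r.Header),
	)
	if err != nil && err != opentracing.ErrSpanContextNotFound {
		log.Printf("extract failed: %v", err)
	}
	// ChildOf(nil) is ignored by opentracing, so this is safe either way.
	span := tracer.StartSpan("handle-request", opentracing.ChildOf(spanCtx))
	defer span.Finish()

	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```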
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
index a90265f03..b4176cc72 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -51,10 +51,10 @@ func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
if headerKeys == nil {
return
}
- textPropagator := newTextMapPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
- httpHeaderPropagator := newHTTPHeaderPropagator(headerKeys.applyDefaults(), tracer.metrics)
+ httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
}
}
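
Because `CustomHeaderKeys` now calls the exported `ApplyDefaults`, a `HeadersConfig` only needs the fields a caller wants to override. A sketch assuming the usual `jaeger.NewTracer`, `NewConstSampler`, and `NewNullReporter` constructors (not shown in this patch); the header prefix and service name are illustrative:

```go
package main

import (
	"io"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// newTracerWithCustomHeaders overrides only the baggage header prefix;
// CustomHeaderKeys applies defaults for the remaining header names.
func newTracerWithCustomHeaders() (opentracing.Tracer, io.Closer) {
	headers := &jaeger.HeadersConfig{
		TraceBaggageHeaderPrefix: "myctx-", // custom prefix, rest defaulted
	}
	return jaeger.NewTracer(
		"my-service",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.CustomHeaderKeys(headers),
	)
}

func main() {
	_, closer := newTracerWithCustomHeaders()
	defer closer.Close()
}
```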
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
index dce58b433..eb31c4369 100644
--- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -48,13 +48,19 @@ func BuildZipkinThrift(s *Span) *z.Span {
if parentID != 0 {
ptrParentID = &parentID
}
+ traceIDHigh := int64(span.context.traceID.High)
+ var ptrTraceIDHigh *int64
+ if traceIDHigh != 0 {
+ ptrTraceIDHigh = &traceIDHigh
+ }
timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
duration := span.duration.Nanoseconds() / int64(time.Microsecond)
endpoint := &z.Endpoint{
ServiceName: span.tracer.serviceName,
Ipv4: int32(span.tracer.hostIPv4)}
thriftSpan := &z.Span{
- TraceID: int64(span.context.traceID.Low), // TODO upgrade zipkin thrift and use TraceIdHigh
+ TraceID: int64(span.context.traceID.Low),
+ TraceIDHigh: ptrTraceIDHigh,
ID: int64(span.context.spanID),
ParentID: ptrParentID,
Name: span.operationName,
diff --git a/vendor/github.com/uber/jaeger-lib/README.md b/vendor/github.com/uber/jaeger-lib/README.md
deleted file mode 100644
index 144ed1fb8..000000000
--- a/vendor/github.com/uber/jaeger-lib/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
-
-
-# jaeger-lib
-
-A collection of shared infrastructure libraries used by different
-components of [Jaeger](https://github.com/uber/jaeger) backend and [jaeger-client-go](https://github.com/uber/jaeger-client-go).
-This library is *not intended to be used standalone*, and provides *no guarantees of backwards compatibility*.
-
-The library's import path is `github.com/uber/jaeger-lib`.
-
-## How to Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md).
-
-## License
-
-[Apache 2.0 License](./LICENSE).
-
-
-[doc-img]: https://godoc.org/github.com/uber/jaeger-lib?status.svg
-[doc]: https://godoc.org/github.com/uber/jaeger-lib
-[ci-img]: https://travis-ci.org/jaegertracing/jaeger-lib.svg?branch=master
-[ci]: https://travis-ci.org/jaegertracing/jaeger-lib
-[cov-img]: https://coveralls.io/repos/jaegertracing/jaeger-lib/badge.svg?branch=master&service=github
-[cov]: https://coveralls.io/github/jaegertracing/jaeger-lib?branch=master
-
diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore
new file mode 100644
index 000000000..e3c2fc2f1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/.gitignore
@@ -0,0 +1,25 @@
+# .gitignore
+
+TODO.html
+README.html
+
+lzma/writer.txt
+lzma/reader.txt
+
+cmd/gxz/gxz
+cmd/xb/xb
+
+# test executables
+*.test
+
+# profile files
+*.out
+
+# vim swap file
+.*.swp
+
+# executables on windows
+*.exe
+
+# default compression test file
+enwik8*
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 000000000..1be3bb845
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,323 @@
+# TODO list
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with xz tool using comparable parameters
+ and optimize parameters
+4. Do some optimizations
+ - rename operation action and make it a simple type of size 8
+ - make maxMatches, wordSize parameters
+ - stop searching after a certain length is found (parameter sweetLen)
+
+## Release v0.7
+
+1. Optimize code
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader.
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel goroutines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz
+4. Provide manual page for gxz
+
+## Release v0.9
+
+1. Improve documentation
+2. Fuzz again
+
+## Release v1.0
+
+1. Full functioning gxz
+2. Add godoc URL to README.md (godoc.org)
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### Release v0.6
+
+- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
+ including
+ + simple scan at the dictionary head for the same byte
+ + use the killer byte (requiring matches to get longer, the first
+ test should be the byte that would make the match longer)
+
+
+## Optimizations
+
+- There may be a lot of false sharing in lzma.State; check whether this
+ can be improved by reorganizing the internal structure of it.
+- Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+- Use full buffer to create minimal bit-length above range encoder.
+- Might be too slow (see v0.4)
+
+### Different match finders
+
+- hashes with 2, 3 characters additional to 4 characters
+- binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+- execute goch -l for all packages; probably with lower param like 0.5.
+- check orthography with gospell
+- Write release notes in doc/relnotes.
+- Update README.md
+- xb copyright . in xz directory to ensure all new files have Copyright
+ header
+- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
+ version files
+- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+- Update TODO.md - write short log entry
+- git checkout master && git merge dev
+- git tag -a <version>
+- git push
+
+## Log
+
+### 2019-02-20
+
+Release v0.5.6 supports the go.mod file.
+
+### 2018-10-28
+
+Release v0.5.5 fixes issue #19 regarding ErrLimit outputs.
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and adds support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression rate increase by checking the byte at the length of
+the best match first, before checking the whole prefix. This makes the
+compressor even faster. We now have a large time budget to beat the
+compression ratio of the xz tool. For enwik8 we now have over 40 seconds
+to reduce the compressed file size by another 7 MiB.
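+
+A minimal sketch of that filter (hypothetical helper names, not the
+package's actual matcher code): before doing a full prefix comparison,
+test the single byte a candidate would need to match to beat the
+current best length.
+
+```go
+package sketch
+
+// matchLen returns the length of the common prefix of a and b.
+func matchLen(a, b []byte) int {
+	n := 0
+	for n < len(a) && n < len(b) && a[n] == b[n] {
+		n++
+	}
+	return n
+}
+
+// bestMatch scans candidate positions (all < pos) and skips any candidate
+// that cannot beat the current best: the byte at offset best must match
+// before the full prefix is compared.
+func bestMatch(data []byte, pos int, candidates []int) (start, best int) {
+	start = -1
+	for _, c := range candidates {
+		if best > 0 && (pos+best >= len(data) || data[c+best] != data[pos+best]) {
+			continue // cannot become longer than the current best match
+		}
+		if n := matchLen(data[c:pos], data[pos:]); n > best {
+			start, best = c, n
+		}
+	}
+	return start, best
+}
+```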
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach with the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation. I decided today that v0.4 will use the slow encoder
+using the operations buffer to be able to go back, if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to simplify the encoder and decoder
+functions again. The option approach is too complicated. Using a limited
+byte writer, not caring about written bytes at all, and not trying to
+handle uncompressed data simplifies the LZMA encoder and decoder a lot.
+Processing uncompressed data and handling limits is a feature of the
+LZMA2 format, not of LZMA.
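+
+For illustration, a minimal sketch of such a limited byte writer
+(hypothetical type, not the package's actual implementation): it forwards
+at most n bytes to the underlying writer and then reports a limit error.
+
+```go
+package sketch
+
+import (
+	"errors"
+	"io"
+)
+
+var errLimit = errors.New("write limit reached")
+
+// limitWriter forwards at most n bytes to w.
+type limitWriter struct {
+	w io.Writer
+	n int64 // remaining byte budget
+}
+
+func (lw *limitWriter) Write(p []byte) (int, error) {
+	if lw.n <= 0 {
+		return 0, errLimit
+	}
+	truncated := false
+	if int64(len(p)) > lw.n {
+		p = p[:lw.n]
+		truncated = true
+	}
+	k, err := lw.w.Write(p)
+	lw.n -= int64(k)
+	if err == nil && truncated {
+		err = errLimit // some of the requested bytes were not written
+	}
+	return k, err
+}
+```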
+
+I learned an interesting method from the LZO format. If the last copy is
+too far away, they move the head by 2 bytes instead of 1 byte to
+reduce processing time.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized. The next step is to implement LZMA2 and then
+xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for a fully compatible support of gzip's and lzma's options. It
+seems to work now quite nicely.
+
+### 2015-06-05
+
+The overflow issue was interesting to research; Henry S. Warren Jr.'s
+book Hacker's Delight was very helpful as usual and explained the issue
+perfectly. Fefe's information on his website was based on the
+C FAQ and quite bad, because it didn't address the issue of -MININT ==
+MININT.
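+
+A tiny Go demonstration of that corner case: negating the minimum int64
+wraps back to itself, so a naive absolute value is not always
+non-negative.
+
+```go
+package main
+
+import (
+	"fmt"
+	"math"
+)
+
+func main() {
+	x := int64(math.MinInt64)
+	// In two's complement, -MinInt64 overflows and wraps back to MinInt64.
+	fmt.Println(x, -x, -x == x) // -9223372036854775808 -9223372036854775808 true
+}
+```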
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. Using an 8 MByte buffer,
+the compression rate was not as good as for xz, but already better than
+the gzip default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should
+include the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb contains now the basic implementation for creating or
+reading LZMA byte streams. It allows the support for the implementation
+of the DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned that
+using the components may require too much code, but on the other hand
+there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that ReaderState is only used by readers and WriterState is only
+used by writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I implemented already
+the type State that replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago is using it now. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec has been copied yesterday to lzma2. opCodec has a high
+number of dependencies on other files in lzma2. Therefore I had to copy
+almost all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" presents the idea that, using an embedded field E, all the
+methods of E will be defined on T. If E is an interface, T satisfies E.
+
+https://talks.golang.org/2014/go4java.slide#51
+
+I have never used this, but it seems to be a cool idea.
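+
+A small self-contained example of the technique (the names are made up
+for illustration):
+
+```go
+package main
+
+import "fmt"
+
+type Stringer interface {
+	String() string
+}
+
+type name string
+
+func (n name) String() string { return string(n) }
+
+// T embeds the interface Stringer; the embedded value's methods are
+// promoted, so T itself satisfies Stringer.
+type T struct {
+	Stringer
+}
+
+func main() {
+	var s Stringer = T{Stringer: name("embedded")}
+	fmt.Println(s.String()) // "embedded"
+}
+```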
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+have now a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
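+
+A rough sketch of how such a pair of types could look (hypothetical
+signatures with placeholder dict and opCodec types, not the actual lzma2
+API): both operate on a raw LZMA stream without headers and accept an
+existing dictionary and opCodec for reuse.
+
+```go
+package sketch
+
+import "io"
+
+// dict and opCodec stand in for the package's dictionary and operation
+// codec types; their real definitions live in the lzma code.
+type dict struct{ buf []byte }
+type opCodec struct{ state int }
+
+// baseReader decodes a raw LZMA stream (no headers) from r, reusing a
+// caller-supplied dictionary and opCodec.
+type baseReader struct {
+	r     io.Reader
+	d     *dict
+	codec *opCodec
+}
+
+func (br *baseReader) Read(p []byte) (int, error) {
+	// Decoding elided; a real implementation would drive the range
+	// decoder and copy decoded bytes out of the dictionary.
+	return 0, io.EOF
+}
+
+// baseWriter is the encoding counterpart writing to w.
+type baseWriter struct {
+	w     io.Writer
+	d     *dict
+	codec *opCodec
+}
+
+func (bw *baseWriter) Write(p []byte) (int, error) {
+	// Encoding elided.
+	return len(p), nil
+}
+```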
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+ - compression worked correctly; tested decompression with lzma
+ - decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
new file mode 100644
index 000000000..855e60aee
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/example.go
@@ -0,0 +1,40 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "io"
+ "log"
+ "os"
+
+ "github.com/ulikunitz/xz"
+)
+
+func main() {
+ const text = "The quick brown fox jumps over the lazy dog.\n"
+ var buf bytes.Buffer
+ // compress text
+ w, err := xz.NewWriter(&buf)
+ if err != nil {
+ log.Fatalf("xz.NewWriter error %s", err)
+ }
+ if _, err := io.WriteString(w, text); err != nil {
+ log.Fatalf("WriteString error %s", err)
+ }
+ if err := w.Close(); err != nil {
+ log.Fatalf("w.Close error %s", err)
+ }
+ // decompress buffer and write output to stdout
+ r, err := xz.NewReader(&buf)
+ if err != nil {
+ log.Fatalf("NewReader error %s", err)
+ }
+ if _, err = io.Copy(os.Stdout, r); err != nil {
+ log.Fatalf("io.Copy error %s", err)
+ }
+}
diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz
new file mode 100644
index 000000000..4b820bd5a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/fox.xz
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/go.mod b/vendor/github.com/ulikunitz/xz/go.mod
new file mode 100644
index 000000000..9e5eea2c9
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/go.mod
@@ -0,0 +1 @@
+module github.com/ulikunitz/xz
diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
new file mode 100644
index 000000000..5edad6332
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs
new file mode 100644
index 000000000..a8c612ce1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/make-docs
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -x
+pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
+pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
diff --git a/vendor/github.com/varlink/go/.gitignore b/vendor/github.com/varlink/go/.gitignore
deleted file mode 100644
index a43b0e2d3..000000000
--- a/vendor/github.com/varlink/go/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/cmd/varlink-go-certification/orgvarlinkcertification/orgvarlinkcertification.go
-/.idea
diff --git a/vendor/github.com/varlink/go/.travis.yml b/vendor/github.com/varlink/go/.travis.yml
deleted file mode 100644
index fa9963500..000000000
--- a/vendor/github.com/varlink/go/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-language: go
-sudo: false
-go:
-- '1.10'
-- 1.11.x
-install:
-- go get golang.org/x/tools/cmd/cover
-- go get github.com/mattn/goveralls
-- go get github.com/TylerBrock/colorjson
-- go get github.com/fatih/color
-script:
-- go generate ./...
-- '"$HOME/gopath/bin/goveralls" -v -show -service=travis-ci -repotoken "$COVERALLS_TOKEN"'
-env:
- global:
- - secure: bjxOSgBfB+YooxNTkIDHAD+/X6g56qBWoYpB1JinuS5kmt3vSjfRSuXui71sGuha7jO2FOJja8HcpjOv3UP+qmmej9276o5VWrjS1AwnI95hSQQ4JHm293Z1QeojjRaxmoKrgn7i82Hn4qNdVLQA142s+SIdqOxtN6LDs7i0Yb4IuXoiMQHbd6kAAL95o9IUFPpYAdsXoQ6xnx+TXNiSwPPeh4m5CNKuTtmGTuMGaj8tXxttFKJhZcRzvOpDuh7luc9PSVnQgYmKE/3S9ehzGV8Lk4T8eC7587DY1GdYQKt1egJSE72L+PVnmoalWROaAGHZvYWsSAeNi1UIvcFwGbXBRpq7kz3DVfIULM8V67UAaF3dGYDN3Ae825mDjN5JDfml17AoEjMjI0LlBImZLX2EWIEN225JIREHdpG9seJkaN1ClcpvEIeYuThF2MiivP1EE8/w8S80yoO5nW76Py/th16OuaEiP9LdLsbXimObUPsS9Sr8qquf/PiVqRMMpVW88oOEG5HVn4Ra5B/xVC6nPEF88tE6p9+7RSz4rOWih8QmW+6SX6eo0BI9di4L779f/WfUrddN0JLIvEnRFZZ+pVF/oo+N2INNeIMsZBvG3FVo+Zxzo6SExXnSSpuf1bp140ZdinUMACq6BqK+9gj1C9vNRmqQJaEefrqutws=
-notifications:
- webhooks:
- urls:
- - https://webhooks.gitter.im/e/8cfd8d5fa3c31aa3d0c3
- on_success: change # options: [always|never|change] default: always
- on_failure: always # options: [always|never|change] default: always
- on_start: never # options: [always|never|change] default: always
-
-
diff --git a/vendor/github.com/varlink/go/Makefile b/vendor/github.com/varlink/go/Makefile
deleted file mode 100644
index 7538d0042..000000000
--- a/vendor/github.com/varlink/go/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all: cmd/varlink-go-certification/orgvarlinkcertification/orgvarlinkcertification.go
- go test ./...
-
-cmd/varlink-go-certification/orgvarlinkcertification/orgvarlinkcertification.go: cmd/varlink-go-certification/orgvarlinkcertification/org.varlink.certification.varlink
- go generate cmd/varlink-go-certification/orgvarlinkcertification/generate.go
-
-.PHONY: all
diff --git a/vendor/github.com/varlink/go/README.md b/vendor/github.com/varlink/go/README.md
deleted file mode 100644
index 926a82bdd..000000000
--- a/vendor/github.com/varlink/go/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-[![Build Status](https://travis-ci.org/varlink/go.svg?branch=master)](https://travis-ci.org/varlink/go)
-[![Go Report Card](https://goreportcard.com/badge/github.com/varlink/go)](https://goreportcard.com/report/github.com/varlink/go)
-[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](http://godoc.org/github.com/varlink/go/varlink)
-[![Coverage Status](https://coveralls.io/repos/github/varlink/go/badge.svg?branch=master)](https://coveralls.io/github/varlink/go?branch=master)
-[![Release](https://img.shields.io/github/release/golang-standards/project-layout.svg?style=flat-square)](https://github.com/varlink/go/varlink/releases/latest)
-
-# go/varlink
diff --git a/vendor/github.com/varlink/go/cmd/varlink-go-certification/main.go b/vendor/github.com/varlink/go/cmd/varlink-go-certification/main.go
deleted file mode 100644
index dfffb5d0d..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink-go-certification/main.go
+++ /dev/null
@@ -1,619 +0,0 @@
-package main
-
-import (
- "crypto/rand"
- "encoding/json"
- "flag"
- "fmt"
- "github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification"
- "github.com/varlink/go/varlink"
- "io"
- "math"
- "os"
- "strconv"
- "sync"
- "time"
-)
-
-func run_client(address string) {
- c, err := varlink.NewConnection(address)
- if err != nil {
- fmt.Println("Failed to connect")
- return
- }
- defer c.Close()
-
- client_id, err := orgvarlinkcertification.Start().Call(c)
- if err != nil {
- fmt.Println("Start() failed")
- return
- }
- fmt.Printf("Start: '%v'\n", client_id)
-
- b1, err := orgvarlinkcertification.Test01().Call(c, client_id)
- if err != nil {
- fmt.Println("Test01() failed")
- return
- }
- fmt.Printf("Test01: '%v'\n", b1)
-
- i2, err := orgvarlinkcertification.Test02().Call(c, client_id, b1)
- if err != nil {
- fmt.Println("Test02() failed")
- return
- }
- fmt.Printf("Test02: '%v'\n", i2)
-
- f3, err := orgvarlinkcertification.Test03().Call(c, client_id, i2)
- if err != nil {
- fmt.Println("Test03() failed")
- return
- }
- fmt.Printf("Test03: '%v'\n", f3)
-
- s4, err := orgvarlinkcertification.Test04().Call(c, client_id, f3)
- if err != nil {
- fmt.Println("Test04() failed")
- return
- }
- fmt.Printf("Test04: '%v'\n", s4)
-
- b5, i5, f5, s5, err := orgvarlinkcertification.Test05().Call(c, client_id, s4)
- if err != nil {
- fmt.Println("Test05() failed")
- return
- }
- fmt.Printf("Test05: '%v'\n", b5)
-
- o6, err := orgvarlinkcertification.Test06().Call(c, client_id, b5, i5, f5, s5)
- if err != nil {
- fmt.Println("Test06() failed")
- return
- }
- fmt.Printf("Test06: '%v'\n", o6)
-
- m7, err := orgvarlinkcertification.Test07().Call(c, client_id, o6)
- if err != nil {
- fmt.Println("Test07() failed")
- return
- }
- fmt.Printf("Test07: '%v'\n", m7)
-
- m8, err := orgvarlinkcertification.Test08().Call(c, client_id, m7)
- if err != nil {
- fmt.Println("Test08() failed")
- return
- }
- fmt.Printf("Test08: '%v'\n", m8)
-
- t9, err := orgvarlinkcertification.Test09().Call(c, client_id, m8)
- if err != nil {
- fmt.Println("Test09() failed")
- return
- }
- fmt.Printf("Test09: '%v'\n", t9)
-
- receive10, err := orgvarlinkcertification.Test10().Send(c, varlink.More, client_id, t9)
- if err != nil {
- fmt.Println("Test10() failed")
- return
- }
-
- fmt.Println("Test10() Send:")
- var a10 []string
- for {
- s10, flags10, err := receive10()
- if err != nil {
- fmt.Println("Test10() receive failed")
- return
- }
- a10 = append(a10, s10)
- fmt.Printf(" Receive: '%v'\n", s10)
-
- if flags10&varlink.Continues == 0 {
- break
- }
- }
- fmt.Printf("Test10: '%v'\n", a10)
-
- _, err = orgvarlinkcertification.Test11().Send(c, varlink.Oneway, client_id, a10)
- if err != nil {
- fmt.Println("Test11() failed")
- return
- }
- fmt.Println("Test11: ''")
-
- end, err := orgvarlinkcertification.End().Call(c, client_id)
- if err != nil {
- fmt.Println("End() failed")
- return
- }
- fmt.Printf("End: '%v'\n", end)
-}
-
-// Service
-type client struct {
- id string
- time time.Time
-}
-
-type test struct {
- orgvarlinkcertification.VarlinkInterface
- mutex sync.Mutex
- clients map[string]*client
-}
-
-func (t *test) Client(id string) *client {
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- return t.clients[id]
-}
-
-func (t *test) NewClient() *client {
- id128 := make([]byte, 16)
- io.ReadFull(rand.Reader, id128)
- id128[8] = id128[8]&^0xc0 | 0x80
- id128[6] = id128[6]&^0xf0 | 0x40
- uuid := fmt.Sprintf("%x-%x-%x-%x-%x", id128[0:4], id128[4:6], id128[6:8], id128[8:10], id128[10:])
-
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- // Garbage-collect old clients
- for key, client := range t.clients {
- if time.Since(client.time).Minutes() > 1 {
- delete(t.clients, key)
- }
- }
-
- if len(t.clients) > 100 {
- return nil
- }
-
- c := client{
- id: uuid,
- time: time.Now(),
- }
- t.clients[uuid] = &c
-
- return &c
-}
-
-func (t *test) RemoveClient(id string) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- delete(t.clients, id)
-}
-
-func (t *test) Start(c orgvarlinkcertification.VarlinkCall) error {
- return c.ReplyStart(t.NewClient().id)
-}
-
-func (t *test) Test01(c orgvarlinkcertification.VarlinkCall, client_id_ string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- return c.ReplyTest01(true)
-}
-
-func (t *test) Test02(c orgvarlinkcertification.VarlinkCall, client_id_ string, bool_ bool) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if !bool_ {
- return c.ReplyCertificationError(nil, nil)
- }
-
- return c.ReplyTest02(1)
-}
-
-func (t *test) Test03(c orgvarlinkcertification.VarlinkCall, client_id_ string, int_ int64) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if int_ != 1 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- return c.ReplyTest03(1.0)
-}
-
-func (t *test) Test04(c orgvarlinkcertification.VarlinkCall, client_id_ string, float_ float64) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if float_ != 1.0 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- return c.ReplyTest04("ping")
-}
-func (t *test) Test05(c orgvarlinkcertification.VarlinkCall, client_id_ string, string_ string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if string_ != "ping" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- return c.ReplyTest05(false, 2, math.Pi, "a lot of string")
-}
-
-func (t *test) Test06(c orgvarlinkcertification.VarlinkCall, client_id_ string, bool_ bool, int_ int64, float_ float64, string_ string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if bool_ {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if int_ != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if float_ != math.Pi {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if string_ != "a lot of string" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- s := struct {
- Bool bool
- Int int64
- Float float64
- String string
- }{
- Bool: false,
- Int: 2,
- Float: math.Pi,
- String: "a lot of string",
- }
- return c.ReplyTest06(s)
-}
-
-func (t *test) Test07(c orgvarlinkcertification.VarlinkCall, client_id_ string, struct_ struct {
- Bool bool
- Int int64
- Float float64
- String string
-}) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if struct_.Bool {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if struct_.Int != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if struct_.Float != math.Pi {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if struct_.String != "a lot of string" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- m := map[string]string{
- "bar": "Bar",
- "foo": "Foo",
- }
- return c.ReplyTest07(m)
-}
-
-func (t *test) Test08(c orgvarlinkcertification.VarlinkCall, client_id_ string, map_ map[string]string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if len(map_) != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if map_["bar"] != "Bar" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if map_["foo"] != "Foo" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- m := map[string]struct{}{
- "one": {},
- "two": {},
- "three": {},
- }
- return c.ReplyTest08(m)
-}
-
-func (t *test) Test09(c orgvarlinkcertification.VarlinkCall, client_id_ string, set_ map[string]struct{}) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if len(set_) != 3 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok := set_["one"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok = set_["two"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok = set_["three"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- m := orgvarlinkcertification.MyType{
- Object: json.RawMessage(`{"method": "org.varlink.certification.Test09", "parameters": {"map": {"foo": "Foo", "bar": "Bar"}}}`),
- Enum: "two",
- Struct: struct {
- First int64 `json:"first"`
- Second string `json:"second"`
- }{First: 1, Second: "2"},
- Array: []string{"one", "two", "three"},
- Dictionary: map[string]string{"foo": "Foo", "bar": "Bar"},
- Stringset: map[string]struct{}{"one": {}, "two": {}, "three": {}},
- Nullable: nil,
- Nullable_array_struct: nil,
- Interface: orgvarlinkcertification.Interface{
- Foo: &[]*map[string]string{
- nil,
- &map[string]string{"Foo": "foo", "Bar": "bar"},
- nil,
- &map[string]string{"one": "foo", "two": "bar"},
- },
- Anon: struct {
- Foo bool `json:"foo"`
- Bar bool `json:"bar"`
- }{Foo: true, Bar: false},
- },
- }
- return c.ReplyTest09(m)
-}
-
-func (t *test) Test10(c orgvarlinkcertification.VarlinkCall, client_id_ string, mytype_ orgvarlinkcertification.MyType) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- var o struct {
- Method string `json:"method"`
- Parameters struct {
- Map map[string]string `json:"map"`
- } `json:"parameters"`
- }
- err := json.Unmarshal(mytype_.Object, &o)
- if err != nil {
- return err
- }
-
- if o.Method != "org.varlink.certification.Test09" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(o.Parameters.Map) != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if o.Parameters.Map["bar"] != "Bar" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if o.Parameters.Map["foo"] != "Foo" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Enum != "two" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Struct.First != 1 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Struct.Second != "2" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(mytype_.Array) != 3 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Array[0] != "one" && mytype_.Array[1] != "two" && mytype_.Array[2] != "three" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(mytype_.Dictionary) != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Dictionary["bar"] != "Bar" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Dictionary["foo"] != "Foo" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(mytype_.Stringset) != 3 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok := mytype_.Stringset["one"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok = mytype_.Stringset["two"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- _, ok = mytype_.Stringset["three"]
- if !ok {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Nullable != nil {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Nullable_array_struct != nil {
- return c.ReplyCertificationError(nil, nil)
- }
-
- i := *mytype_.Interface.Foo
- if len(i) != 4 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if i[0] != nil {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(*i[1]) != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if (*i[1])["Foo"] != "foo" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if (*i[1])["Bar"] != "bar" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if i[2] != nil {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if len(*i[3]) != 2 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if (*i[3])["one"] != "foo" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if (*i[3])["two"] != "bar" {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if !mytype_.Interface.Anon.Foo {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if mytype_.Interface.Anon.Bar {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if !c.WantsMore() {
- return c.ReplyCertificationError(nil, nil)
- }
-
- for i := 1; i <= 10; i++ {
- c.Continues = i < 10
- err := c.ReplyTest10("Reply number " + strconv.Itoa(i))
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *test) Test11(c orgvarlinkcertification.VarlinkCall, client_id_ string, last_more_replies_ []string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- if len(last_more_replies_) != 10 {
- return c.ReplyCertificationError(nil, nil)
- }
-
- if !c.IsOneway() {
- return c.ReplyCertificationError(nil, nil)
- }
-
- for i := 1; i <= 10; i++ {
- if last_more_replies_[i] != "Reply number "+strconv.Itoa(i) {
- return c.ReplyCertificationError(nil, nil)
- }
- }
-
- return c.ReplyTest11()
-}
-
-func (t *test) End(c orgvarlinkcertification.VarlinkCall, client_id_ string) error {
- if t.Client(client_id_) == nil {
- return c.ReplyClientIdError()
- }
-
- t.RemoveClient(client_id_)
- return c.ReplyEnd(true)
-}
-
-func run_server(address string) {
- t := test{
- clients: make(map[string]*client),
- }
-
- s, err := varlink.NewService(
- "Varlink",
- "Certification",
- "1",
- "https://github.com/varlink/go",
- )
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-
- s.RegisterInterface(orgvarlinkcertification.VarlinkNew(&t))
- err = s.Listen(address, 0)
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
-
-func main() {
- var address string
- var client bool
-
- flag.StringVar(&address, "varlink", "", "Varlink address")
- flag.BoolVar(&client, "client", false, "Run as client")
- flag.Parse()
-
- if address == "" {
- flag.Usage()
- os.Exit(1)
- }
-
- if client {
- run_client(address)
- return
- }
-
- run_server(address)
-}
diff --git a/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/generate.go b/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/generate.go
deleted file mode 100644
index e1ac7f090..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package orgvarlinkcertification
-
-//go:generate go run ../../varlink-go-interface-generator/main.go org.varlink.certification.varlink
diff --git a/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/org.varlink.certification.varlink b/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/org.varlink.certification.varlink
deleted file mode 100644
index 41b9967b5..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink-go-certification/orgvarlinkcertification/org.varlink.certification.varlink
+++ /dev/null
@@ -1,89 +0,0 @@
-# Interface to test varlink implementations against.
-# First you write a varlink client calling:
-# Start, Test01, Test02, …, Test09, End
-# The return value of the previous call should be the argument of the next call.
-# Then you test this client against well known servers like python or rust from
-# https://github.com/varlink/
-#
-# Next you write a varlink server providing the same service as the well known ones.
-# Now run your client against it and run well known clients like python or rust
-# from https://github.com/varlink/ against your server. If all works out, then
-# your new language bindings should be varlink certified.
-interface org.varlink.certification
-
-type Interface (
- foo: ?[]?[string](foo, bar, baz),
- anon: (foo: bool, bar: bool)
-)
-
-type MyType (
- object: object,
- enum: (one, two, three),
- struct: (first: int, second: string),
- array: []string,
- dictionary: [string]string,
- stringset: [string](),
- nullable: ?string,
- nullable_array_struct: ?[](first: int, second: string),
- interface: Interface
-)
-
-method Start() -> (client_id: string)
-
-method Test01(client_id: string) -> (bool: bool)
-
-method Test02(client_id: string, bool: bool) -> (int: int)
-
-method Test03(client_id: string, int: int) -> (float: float)
-
-method Test04(client_id: string, float: float) -> (string: string)
-
-method Test05(client_id: string, string: string) -> (
- bool: bool,
- int: int,
- float: float,
- string: string
-)
-
-method Test06(
- client_id: string,
- bool: bool,
- int: int,
- float: float,
- string: string
-) -> (
- struct: (
- bool: bool,
- int: int,
- float: float,
- string: string
- )
-)
-
-method Test07(
- client_id: string,
- struct: (
- bool: bool,
- int: int,
- float: float,
- string: string
- )
-) -> (map: [string]string)
-
-method Test08(client_id: string, map: [string]string) -> (set: [string]())
-
-method Test09(client_id: string, set: [string]()) -> (mytype: MyType)
-
-# returns more than one reply with "continues"
-method Test10(client_id: string, mytype: MyType) -> (string: string)
-
-method Test11(
- client_id: string,
- last_more_replies: []string
-) -> ()
-
-method End(client_id: string) -> (all_ok: bool)
-
-error ClientIdError ()
-
-error CertificationError (wants: object, got: object)
diff --git a/vendor/github.com/varlink/go/cmd/varlink-go-interface-generator/generator_test.go b/vendor/github.com/varlink/go/cmd/varlink-go-interface-generator/generator_test.go
deleted file mode 100644
index 8e749f411..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink-go-interface-generator/generator_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package main
-
-import (
- "strings"
- "testing"
-)
-
-func expect(t *testing.T, expected string, returned string) {
- if strings.Compare(returned, expected) != 0 {
- t.Fatalf("Expected(%d): `%s`\nGot(%d): `%s`\n",
- len(expected), expected,
- len(returned), returned)
- }
-}
-
-func TestIDLParser(t *testing.T) {
- pkgname, b, err := generateTemplate(`
-# Interface to jump a spacecraft to another point in space. The
-# FTL Drive is the propulsion system to achieve faster-than-light
-# travel through space. A ship making a properly calculated
-# jump can arrive safely in planetary orbit, or alongside other
-# ships or spaceborne objects.
-interface org.example.ftl
-
-# The current state of the FTL drive and the amount of fuel
-# available to jump.
-type DriveCondition (
- state: (idle, spooling, busy),
- booster: bool,
- active_engines: [](id: int, state: bool),
- tylium_level: int
-)
-
-# Speed, trajectory and jump duration is calculated prior to
-# activating the FTL drive.
-type DriveConfiguration (
- speed: int,
- trajectory: int,
- duration: int
-)
-
-# The galactic coordinates use the Sun as the origin. Galactic
-# longitude is measured with primary direction from the Sun to
-# the center of the galaxy in the galactic plane, while the
-# galactic latitude measures the angle of the object above the
-# galactic plane.
-type Coordinate (
- longitude: float,
- latitude: float,
- distance: int
-)
-
-# Monitor the drive. The method will reply with an update whenever
-# the drive's state changes
-method Monitor() -> (condition: DriveCondition)
-
-# Calculate the drive's jump parameters from the current
-# position to the target position in the galaxy
-method CalculateConfiguration(
- current: Coordinate,
- target: Coordinate
-) -> (configuration: DriveConfiguration)
-
-# Jump to the calculated point in space
-method Jump(configuration: DriveConfiguration) -> ()
-
-# There is not enough tylium to jump with the given parameters
-error NotEnoughEnergy ()
-
-# The supplied parameters are outside the supported range
-error ParameterOutOfRange (field: string)
-
-# some more coverage
-method Foo(interface: string) -> (ret: (go: string, switch: bool, more: (t:bool, f:bool)))
-
-# some more coverage
-method TestMap(map: [string]string) -> (map: [string](i: int, val: string))
-method TestSet(set: [string]()) -> (set: [string]())
-method TestObject(object: object) -> (object: object)
- `)
-
- if err != nil {
- t.Fatalf("Error parsing %v", err)
- }
- expect(t, "orgexampleftl", pkgname)
- if len(b) <= 0 {
- t.Fatal("No generated go source")
- }
- // FIXME: compare b.String() against expected output
-}
diff --git a/vendor/github.com/varlink/go/cmd/varlink-go-type-generator/main.go b/vendor/github.com/varlink/go/cmd/varlink-go-type-generator/main.go
deleted file mode 100644
index 46414b7bf..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink-go-type-generator/main.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package main
-
-import (
- "fmt"
- "go/ast"
- "go/importer"
- "go/parser"
- "go/token"
- "go/types"
- "log"
- "os"
-)
-
-func IsBasicGoType(t types.Type, flag types.BasicInfo) bool {
- switch u := t.(type) {
- case *types.Basic:
- if u.Info()&flag != 0 {
- return true
- }
- return false
- case *types.Named:
- return IsBasicGoType(u.Underlying(), flag)
- }
- return false
-}
-
-func GoToVarlinkType(t types.Type) string {
- if IsBasicGoType(t, types.IsBoolean) {
- return "bool"
- }
-
- if IsBasicGoType(t, types.IsInteger) {
- return "int"
- }
-
- if IsBasicGoType(t, types.IsFloat) {
- return "float"
- }
-
- if IsBasicGoType(t, types.IsString) {
- return "string"
- }
-
- switch u := t.(type) {
- case *types.Basic:
- return fmt.Sprintf("<<<%s>>>", t.String())
-
- case *types.Named:
- return u.Obj().Name()
-
- case *types.Map:
- if IsBasicGoType(u.Key(), types.IsString) {
- return fmt.Sprintf("[string]%s", GoToVarlinkType(u.Elem()))
- } else {
- return fmt.Sprintf("<<<%s>>>", u.String())
- }
-
- case *types.Interface:
- if u.Empty() {
- return "()"
- }
- return fmt.Sprintf("<<<%s>>>", u.String())
-
- case *types.Pointer:
- return fmt.Sprintf("?%s", GoToVarlinkType(u.Elem()))
-
- case *types.Array:
- return fmt.Sprintf("[]%s", GoToVarlinkType(u.Elem()))
-
- case *types.Slice:
- return fmt.Sprintf("[]%s", GoToVarlinkType(u.Elem()))
-
- case *types.Struct:
- if u.NumFields() > 0 {
- s := ""
- for i := 0; i < u.NumFields(); i++ {
- if i > 0 {
- s += ",\n"
- }
- s += fmt.Sprintf("\t%s: %s",
- u.Field(i).Name(), GoToVarlinkType(u.Field(i).Type()))
- }
-
- return fmt.Sprintf("(\n%s\n)", s)
- }
- return "()"
-
- default:
- return fmt.Sprintf("<<<%T %s>>>", t, u)
- }
-}
-
-func PrintDefsUses(name string, fset *token.FileSet, files []*ast.File) error {
- conf := types.Config{
- Importer: importer.Default(),
- FakeImportC: true,
- }
-
- info := &types.Info{
- Defs: make(map[*ast.Ident]types.Object),
- }
-
- _, err := conf.Check(name, fset, files, info)
- if err != nil {
- return err // type error
- }
-
- seen := map[string]interface{}{}
-
- for id, obj := range info.Defs {
- if obj == nil {
- continue
- }
-
- if _, ok := seen[id.Name]; ok {
- continue
- }
-
- /*
- if !obj.Exported() || obj.Pkg().Name() != name {
- continue
- }
- */
- switch f := obj.Type().Underlying().(type) {
- case *types.Struct:
- if f.NumFields() > 0 {
- fmt.Printf("type %s %s\n\n", id.Name, GoToVarlinkType(f))
- }
- }
- seen[id.Name] = nil
- }
-
- return nil
-}
-
-func main() {
-
- path := os.Args[1]
- fs := token.NewFileSet()
-
- if stat, err := os.Stat(path); err == nil && stat.IsDir() {
- pkgs, err := parser.ParseDir(fs, path, nil, 0)
- if err != nil {
- fmt.Printf("parsing dir '%s': %s", path, err)
- }
- for name, pkg := range pkgs {
- log.Println("Found package:", name)
-
- fset := make([]*ast.File, len(pkg.Files), len(pkg.Files))
- idx := 0
- for _, value := range pkg.Files {
- fset[idx] = value
- idx++
- }
-
- if err := PrintDefsUses(name, fs, fset); err != nil {
- log.Print(err) // type error
- }
- }
- } else {
-
- fset, err := parser.ParseFile(fs, path, nil, 0)
-
- if err != nil {
- fmt.Printf("parsing file '%s': %s", path, err)
- }
- name := fset.Name.String()
- if err := PrintDefsUses(name, fs, []*ast.File{fset}); err != nil {
- log.Print(err) // type error
- }
- }
-}
diff --git a/vendor/github.com/varlink/go/cmd/varlink/main.go b/vendor/github.com/varlink/go/cmd/varlink/main.go
deleted file mode 100644
index 6781dd956..000000000
--- a/vendor/github.com/varlink/go/cmd/varlink/main.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "github.com/TylerBrock/colorjson"
- "github.com/fatih/color"
- "github.com/varlink/go/varlink"
- "os"
- "strings"
-)
-
-var bold = color.New(color.Bold)
-var errorBoldRed string
-var bridge string
-
-func ErrPrintf(format string, a ...interface{}) {
- fmt.Fprintf(os.Stderr, "%s ", errorBoldRed)
- fmt.Fprintf(os.Stderr, format, a...)
-}
-
-func print_usage(set *flag.FlagSet, arg_help string) {
- if set == nil {
- fmt.Fprintf(os.Stderr, "Usage: %s [GLOBAL OPTIONS] COMMAND ...\n", os.Args[0])
- } else {
- fmt.Fprintf(os.Stderr, "Usage: %s [GLOBAL OPTIONS] %s [OPTIONS] %s\n", os.Args[0], set.Name(), arg_help)
- }
-
- fmt.Fprintln(os.Stderr, "\nGlobal Options:")
- flag.PrintDefaults()
-
- if set == nil {
- fmt.Fprintln(os.Stderr, "\nCommands:")
- fmt.Fprintln(os.Stderr, " info\tPrint information about a service")
- fmt.Fprintln(os.Stderr, " help\tPrint interface description or service information")
- fmt.Fprintln(os.Stderr, " call\tCall a method")
- } else {
- fmt.Fprintln(os.Stderr, "\nOptions:")
- set.PrintDefaults()
- }
- os.Exit(1)
-}
-
-func varlink_call(args []string) {
- var err error
- var oneway bool
-
- callFlags := flag.NewFlagSet("help", flag.ExitOnError)
- callFlags.BoolVar(&oneway, "-oneway", false, "Use bridge for connection")
- var help bool
- callFlags.BoolVar(&help, "help", false, "Prints help information")
- var usage = func() { print_usage(callFlags, "<[ADDRESS/]INTERFACE.METHOD> [ARGUMENTS]") }
- callFlags.Usage = usage
-
- _ = callFlags.Parse(args)
-
- if help {
- usage()
- }
-
- var con *varlink.Connection
- var address string
- var methodName string
-
- if len(bridge) != 0 {
- con, err = varlink.NewBridge(bridge)
-
- if err != nil {
- ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err)
- os.Exit(2)
- }
- address = "bridge:" + bridge
- methodName = callFlags.Arg(0)
- } else {
- uri := callFlags.Arg(0)
- if uri == "" {
- usage()
- }
-
- li := strings.LastIndex(uri, "/")
-
- if li == -1 {
- ErrPrintf("Invalid address '%s'\n", uri)
- os.Exit(2)
- }
-
- address = uri[:li]
- methodName = uri[li+1:]
-
- con, err = varlink.NewConnection(address)
-
- if err != nil {
- ErrPrintf("Cannot connect to '%s': %v\n", address, err)
- os.Exit(2)
- }
- }
- var parameters string
- var params json.RawMessage
-
- parameters = callFlags.Arg(1)
- if parameters == "" {
- params = nil
- } else {
- json.Unmarshal([]byte(parameters), &params)
- }
-
- var flags uint64
- flags = 0
- if oneway {
- flags |= varlink.Oneway
- }
- recv, err := con.Send(methodName, params, flags)
-
- var retval map[string]interface{}
-
- // FIXME: Use cont
- _, err = recv(&retval)
-
- f := colorjson.NewFormatter()
- f.Indent = 2
- f.KeyColor = color.New(color.FgCyan)
- f.StringColor = color.New(color.FgMagenta)
- f.NumberColor = color.New(color.FgMagenta)
- f.BoolColor = color.New(color.FgMagenta)
- f.NullColor = color.New(color.FgMagenta)
-
- if err != nil {
- if e, ok := err.(*varlink.Error); ok {
- ErrPrintf("Call failed with error: %v\n", color.New(color.FgRed).Sprint(e.Name))
- errorRawParameters := e.Parameters.(*json.RawMessage)
- if errorRawParameters != nil {
- var param map[string]interface{}
- _ = json.Unmarshal(*errorRawParameters, &param)
- c, _ := f.Marshal(param)
- fmt.Fprintf(os.Stderr, "%v\n", string(c))
- }
- os.Exit(2)
- }
- ErrPrintf("Error calling '%s': %v\n", methodName, err)
- os.Exit(2)
- }
- c, _ := f.Marshal(retval)
- fmt.Println(string(c))
-}
-
-func varlink_help(args []string) {
- var err error
-
- helpFlags := flag.NewFlagSet("help", flag.ExitOnError)
- var help bool
- helpFlags.BoolVar(&help, "help", false, "Prints help information")
- var usage = func() { print_usage(helpFlags, "<[ADDRESS/]INTERFACE>") }
- helpFlags.Usage = usage
-
- _ = helpFlags.Parse(args)
-
- if help {
- usage()
- }
-
- var con *varlink.Connection
- var address string
- var interfaceName string
-
- if len(bridge) != 0 {
- con, err = varlink.NewBridge(bridge)
-
- if err != nil {
- ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err)
- os.Exit(2)
- }
- address = "bridge:" + bridge
- interfaceName = helpFlags.Arg(0)
- } else {
- uri := helpFlags.Arg(0)
- if uri == "" && bridge == "" {
- ErrPrintf("No ADDRESS or activation or bridge\n\n")
- usage()
- }
-
- li := strings.LastIndex(uri, "/")
-
- if li == -1 {
- ErrPrintf("Invalid address '%s'\n", uri)
- os.Exit(2)
- }
-
- address = uri[:li]
-
- con, err = varlink.NewConnection(address)
-
- if err != nil {
- ErrPrintf("Cannot connect to '%s': %v\n", address, err)
- os.Exit(2)
- }
-
- interfaceName = uri[li+1:]
- }
- description, err := con.GetInterfaceDescription(interfaceName)
-
- if err != nil {
- ErrPrintf("Cannot get interface description for '%s': %v\n", interfaceName, err)
- os.Exit(2)
- }
-
- fmt.Println(description)
-}
-
-func varlink_info(args []string) {
- var err error
- infoFlags := flag.NewFlagSet("info", flag.ExitOnError)
- var help bool
- infoFlags.BoolVar(&help, "help", false, "Prints help information")
- var usage = func() { print_usage(infoFlags, "[ADDRESS]") }
- infoFlags.Usage = usage
-
- _ = infoFlags.Parse(args)
-
- if help {
- usage()
- }
-
- var con *varlink.Connection
- var address string
-
- if len(bridge) != 0 {
- con, err = varlink.NewBridge(bridge)
-
- if err != nil {
- ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err)
- os.Exit(2)
- }
- address = "bridge:" + bridge
- } else {
- address = infoFlags.Arg(0)
-
- if address == "" && bridge == "" {
- ErrPrintf("No ADDRESS or activation or bridge\n\n")
- usage()
- }
-
- con, err = varlink.NewConnection(address)
-
- if err != nil {
- ErrPrintf("Cannot connect to '%s': %v\n", address, err)
- os.Exit(2)
- }
- }
-
- var vendor, product, version, url string
- var interfaces []string
-
- err = con.GetInfo(&vendor, &product, &version, &url, &interfaces)
-
- if err != nil {
- ErrPrintf("Cannot get info for '%s': %v\n", address, err)
- os.Exit(2)
- }
-
- fmt.Printf("%s %s\n", bold.Sprint("Vendor:"), vendor)
- fmt.Printf("%s %s\n", bold.Sprint("Product:"), product)
- fmt.Printf("%s %s\n", bold.Sprint("Version:"), version)
- fmt.Printf("%s %s\n", bold.Sprint("URL:"), url)
- fmt.Printf("%s\n %s\n\n", bold.Sprint("Interfaces:"), strings.Join(interfaces[:], "\n "))
-}
-
-func main() {
- var debug bool
- var colorMode string
-
- flag.CommandLine.Usage = func() { print_usage(nil, "") }
- flag.BoolVar(&debug, "debug", false, "Enable debug output")
- flag.StringVar(&bridge, "bridge", "", "Use bridge for connection")
- flag.StringVar(&colorMode, "color", "auto", "colorize output [default: auto] [possible values: on, off, auto]")
-
- flag.Parse()
-
- if colorMode != "on" && (os.Getenv("TERM") == "" || colorMode == "off") {
- color.NoColor = true // disables colorized output
- }
-
- errorBoldRed = bold.Sprint(color.New(color.FgRed).Sprint("Error:"))
-
- switch flag.Arg(0) {
- case "info":
- varlink_info(flag.Args()[1:])
- case "help":
- varlink_help(flag.Args()[1:])
- case "call":
- varlink_call(flag.Args()[1:])
- default:
- print_usage(nil, "")
- }
-}
diff --git a/vendor/github.com/varlink/go/golang-github-varlink-go.spec b/vendor/github.com/varlink/go/golang-github-varlink-go.spec
deleted file mode 100644
index 85f5d1788..000000000
--- a/vendor/github.com/varlink/go/golang-github-varlink-go.spec
+++ /dev/null
@@ -1,44 +0,0 @@
-%global goipath github.com/varlink/go
-Version: 0
-%gometa
-
-Name: %{goname}
-Release: 1%{?dist}
-Summary: Go bindings for varlink
-License: ASL 2.0
-URL: %{gourl}
-Source0: %{gosource}
-
-%description
-Native Go bindings for the varlink protocol.
-
-%package devel
-Summary: %{summary}
-BuildArch: noarch
-
-%description devel
-%{summary}
-
-This package contains library source intended for
-building other packages which use import path with
-%{gobaseipath} prefix.
-
-%prep
-%forgesetup
-
-%build
-%gobuildroot
-
-%install
-gofiles=$(find . %{gofindfilter} -print)
-%goinstall $gofiles
-
-%check
-
-%files devel -f devel.file-list
-%license LICENSE
-%doc README.md
-
-%changelog
-* Tue Mar 20 2018 <info@varlink.org> 0-1
-- Version 0
diff --git a/vendor/github.com/varlink/go/varlink/external_test.go b/vendor/github.com/varlink/go/varlink/external_test.go
deleted file mode 100644
index d5e14b25f..000000000
--- a/vendor/github.com/varlink/go/varlink/external_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package varlink_test
-
-// test with no internal access
-
-import (
- "github.com/varlink/go/varlink"
- "os"
- "runtime"
- "testing"
- "time"
-)
-
-type VarlinkInterface struct{}
-
-func (s *VarlinkInterface) VarlinkDispatch(call varlink.Call, methodname string) error {
- return call.ReplyMethodNotImplemented(methodname)
-}
-func (s *VarlinkInterface) VarlinkGetName() string {
- return `org.example.test`
-}
-
-func (s *VarlinkInterface) VarlinkGetDescription() string {
- return "#"
-}
-
-type VarlinkInterface2 struct{}
-
-func (s *VarlinkInterface2) VarlinkDispatch(call varlink.Call, methodname string) error {
- return call.ReplyMethodNotImplemented(methodname)
-}
-func (s *VarlinkInterface2) VarlinkGetName() string {
- return `org.example.test2`
-}
-
-func (s *VarlinkInterface2) VarlinkGetDescription() string {
- return "#"
-}
-
-func TestRegisterService(t *testing.T) {
- newTestInterface := new(VarlinkInterface)
- service, err := varlink.NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
- if err != nil {
- t.Fatalf("NewService(): %v", err)
- }
-
- if err := service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("Couldn't register service: %v", err)
- }
-
- if err := service.RegisterInterface(newTestInterface); err == nil {
- t.Fatal("Could register service twice")
- }
-
- defer func() { service.Shutdown() }()
-
- servererror := make(chan error)
-
- go func() {
- servererror <- service.Listen("unix:varlinkexternal_TestRegisterService", 0)
- }()
-
- time.Sleep(time.Second / 5)
-
- n := new(VarlinkInterface2)
-
- if err := service.RegisterInterface(n); err == nil {
- t.Fatal("Could register service while running")
- }
- time.Sleep(time.Second / 5)
- service.Shutdown()
-
- if err := <-servererror; err != nil {
- t.Fatalf("service.Listen(): %v", err)
- }
-}
-
-func TestUnix(t *testing.T) {
- newTestInterface := new(VarlinkInterface)
- service, err := varlink.NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- if err != nil {
- t.Fatalf("NewService(): %v", err)
- }
-
- if err := service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("RegisterInterface(): %v", err)
- }
-
- servererror := make(chan error)
-
- go func() {
- servererror <- service.Listen("unix:varlinkexternal_TestUnix", 0)
- }()
-
- time.Sleep(time.Second / 5)
- service.Shutdown()
-
- if err := <-servererror; err != nil {
- t.Fatalf("service.Listen(): %v", err)
- }
-}
-
-func TestInvalidAddress(t *testing.T) {
- newTestInterface := new(VarlinkInterface)
- service, err := varlink.NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- if err != nil {
- t.Fatalf("NewService(): %v", err)
- }
-
- if err = service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("RegisterInterface(): %v", err)
- }
-
- if err = service.Listen("foo", 0); err == nil {
- t.Fatalf("service.Listen() should error")
- }
-
- if err = service.Listen("", 0); err == nil {
- t.Fatalf("service.Listen() should error")
- }
-
- if err = service.Listen("unix", 0); err == nil {
- t.Fatalf("service.Listen() should error")
- }
-}
-
-func TestAnonUnix(t *testing.T) {
- if runtime.GOOS != "linux" {
- return
- }
-
- newTestInterface := new(VarlinkInterface)
- service, err := varlink.NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- if err != nil {
- t.Fatalf("NewService(): %v", err)
- }
-
- if err := service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("RegisterInterface(): %v", err)
- }
-
- servererror := make(chan error)
-
- go func() {
- servererror <- service.Listen("unix:@varlinkexternal_TestAnonUnix", 0)
- }()
-
- time.Sleep(time.Second / 5)
- service.Shutdown()
-
- if err := <-servererror; err != nil {
- t.Fatalf("service.Listen(): %v", err)
- }
-}
-
-func TestListenFDSNotInt(t *testing.T) {
- newTestInterface := new(VarlinkInterface)
- service, err := varlink.NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- if err != nil {
- t.Fatalf("NewService(): %v", err)
- }
-
- if err := service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("Couldn't register service: %v", err)
- }
- os.Setenv("LISTEN_FDS", "foo")
- os.Setenv("LISTEN_PID", string(os.Getpid()))
-
- servererror := make(chan error)
-
- go func() {
- servererror <- service.Listen("unix:varlinkexternal_TestListenFDSNotInt", 0)
- }()
-
- time.Sleep(time.Second / 5)
- service.Shutdown()
-
- err = <-servererror
-
- if err != nil {
- t.Fatalf("service.Run(): %v", err)
- }
-}
diff --git a/vendor/github.com/varlink/go/varlink/idl/idl_test.go b/vendor/github.com/varlink/go/varlink/idl/idl_test.go
deleted file mode 100644
index a3377bec0..000000000
--- a/vendor/github.com/varlink/go/varlink/idl/idl_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package idl
-
-import (
- "fmt"
- "runtime"
- "testing"
-)
-
-/*
-func expect(t *testing.T, expected string, returned string) {
- if strings.Compare(returned, expected) != 0 {
- t.Fatalf("Expected(%d): `%s`\nGot(%d): `%s`\n",
- len(expected), expected,
- len(returned), returned)
- }
-}
-*/
-
-func testParse(t *testing.T, pass bool, description string) {
- _, _, line, _ := runtime.Caller(1)
-
- t.Run(fmt.Sprintf("Line-%d", line), func(t *testing.T) {
- midl, err := New(description)
- if pass {
- if err != nil {
- t.Fatalf("generateTemplate(`%s`): %v", description, err)
- }
- if len(midl.Name) <= 0 {
- t.Fatalf("generateTemplate(`%s`): returned no pkgname", description)
- }
- }
- if !pass && (err == nil) {
- t.Fatalf("generateTemplate(`%s`): did not fail", description)
- }
- })
-}
-
-func TestOneMethod(t *testing.T) {
- testParse(t, true, "interface foo.bar\nmethod Foo()->()")
-}
-
-func TestOneMethodNoType(t *testing.T) {
- testParse(t, false, "interface foo.bar\nmethod Foo()->(b:)")
-}
-
-func TestDomainNames(t *testing.T) {
- testParse(t, true, "interface org.varlink.service\nmethod F()->()")
- testParse(t, true, "interface com.example.0example\nmethod F()->()")
- testParse(t, true, "interface com.example.example-dash\nmethod F()->()")
- testParse(t, true, "interface xn--lgbbat1ad8j.example.algeria\nmethod F()->()")
- testParse(t, false, "interface com.-example.leadinghyphen\nmethod F()->()")
- testParse(t, false, "interface com.example-.danglinghyphen-\nmethod F()->()")
- testParse(t, false, "interface Com.example.uppercase-toplevel\nmethod F()->()")
- testParse(t, false, "interface Co9.example.number-toplevel\nmethod F()->()")
- testParse(t, false, "interface 1om.example.number-toplevel\nmethod F()->()")
- testParse(t, false, "interface com.Example\nmethod F()->()")
- var name string
- for i := 0; i < 255; i++ {
- name += "a"
- }
- testParse(t, false, "interface com.example.toolong"+name+"\nmethod F()->()")
- testParse(t, false, "interface xn--example.toolong"+name+"\nmethod F()->()")
-}
-
-func TestNoMethod(t *testing.T) {
- testParse(t, false, `
-interface org.varlink.service
- type Interface (name: string, types: []Type, methods: []Method)
- type Property (key: string, value: string)
-`)
-}
-
-func TestTypeNoArgs(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I ()\nmethod F()->()")
-}
-
-func TestTypeOneArg(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (b:bool)\nmethod F()->()")
-}
-
-func TestBasicTypes(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (b:bool)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (b:string)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (b:float)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (b:int)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (b:object)\nmethod F()->()")
-}
-
-func TestTypeOneArray(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (b:[]bool)\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (b:bool[ ])\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (b:bool[1])\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (b:bool[ 1 ])\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (b:bool[ 1 1 ])\nmethod F()->()")
-}
-
-func TestFieldnames(t *testing.T) {
- testParse(t, false, "interface foo.bar\n type I (Test:[]bool)\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (_test:[]bool)\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (Äest:[]bool)\nmethod F()->()")
-}
-
-func TestNestedStructs(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I ( b: [](foo: bool, bar: bool, baz: int) )\nmethod F()->()")
-}
-
-func TestEnum(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (b:(foo, bar, baz))\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (foo, bar, baz : bool)\nmethod F()->()")
-}
-
-func TestMap(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (m: [string]string)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (m: [string]int)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (m: [string]())\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (m: [int]string)\nmethod F()->()")
-}
-
-func TestMaybe(t *testing.T) {
- testParse(t, true, "interface foo.bar\n type I (m: ?string)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (m: ?[string]?int)\nmethod F()->()")
- testParse(t, true, "interface foo.bar\n type I (m: ?[]?int)\nmethod F()->()")
- testParse(t, false, "interface foo.bar\n type I (m: ??int)\nmethod F()->()")
-}
-
-func TestIncomplete(t *testing.T) {
- testParse(t, false, "interfacef foo.bar\nmethod F()->()")
- testParse(t, false, "interface foo.bar\nmethod F()->()\ntype I (b: bool")
- testParse(t, false, "interface foo.bar\nmethod F()->(")
- testParse(t, false, "interface foo.bar\nmethod F(")
- testParse(t, false, "interface foo.bar\nmethod ()->()")
- testParse(t, false, "interface foo.bar\nmethod F->()\n")
- testParse(t, false, "interface foo.bar\nmethod F()->\n")
- testParse(t, false, "interface foo.bar\nmethod F()>()\n")
- testParse(t, false, "interface foo.bar\nmethod F()->()\ntype (b: bool)")
- testParse(t, false, "interface foo.bar\nmethod F()->()\nerror (b: bool)")
- testParse(t, false, "interface foo.bar\nmethod F()->()\n dfghdrg")
-}
-
-func TestDuplicate(t *testing.T) {
- testParse(t, false, `
-interface foo.example
- type Device()
- type Device()
- type T()
- type T()
- method F() -> ()
- method F() -> ()
-`)
-}
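
The deleted tests above drive the varlink IDL parser through a single testParse helper. A minimal sketch of what such a helper could look like, assuming a hypothetical idl.New(description) entry point that returns an error for invalid interface descriptions (the real helper and parser API may differ):

```go
package idl_test

import (
	"testing"

	"github.com/varlink/go/varlink/idl"
)

// testParse asserts whether an interface description parses successfully.
// Hypothetical helper; the deleted tests above use an equivalent function.
func testParse(t *testing.T, shouldParse bool, description string) {
	t.Helper()
	_, err := idl.New(description) // assumed parser entry point
	if shouldParse && err != nil {
		t.Fatalf("expected %q to parse, got error: %v", description, err)
	}
	if !shouldParse && err == nil {
		t.Fatalf("expected %q to fail parsing", description)
	}
}
```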
diff --git a/vendor/github.com/varlink/go/varlink/varlink_test.go b/vendor/github.com/varlink/go/varlink/varlink_test.go
deleted file mode 100644
index 9e6d0a1f4..000000000
--- a/vendor/github.com/varlink/go/varlink/varlink_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package varlink
-
-// tests with access to internals
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strings"
- "testing"
-)
-
-func expect(t *testing.T, expected string, returned string) {
- if strings.Compare(returned, expected) != 0 {
- t.Fatalf("Expected(%d): `%s`\nGot(%d): `%s`\n",
- len(expected), expected,
- len(returned), strings.Replace(returned, "\000", "`+\"\\000\"+`", -1))
- }
-}
-
-func TestService(t *testing.T) {
- service, _ := NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- t.Run("ZeroMessage", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- if err := service.HandleMessage(nil, r, w, []byte{0}); err == nil {
- t.Fatal("HandleMessage returned non-error")
- }
- })
-
- t.Run("InvalidJson", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"foo.GetInterfaceDescription" fdgdfg}`)
- if err := service.HandleMessage(nil, r, w, msg); err == nil {
- t.Fatal("HandleMessage returned no error on invalid json")
- }
- })
-
- t.Run("WrongInterface", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"foo.GetInterfaceDescription"}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatal("HandleMessage returned error on wrong interface")
- }
- expect(t, `{"parameters":{"interface":"foo"},"error":"org.varlink.service.InterfaceNotFound"}`+"\000",
- b.String())
- })
-
- t.Run("InvalidMethod", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"InvalidMethod"}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatal("HandleMessage returned error on invalid method")
- }
- expect(t, `{"parameters":{"parameter":"method"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
- b.String())
- })
-
- t.Run("WrongMethod", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.WrongMethod"}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatal("HandleMessage returned error on wrong method")
- }
- expect(t, `{"parameters":{"method":"WrongMethod"},"error":"org.varlink.service.MethodNotFound"}`+"\000",
- b.String())
- })
-
- t.Run("GetInterfaceDescriptionNullParameters", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters": null}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"parameter":"parameters"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
- b.String())
- })
-
- t.Run("GetInterfaceDescriptionNoInterface", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{}}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
- b.String())
- })
-
- t.Run("GetInterfaceDescriptionWrongInterface", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"foo"}}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
- b.String())
- })
-
- t.Run("GetInterfaceDescription", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"org.varlink.service"}}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"description":"# The Varlink Service Interface is provided by every varlink service. It\n# describes the service and the interfaces it implements.\ninterface org.varlink.service\n\n# Get a list of all the interfaces a service provides and information\n# about the implementation.\nmethod GetInfo() -\u003e (\n vendor: string,\n product: string,\n version: string,\n url: string,\n interfaces: []string\n)\n\n# Get the description of an interface that is implemented by this service.\nmethod GetInterfaceDescription(interface: string) -\u003e (description: string)\n\n# The requested interface was not found.\nerror InterfaceNotFound (interface: string)\n\n# The requested method was not found\nerror MethodNotFound (method: string)\n\n# The interface defines the requested method, but the service does not\n# implement it.\nerror MethodNotImplemented (method: string)\n\n# One of the passed parameters is invalid.\nerror InvalidParameter (parameter: string)"}}`+"\000",
- b.String())
- })
-
- t.Run("GetInfo", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.varlink.service.GetInfo"}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"vendor":"Varlink","product":"Varlink Test","version":"1","url":"https://github.com/varlink/go/varlink","interfaces":["org.varlink.service"]}}`+"\000",
- b.String())
- })
-}
-
-type VarlinkInterface struct{}
-
-func (s *VarlinkInterface) VarlinkDispatch(call Call, methodname string) error {
- switch methodname {
- case "Ping":
- if !call.WantsMore() {
- return fmt.Errorf("More flag not passed")
- }
- if call.IsOneway() {
- return fmt.Errorf("OneShot flag set")
- }
- call.Continues = true
- if err := call.Reply(nil); err != nil {
- return err
- }
- if err := call.Reply(nil); err != nil {
- return err
- }
- call.Continues = false
- if err := call.Reply(nil); err != nil {
- return err
- }
- return nil
-
- case "PingError":
- return call.ReplyError("org.example.test.PingError", nil)
- }
-
- call.Continues = true
- if err := call.Reply(nil); err == nil {
- return fmt.Errorf("call.Reply did not fail for Continues/More mismatch")
- }
- call.Continues = false
-
- if err := call.ReplyError("WrongName", nil); err == nil {
- return fmt.Errorf("call.ReplyError accepted invalid error name")
- }
-
- if err := call.ReplyError("org.varlink.service.MethodNotImplemented", nil); err == nil {
- return fmt.Errorf("call.ReplyError accepted org.varlink.service error")
- }
-
- return call.ReplyMethodNotImplemented(methodname)
-}
-func (s *VarlinkInterface) VarlinkGetName() string {
- return `org.example.test`
-}
-
-func (s *VarlinkInterface) VarlinkGetDescription() string {
- return "#"
-}
-
-func TestMoreService(t *testing.T) {
- newTestInterface := new(VarlinkInterface)
-
- service, _ := NewService(
- "Varlink",
- "Varlink Test",
- "1",
- "https://github.com/varlink/go/varlink",
- )
-
- if err := service.RegisterInterface(newTestInterface); err != nil {
- t.Fatalf("Couldn't register service: %v", err)
- }
-
- t.Run("MethodNotImplemented", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.example.test.Pingf"}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"parameters":{"method":"Pingf"},"error":"org.varlink.service.MethodNotImplemented"}`+"\000",
- b.String())
- })
-
- t.Run("PingError", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.example.test.PingError", "more" : true}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"error":"org.example.test.PingError"}`+"\000",
- b.String())
- })
- t.Run("MoreTest", func(t *testing.T) {
- var br bytes.Buffer
- r := bufio.NewReader(&br)
- var b bytes.Buffer
- w := bufio.NewWriter(&b)
- msg := []byte(`{"method":"org.example.test.Ping", "more" : true}`)
- if err := service.HandleMessage(nil, r, w, msg); err != nil {
- t.Fatalf("HandleMessage returned error: %v", err)
- }
- expect(t, `{"continues":true}`+"\000"+`{"continues":true}`+"\000"+`{}`+"\000",
- b.String())
- })
-}
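
The expectations above also document the wire framing: every varlink call and reply is a JSON object terminated by a NUL byte. A small illustrative sketch of framing a call by hand (the library's connection handling normally does this for you):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	call := map[string]interface{}{
		"method": "org.varlink.service.GetInfo",
	}
	payload, _ := json.Marshal(call)

	var buf bytes.Buffer
	buf.Write(payload)
	buf.WriteByte(0) // varlink messages are NUL-terminated on the wire
	fmt.Printf("%q\n", buf.String())
}
```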
diff --git a/vendor/github.com/vbatts/tar-split/README.md b/vendor/github.com/vbatts/tar-split/README.md
deleted file mode 100644
index fe997f69b..000000000
--- a/vendor/github.com/vbatts/tar-split/README.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# tar-split
-
-[![Build Status](https://travis-ci.org/vbatts/tar-split.svg?branch=master)](https://travis-ci.org/vbatts/tar-split)
-[![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/tar-split)](https://goreportcard.com/report/github.com/vbatts/tar-split)
-
-Pristinely disassembling a tar archive, and stashing needed raw bytes and offsets to reassemble a validating original archive.
-
-## Docs
-
-Code API for libraries provided by `tar-split`:
-
-* https://godoc.org/github.com/vbatts/tar-split/tar/asm
-* https://godoc.org/github.com/vbatts/tar-split/tar/storage
-* https://godoc.org/github.com/vbatts/tar-split/archive/tar
-
-## Install
-
-The command line utility is installable via:
-
-```bash
-go get github.com/vbatts/tar-split/cmd/tar-split
-```
-
-## Usage
-
-For CLI usage, see its [README.md](cmd/tar-split/README.md).
-For the library, see the [docs](#docs).
-
-## Demo
-
-### Basic disassembly and assembly
-
-This demonstrates the `tar-split` command and how to assemble a tar archive from the `tar-data.json.gz`.
-
-
-![basic cmd demo thumbnail](https://i.ytimg.com/vi/vh5wyjIOBtc/2.jpg?time=1445027151805)
-[youtube video of basic command demo](https://youtu.be/vh5wyjIOBtc)
-
-### Docker layer preservation
-
-This demonstrates the tar-split integration for docker-1.8, providing consistent tar archives for the image layer content.
-
-![docker tar-split demo](https://i.ytimg.com/vi_webp/vh5wyjIOBtc/default.webp)
-[youtube video of docker layer checksums](https://youtu.be/tV_Dia8E8xw)
-
-## Caveat
-
-Eventually this should detect TAR archives for which this is not possible.
-
-For example, stored sparse files that have "holes" in them will be read as a
-contiguous file, though the archive contents may be recorded in sparse format.
-Therefore, when adding the file payload to a reassembled tar, to achieve
-identical output, the file payload would need to be precisely re-sparsified.
-This is not something I seek to fix immediately; I would rather have an alert
-that precise reassembly is not possible.
-(see more http://www.gnu.org/software/tar/manual/html_node/Sparse-Formats.html)
-
-
-Another caveat: while tar archives support having multiple file entries for the
-same path, we will not support this feature. If there is more than one entry
-with the same path, expect an error (like `ErrDuplicatePath`) or a resulting tar
-stream that does not validate your original checksum/signature.
-
-## Contract
-
-Do not break the API of stdlib `archive/tar` in our fork (ideally find an upstream mergeable solution).
-
-## Std Version
-
-The version of the golang stdlib `archive/tar` is from go1.11.
-It is minimally extended to expose the raw bytes of the TAR, rather than just the marshalled headers and file stream.
-
-
-## Design
-
-See the [design](concept/DESIGN.md).
-
-## Stored Metadata
-
-Since the raw bytes of the headers and padding are stored, you may be wondering
-what the size implications are. The headers are at least 512 bytes per
-file (sometimes more), at least 1024 null bytes on the end, and then various
-padding. This makes for a constant linear growth in the stored metadata, with a
-naive storage implementation.
-
-First we'll get an archive to work with. For repeatability, we'll make an
-archive from what you've just cloned:
-
-```bash
-git archive --format=tar -o tar-split.tar HEAD .
-```
-
-```bash
-$ go get github.com/vbatts/tar-split/cmd/tar-split
-$ tar-split checksize ./tar-split.tar
-inspecting "tar-split.tar" (size 210k)
- -- number of files: 50
- -- size of metadata uncompressed: 53k
- -- size of gzip compressed metadata: 3k
-```
-
-So, assuming you've managed the extraction of the archive yourself and reuse
-the file payloads from a relative path, the only additional storage
-implication is as little as 3kb.
-
-But let's look at a larger archive, with many files.
-
-```bash
-$ ls -sh ./d.tar
-1.4G ./d.tar
-$ tar-split checksize ~/d.tar
-inspecting "/home/vbatts/d.tar" (size 1420749k)
- -- number of files: 38718
- -- size of metadata uncompressed: 43261k
- -- size of gzip compressed metadata: 2251k
-```
-
-Here, an archive with 38,718 files has a compressed footprint of about 2mb.
-
-Rolling up the null bytes at the end of the archive, we will assume a
-bytes-per-file rate for the storage implications.
-
-| uncompressed | compressed |
-| :----------: | :--------: |
-| ~ 1kb per/file | 0.06kb per/file |
-
-
-## What's Next?
-
-* More implementations of storage Packer and Unpacker
-* More implementations of FileGetter and FilePutter
-* would be interesting to have an assembler stream that implements `io.Seeker`
-
-
-## License
-
-See [LICENSE](LICENSE)
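
The library side of the deleted README can be summarized with a rough sketch of a disassemble/reassemble round trip, assuming the asm and storage APIs documented at the godoc links above (NewInputTarStream, NewOutputTarStream, NewJSONPacker/NewJSONUnpacker, NewBufferFileGetPutter); treat the exact signatures as assumptions:

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	f, err := os.Open("tar-split.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Record the raw metadata (headers, padding) as JSON while the original
	// archive streams through, keeping file payloads in memory.
	var metadata bytes.Buffer
	payloads := storage.NewBufferFileGetPutter()
	its, err := asm.NewInputTarStream(f, storage.NewJSONPacker(&metadata), payloads)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, its); err != nil {
		log.Fatal(err)
	}

	// Reassemble a byte-identical archive from the stored metadata plus
	// the retained file payloads.
	ots := asm.NewOutputTarStream(payloads, storage.NewJSONUnpacker(&metadata))
	defer ots.Close()
	n, err := io.Copy(ioutil.Discard, ots)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reassembled %d bytes", n)
}
```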
diff --git a/vendor/github.com/vbauerster/mpb/.gitignore b/vendor/github.com/vbauerster/mpb/.gitignore
new file mode 100644
index 000000000..63bd91672
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/.gitignore
@@ -0,0 +1,5 @@
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/vbauerster/mpb/.travis.yml b/vendor/github.com/vbauerster/mpb/.travis.yml
new file mode 100644
index 000000000..c982d1f90
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+sudo: false
+go:
+ - 1.10.x
+ - tip
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - go test -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/vbauerster/mpb/README.md b/vendor/github.com/vbauerster/mpb/README.md
index 9b760647e..f96857c47 100644
--- a/vendor/github.com/vbauerster/mpb/README.md
+++ b/vendor/github.com/vbauerster/mpb/README.md
@@ -31,8 +31,6 @@ _Note:_ it is preferable to go get from github.com, rather than gopkg.in. See is
p := mpb.New(
// override default (80) width
mpb.WithWidth(64),
- // override default "[=>-]" format
- mpb.WithFormat("╢▌▌░╟"),
// override default 120ms refresh rate
mpb.WithRefreshRate(180*time.Millisecond),
)
@@ -41,6 +39,8 @@ _Note:_ it is preferable to go get from github.com, rather than gopkg.in. See is
name := "Single Bar:"
// adding a single bar
bar := p.AddBar(int64(total),
+ // override default "[=>-]" style
+ mpb.BarStyle("╢▌▌░╟"),
mpb.PrependDecorators(
// display our name with one space on the right
decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
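
The two README hunks above move the bar style from a container-level option (WithFormat) to a per-bar option (BarStyle). A minimal sketch of the resulting setup, reusing the names from the README example:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	total := 100
	p := mpb.New(
		mpb.WithWidth(64),                         // override default (80) width
		mpb.WithRefreshRate(180*time.Millisecond), // override default 120ms refresh rate
	)

	name := "Single Bar:"
	bar := p.AddBar(int64(total),
		mpb.BarStyle("╢▌▌░╟"), // bar style is now a per-bar option
		mpb.PrependDecorators(
			// display the name with one space on the right
			decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
		),
	)

	for i := 0; i < total; i++ {
		time.Sleep(10 * time.Millisecond) // simulate a unit of work
		bar.Increment()
	}
	p.Wait()
}
```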
diff --git a/vendor/github.com/vbauerster/mpb/bar.go b/vendor/github.com/vbauerster/mpb/bar.go
index 5a506fc84..a304a87cb 100644
--- a/vendor/github.com/vbauerster/mpb/bar.go
+++ b/vendor/github.com/vbauerster/mpb/bar.go
@@ -2,6 +2,7 @@ package mpb
import (
"bytes"
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -11,21 +12,8 @@ import (
"unicode/utf8"
"github.com/vbauerster/mpb/decor"
- "github.com/vbauerster/mpb/internal"
)
-const (
- rLeft = iota
- rFill
- rTip
- rEmpty
- rRight
-)
-
-const formatLen = 5
-
-type barRunes [formatLen]rune
-
// Bar represents a progress Bar
type Bar struct {
priority int
@@ -45,15 +33,30 @@ type Bar struct {
shutdown chan struct{}
}
+// Filler interface.
+// Bar renders by calling the Filler's Fill method. You can have any
+// kind of bar by implementing this interface and passing it to the
+// Add method.
+type Filler interface {
+ Fill(w io.Writer, width int, s *decor.Statistics)
+}
+
+// FillerFunc is a function type adapter to convert a function into a Filler.
+type FillerFunc func(w io.Writer, width int, stat *decor.Statistics)
+
+func (f FillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) {
+ f(w, width, stat)
+}
+
type (
bState struct {
+ filler Filler
id int
width int
+ alignment int
total int64
current int64
- runes barRunes
- trimLeftSpace bool
- trimRightSpace bool
+ trimSpace bool
toComplete bool
removeOnComplete bool
barClearOnComplete bool
@@ -73,8 +76,8 @@ type (
runningBar *Bar
}
refill struct {
- char rune
- till int64
+ r rune
+ limit int64
}
frameReader struct {
io.Reader
@@ -84,14 +87,20 @@ type (
}
)
-func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, options ...BarOption) *Bar {
- if total <= 0 {
- total = time.Now().Unix()
- }
+func newBar(
+ ctx context.Context,
+ wg *sync.WaitGroup,
+ filler Filler,
+ id, width int,
+ total int64,
+ options ...BarOption,
+) *Bar {
s := &bState{
+ filler: filler,
id: id,
priority: id,
+ width: width,
total: total,
}
@@ -104,6 +113,9 @@ func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, opt
s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
+ if s.newLineExtendFn != nil {
+ s.bufNL = bytes.NewBuffer(make([]byte, 0, s.width))
+ }
b := &Bar{
priority: s.priority,
@@ -121,11 +133,7 @@ func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, opt
b.priority = b.runningBar.priority
}
- if s.newLineExtendFn != nil {
- s.bufNL = bytes.NewBuffer(make([]byte, 0, s.width))
- }
-
- go b.serve(wg, s, cancel)
+ go b.serve(ctx, wg, s)
return b
}
@@ -178,52 +186,42 @@ func (b *Bar) Current() int64 {
}
// SetTotal sets total dynamically.
-// Set final to true, when total is known, it will trigger bar complete event.
-func (b *Bar) SetTotal(total int64, final bool) bool {
+// Set complete to true to trigger the bar complete event now.
+func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
- if total > 0 {
- s.total = total
- }
- if final {
+ s.total = total
+ if complete && !s.toComplete {
s.current = s.total
s.toComplete = true
}
}:
- return true
case <-b.done:
- return false
}
}
-// SetRefill sets fill rune to r, up until n.
-func (b *Bar) SetRefill(n int, r rune) {
- if n <= 0 {
- return
- }
+// SetRefill sets refill, if supported by underlying Filler.
+func (b *Bar) SetRefill(amount int64) {
b.operateState <- func(s *bState) {
- s.refill = &refill{r, int64(n)}
+ if f, ok := s.filler.(interface{ SetRefill(int64) }); ok {
+ f.SetRefill(amount)
+ }
}
}
-// RefillBy is deprecated, use SetRefill
-func (b *Bar) RefillBy(n int, r rune) {
- b.SetRefill(n, r)
-}
-
// Increment is a shorthand for b.IncrBy(1).
func (b *Bar) Increment() {
b.IncrBy(1)
}
// IncrBy increments progress bar by amount of n.
-// wdd is optional work duration i.e. time.Since(start),
-// which expected to be provided, if any ewma based decorator is used.
+// wdd is an optional work duration, i.e. time.Since(start), which is
+// expected to be provided if any ewma based decorator is used.
func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
select {
case b.operateState <- func(s *bState) {
s.current += int64(n)
- if s.current >= s.total {
+ if s.total > 0 && s.current >= s.total {
s.current = s.total
s.toComplete = true
}
@@ -238,9 +236,9 @@ func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
// Completed reports whether the bar is in completed state.
func (b *Bar) Completed() bool {
// omit select here, because primary usage of the method is for loop
- // condition, like for !bar.Completed() {...}
- // so when toComplete=true it is called once (at which time, the bar is still alive),
- // then quits the loop and never suppose to be called afterwards.
+ // condition, like for !bar.Completed() {...} so when toComplete=true
+ // it is called once (at which time, the bar is still alive), then
+ // quits the loop and is never supposed to be called afterwards.
return <-b.boolCh
}
@@ -253,8 +251,9 @@ func (b *Bar) wSyncTable() [][]chan int {
}
}
-func (b *Bar) serve(wg *sync.WaitGroup, s *bState, cancel <-chan struct{}) {
+func (b *Bar) serve(ctx context.Context, wg *sync.WaitGroup, s *bState) {
defer wg.Done()
+ cancel := ctx.Done()
for {
select {
case op := <-b.operateState:
@@ -322,8 +321,6 @@ func (b *Bar) render(debugOut io.Writer, tw int) {
}
func (s *bState) draw(termWidth int) io.Reader {
- defer s.bufA.WriteByte('\n')
-
if s.panicMsg != "" {
return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", termWidth), s.panicMsg))
}
@@ -338,77 +335,32 @@ func (s *bState) draw(termWidth int) io.Reader {
s.bufA.WriteString(d.Decor(stat))
}
- prependCount := utf8.RuneCount(s.bufP.Bytes())
- appendCount := utf8.RuneCount(s.bufA.Bytes())
-
if s.barClearOnComplete && s.completeFlushed {
+ s.bufA.WriteByte('\n')
return io.MultiReader(s.bufP, s.bufA)
}
- s.fillBar(s.width)
- barCount := utf8.RuneCount(s.bufB.Bytes())
- totalCount := prependCount + barCount + appendCount
- if spaceCount := 0; totalCount > termWidth {
- if !s.trimLeftSpace {
- spaceCount++
- }
- if !s.trimRightSpace {
- spaceCount++
- }
- s.fillBar(termWidth - prependCount - appendCount - spaceCount)
- }
-
- return io.MultiReader(s.bufP, s.bufB, s.bufA)
-}
-
-func (s *bState) fillBar(width int) {
- defer func() {
- s.bufB.WriteRune(s.runes[rRight])
- if !s.trimRightSpace {
- s.bufB.WriteByte(' ')
- }
- }()
+ prependCount := utf8.RuneCount(s.bufP.Bytes())
+ appendCount := utf8.RuneCount(s.bufA.Bytes())
- s.bufB.Reset()
- if !s.trimLeftSpace {
+ if !s.trimSpace {
+ // reserve space for edge spaces
+ termWidth -= 2
s.bufB.WriteByte(' ')
}
- s.bufB.WriteRune(s.runes[rLeft])
- if width <= 2 {
- return
- }
-
- // bar s.width without leftEnd and rightEnd runes
- barWidth := width - 2
-
- completedWidth := internal.Percentage(s.total, s.current, int64(barWidth))
- if s.refill != nil {
- till := internal.Percentage(s.total, s.refill.till, int64(barWidth))
- // append refill rune
- var i int64
- for i = 0; i < till; i++ {
- s.bufB.WriteRune(s.refill.char)
- }
- for i = till; i < completedWidth; i++ {
- s.bufB.WriteRune(s.runes[rFill])
- }
+ if prependCount+s.width+appendCount > termWidth {
+ s.filler.Fill(s.bufB, termWidth-prependCount-appendCount, stat)
} else {
- var i int64
- for i = 0; i < completedWidth; i++ {
- s.bufB.WriteRune(s.runes[rFill])
- }
+ s.filler.Fill(s.bufB, s.width, stat)
}
- if completedWidth < int64(barWidth) && completedWidth > 0 {
- _, size := utf8.DecodeLastRune(s.bufB.Bytes())
- s.bufB.Truncate(s.bufB.Len() - size)
- s.bufB.WriteRune(s.runes[rTip])
+ if !s.trimSpace {
+ s.bufB.WriteByte(' ')
}
- for i := completedWidth; i < int64(barWidth); i++ {
- s.bufB.WriteRune(s.runes[rEmpty])
- }
+ s.bufA.WriteByte('\n')
+ return io.MultiReader(s.bufP, s.bufB, s.bufA)
}
func (s *bState) wSyncTable() [][]chan int {
@@ -442,14 +394,6 @@ func newStatistics(s *bState) *decor.Statistics {
}
}
-func strToBarRunes(format string) (array barRunes) {
- for i, n := 0, 0; len(format) > 0; i++ {
- array[i], n = utf8.DecodeRuneInString(format)
- format = format[n:]
- }
- return
-}
-
func countLines(b []byte) int {
return bytes.Count(b, []byte("\n"))
}
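
The new Filler interface above is the extension point for custom bar kinds: anything with a Fill(io.Writer, int, *decor.Statistics) method can be handed to Progress.Add (see the progress.go hunk further down). A minimal sketch of a custom percentage-only filler using the FillerFunc adapter:

```go
package main

import (
	"fmt"
	"io"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()

	// A trivial Filler that prints the completion percentage in the space
	// the bar would normally occupy.
	percentFiller := mpb.FillerFunc(func(w io.Writer, width int, stat *decor.Statistics) {
		fmt.Fprintf(w, "%*.1f%%", width-1, 100*float64(stat.Current)/float64(stat.Total))
	})

	total := 50
	bar := p.Add(int64(total), percentFiller)
	for i := 0; i < total; i++ {
		time.Sleep(20 * time.Millisecond) // simulate work
		bar.Increment()
	}
	p.Wait()
}
```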
diff --git a/vendor/github.com/vbauerster/mpb/bar_filler.go b/vendor/github.com/vbauerster/mpb/bar_filler.go
new file mode 100644
index 000000000..4e9285ca5
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/bar_filler.go
@@ -0,0 +1,111 @@
+package mpb
+
+import (
+ "io"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/decor"
+ "github.com/vbauerster/mpb/internal"
+)
+
+const (
+ rLeft = iota
+ rFill
+ rTip
+ rEmpty
+ rRight
+ rRevTip
+ rRefill
+)
+
+var defaultBarStyle = "[=>-]<+"
+
+type barFiller struct {
+ format [][]byte
+ refillAmount int64
+ reverse bool
+}
+
+func newDefaultBarFiller() Filler {
+ bf := &barFiller{
+ format: make([][]byte, utf8.RuneCountInString(defaultBarStyle)),
+ }
+ bf.setStyle(defaultBarStyle)
+ return bf
+}
+
+func (s *barFiller) setStyle(style string) {
+ if !utf8.ValidString(style) {
+ return
+ }
+ src := make([][]byte, 0, utf8.RuneCountInString(style))
+ for _, r := range style {
+ src = append(src, []byte(string(r)))
+ }
+ copy(s.format, src)
+}
+
+func (s *barFiller) setReverse() {
+ s.reverse = true
+}
+
+func (s *barFiller) SetRefill(amount int64) {
+ s.refillAmount = amount
+}
+
+func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
+
+ // don't count rLeft and rRight [brackets]
+ width -= 2
+ if width < 2 {
+ return
+ }
+
+ w.Write(s.format[rLeft])
+ if width == 2 {
+ w.Write(s.format[rRight])
+ return
+ }
+
+ bb := make([][]byte, width)
+
+ cwidth := int(internal.Percentage(stat.Total, stat.Current, int64(width)))
+
+ for i := 0; i < cwidth; i++ {
+ bb[i] = s.format[rFill]
+ }
+
+ if s.refillAmount > 0 {
+ var rwidth int
+ if s.refillAmount > stat.Current {
+ rwidth = cwidth
+ } else {
+ rwidth = int(internal.Percentage(stat.Total, int64(s.refillAmount), int64(width)))
+ }
+ for i := 0; i < rwidth; i++ {
+ bb[i] = s.format[rRefill]
+ }
+ }
+
+ if cwidth > 0 && cwidth < width {
+ bb[cwidth-1] = s.format[rTip]
+ }
+
+ for i := cwidth; i < width; i++ {
+ bb[i] = s.format[rEmpty]
+ }
+
+ if s.reverse {
+ if cwidth > 0 && cwidth < width {
+ bb[cwidth-1] = s.format[rRevTip]
+ }
+ for i := len(bb) - 1; i >= 0; i-- {
+ w.Write(bb[i])
+ }
+ } else {
+ for i := 0; i < len(bb); i++ {
+ w.Write(bb[i])
+ }
+ }
+ w.Write(s.format[rRight])
+}
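
The refill handling above is what the reworked *Bar.SetRefill(int64) from the bar.go hunk feeds into: progress up to the refill amount is drawn with the style's refill rune ('+') instead of the fill rune. A small sketch for a resumed transfer:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	const total, resumed = 100, 40

	p := mpb.New()
	bar := p.AddBar(int64(total), mpb.BarStyle("[=>-]<+"))

	// Mark the already-transferred portion with the refill rune ('+'),
	// then account for it before continuing.
	bar.SetRefill(int64(resumed))
	bar.IncrBy(resumed)

	for i := resumed; i < total; i++ {
		time.Sleep(10 * time.Millisecond) // simulate the remaining work
		bar.Increment()
	}
	p.Wait()
}
```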
diff --git a/vendor/github.com/vbauerster/mpb/bar_option.go b/vendor/github.com/vbauerster/mpb/bar_option.go
index e33bce4da..e9a4bd2a7 100644
--- a/vendor/github.com/vbauerster/mpb/bar_option.go
+++ b/vendor/github.com/vbauerster/mpb/bar_option.go
@@ -6,11 +6,10 @@ import (
"github.com/vbauerster/mpb/decor"
)
-// BarOption is a function option which changes the default behavior of a bar,
-// if passed to p.AddBar(int64, ...BarOption)
+// BarOption is a function option which changes the default behavior of a bar.
type BarOption func(*bState)
-// AppendDecorators let you inject decorators to the bar's right side
+// AppendDecorators let you inject decorators to the bar's right side.
func AppendDecorators(appenders ...decor.Decorator) BarOption {
return func(s *bState) {
for _, decorator := range appenders {
@@ -25,7 +24,7 @@ func AppendDecorators(appenders ...decor.Decorator) BarOption {
}
}
-// PrependDecorators let you inject decorators to the bar's left side
+// PrependDecorators let you inject decorators to the bar's left side.
func PrependDecorators(prependers ...decor.Decorator) BarOption {
return func(s *bState) {
for _, decorator := range prependers {
@@ -40,85 +39,155 @@ func PrependDecorators(prependers ...decor.Decorator) BarOption {
}
}
-// BarTrimLeft trims left side space of the bar
-func BarTrimLeft() BarOption {
- return func(s *bState) {
- s.trimLeftSpace = true
- }
-}
-
-// BarTrimRight trims right space of the bar
-func BarTrimRight() BarOption {
- return func(s *bState) {
- s.trimRightSpace = true
- }
-}
-
-// BarTrim trims both left and right spaces of the bar
-func BarTrim() BarOption {
+// BarID sets bar id.
+func BarID(id int) BarOption {
return func(s *bState) {
- s.trimLeftSpace = true
- s.trimRightSpace = true
+ s.id = id
}
}
-// BarID overwrites internal bar id
-func BarID(id int) BarOption {
+// BarWidth sets bar width independent of the container.
+func BarWidth(width int) BarOption {
return func(s *bState) {
- s.id = id
+ s.width = width
}
}
-// BarRemoveOnComplete is a flag, if set whole bar line will be removed on complete event.
-// If both BarRemoveOnComplete and BarClearOnComplete are set, first bar section gets cleared
-// and then whole bar line gets removed completely.
+// BarRemoveOnComplete is a flag; if set, the whole bar line will be removed
+// on the complete event. If both BarRemoveOnComplete and BarClearOnComplete
+// are set, the bar section gets cleared first and then the whole bar line
+// gets removed completely.
func BarRemoveOnComplete() BarOption {
return func(s *bState) {
s.removeOnComplete = true
}
}
-// BarReplaceOnComplete is indicator for delayed bar start, after the `runningBar` is complete.
-// To achieve bar replacement effect, `runningBar` should has its `BarRemoveOnComplete` option set.
+// BarReplaceOnComplete is an indicator for delayed bar start, after the
+// `runningBar` is complete. To achieve the bar replacement effect,
+// `runningBar` should have its `BarRemoveOnComplete` option set.
func BarReplaceOnComplete(runningBar *Bar) BarOption {
+ return BarParkTo(runningBar)
+}
+
+// BarParkTo same as BarReplaceOnComplete
+func BarParkTo(runningBar *Bar) BarOption {
return func(s *bState) {
s.runningBar = runningBar
}
}
-// BarClearOnComplete is a flag, if set will clear bar section on complete event.
-// If you need to remove a whole bar line, refer to BarRemoveOnComplete.
+// BarClearOnComplete is a flag; if set, the bar section will be cleared on
+// the complete event. If you need to remove a whole bar line, refer to
+// BarRemoveOnComplete.
func BarClearOnComplete() BarOption {
return func(s *bState) {
s.barClearOnComplete = true
}
}
-// BarPriority sets bar's priority.
-// Zero is highest priority, i.e. bar will be on top.
-// If `BarReplaceOnComplete` option is supplied, this option is ignored.
+// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
+// will be on top. If `BarReplaceOnComplete` option is supplied, this
+// option is ignored.
func BarPriority(priority int) BarOption {
return func(s *bState) {
s.priority = priority
}
}
-// BarNewLineExtend takes user defined efn, which gets called each render cycle.
-// Any write to provided writer of efn, will appear on new line of respective bar.
+// BarNewLineExtend takes a user defined efn, which gets called each
+// render cycle. Any write to the writer provided to efn will appear on
+// a new line of the respective bar.
func BarNewLineExtend(efn func(io.Writer, *decor.Statistics)) BarOption {
return func(s *bState) {
s.newLineExtendFn = efn
}
}
-func barWidth(w int) BarOption {
+// TrimSpace trims bar's edge spaces.
+func TrimSpace() BarOption {
return func(s *bState) {
- s.width = w
+ s.trimSpace = true
+ }
+}
+
+// BarStyle sets custom bar style, default one is "[=>-]<+".
+//
+// '[' left bracket rune
+//
+// '=' fill rune
+//
+// '>' tip rune
+//
+// '-' empty rune
+//
+// ']' right bracket rune
+//
+// '<' reverse tip rune, used when BarReverse option is set
+//
+// '+' refill rune, used when *Bar.SetRefill(int64) is called
+//
+// It's ok to provide first five runes only, for example mpb.BarStyle("╢▌▌░╟")
+func BarStyle(style string) BarOption {
+ chk := func(filler Filler) (interface{}, bool) {
+ if style == "" {
+ return nil, false
+ }
+ t, ok := filler.(*barFiller)
+ return t, ok
+ }
+ cb := func(t interface{}) {
+ t.(*barFiller).setStyle(style)
+ }
+ return MakeFillerTypeSpecificBarOption(chk, cb)
+}
+
+// BarReverse reverse mode, bar will progress from right to left.
+func BarReverse() BarOption {
+ chk := func(filler Filler) (interface{}, bool) {
+ t, ok := filler.(*barFiller)
+ return t, ok
+ }
+ cb := func(t interface{}) {
+ t.(*barFiller).setReverse()
+ }
+ return MakeFillerTypeSpecificBarOption(chk, cb)
+}
+
+// SpinnerStyle sets custom spinner style.
+// Effective when Filler type is spinner.
+func SpinnerStyle(frames []string) BarOption {
+ chk := func(filler Filler) (interface{}, bool) {
+ if len(frames) == 0 {
+ return nil, false
+ }
+ t, ok := filler.(*spinnerFiller)
+ return t, ok
+ }
+ cb := func(t interface{}) {
+ t.(*spinnerFiller).frames = frames
}
+ return MakeFillerTypeSpecificBarOption(chk, cb)
}
-func barFormat(format string) BarOption {
+// MakeFillerTypeSpecificBarOption makes a BarOption specific to a Filler's
+// actual type. If you implement your own Filler, you'll most probably
+// need this. See BarStyle or SpinnerStyle for an example.
+func MakeFillerTypeSpecificBarOption(
+ typeChecker func(Filler) (interface{}, bool),
+ cb func(interface{}),
+) BarOption {
return func(s *bState) {
- s.runes = strToBarRunes(format)
+ if t, ok := typeChecker(s.filler); ok {
+ cb(t)
+ }
+ }
+}
+
+// OptionOnCondition returns option when condition evaluates to true.
+func OptionOnCondition(option BarOption, condition func() bool) BarOption {
+ if condition() {
+ return option
}
+ return nil
}
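
MakeFillerTypeSpecificBarOption is the building block behind BarStyle, BarReverse and SpinnerStyle above, and the same pattern works for options that target your own Filler. A sketch, with myFiller and WithMyPrefix being hypothetical names:

```go
package main

import (
	"fmt"
	"io"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

// myFiller is a hypothetical custom Filler with one tunable knob.
type myFiller struct{ prefix string }

func (f *myFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
	fmt.Fprintf(w, "%s %d/%d", f.prefix, stat.Current, stat.Total)
}

// WithMyPrefix only takes effect when the bar's Filler is a *myFiller;
// for any other Filler type it is a no-op, mirroring BarStyle above.
func WithMyPrefix(prefix string) mpb.BarOption {
	chk := func(filler mpb.Filler) (interface{}, bool) {
		t, ok := filler.(*myFiller)
		return t, ok
	}
	cb := func(t interface{}) { t.(*myFiller).prefix = prefix }
	return mpb.MakeFillerTypeSpecificBarOption(chk, cb)
}

func main() {
	p := mpb.New()
	bar := p.Add(10, &myFiller{prefix: "items"}, WithMyPrefix("processed"))
	for i := 0; i < 10; i++ {
		bar.Increment()
	}
	p.Wait()
}
```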
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
index 0b1470d4c..638237c18 100644
--- a/vendor/github.com/vbauerster/mpb/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
@@ -22,8 +22,8 @@ var (
clearCursorAndLine = cursorUp + clearLine
)
-// Writer is a buffered the writer that updates the terminal.
-// The contents of writer will be flushed when Flush is called.
+// Writer is a buffered writer that updates the terminal. The
+// contents of the writer will be flushed when Flush is called.
type Writer struct {
out io.Writer
buf bytes.Buffer
@@ -64,11 +64,13 @@ func (w *Writer) WriteString(s string) (n int, err error) {
return w.buf.WriteString(s)
}
-// ReadFrom reads from the provided io.Reader and writes to the underlying buffer.
+// ReadFrom reads from the provided io.Reader and writes to the
+// underlying buffer.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
return w.buf.ReadFrom(r)
}
+// GetWidth returns width of underlying terminal.
func (w *Writer) GetWidth() (int, error) {
if w.isTerminal {
tw, _, err := terminal.GetSize(w.fd)
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
index dad7f50b2..747a63484 100644
--- a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
@@ -8,7 +8,7 @@ import (
"syscall"
"unsafe"
- "github.com/mattn/go-isatty"
+ isatty "github.com/mattn/go-isatty"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
diff --git a/vendor/github.com/vbauerster/mpb/decor/counters.go b/vendor/github.com/vbauerster/mpb/decor/counters.go
index e4161dc4b..7d581eefb 100644
--- a/vendor/github.com/vbauerster/mpb/decor/counters.go
+++ b/vendor/github.com/vbauerster/mpb/decor/counters.go
@@ -141,12 +141,14 @@ func CountersNoUnit(pairFormat string, wcc ...WC) Decorator {
return Counters(0, pairFormat, wcc...)
}
-// CountersKibiByte is a wrapper around Counters with predefined unit UnitKiB (bytes/1024).
+// CountersKibiByte is a wrapper around Counters with predefined unit
+// UnitKiB (bytes/1024).
func CountersKibiByte(pairFormat string, wcc ...WC) Decorator {
return Counters(UnitKiB, pairFormat, wcc...)
}
-// CountersKiloByte is a wrapper around Counters with predefined unit UnitKB (bytes/1000).
+// CountersKiloByte is a wrapper around Counters with predefined unit
+// UnitKB (bytes/1000).
func CountersKiloByte(pairFormat string, wcc ...WC) Decorator {
return Counters(UnitKB, pairFormat, wcc...)
}
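
CountersKibiByte and CountersKiloByte above only preselect the unit for Counters. A usage sketch; the pair format string "% .2f / % .2f" is assumed from the project's examples and renders current and total in the chosen unit:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	totalBytes := int64(32 * 1024 * 1024)

	p := mpb.New()
	bar := p.AddBar(totalBytes,
		// e.g. "16.00MiB / 32.00MiB" while in flight (format string assumed).
		mpb.AppendDecorators(decor.CountersKibiByte("% .2f / % .2f")),
	)

	for done := int64(0); done < totalBytes; done += 1024 * 1024 {
		time.Sleep(20 * time.Millisecond) // simulate transferring 1MiB
		bar.IncrBy(1024 * 1024)
	}
	p.Wait()
}
```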
diff --git a/vendor/github.com/vbauerster/mpb/decor/decorator.go b/vendor/github.com/vbauerster/mpb/decor/decorator.go
index 6aaf6c830..2fe40aea6 100644
--- a/vendor/github.com/vbauerster/mpb/decor/decorator.go
+++ b/vendor/github.com/vbauerster/mpb/decor/decorator.go
@@ -31,8 +31,12 @@ const (
DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
)
+// TimeStyle enum.
+type TimeStyle int
+
+// TimeStyle kinds.
const (
- ET_STYLE_GO = iota
+ ET_STYLE_GO TimeStyle = iota
ET_STYLE_HHMMSS
ET_STYLE_HHMM
ET_STYLE_MMSS
@@ -47,35 +51,37 @@ type Statistics struct {
}
// Decorator interface.
-// A decorator must implement this interface, in order to be used with mpb library.
+// A decorator must implement this interface, in order to be used with
+// the mpb library.
type Decorator interface {
Decor(*Statistics) string
Syncable
}
// Syncable interface.
-// All decorators implement this interface implicitly.
-// Its Syncable method exposes width sync channel, if sync is enabled.
+// All decorators implement this interface implicitly. Its Syncable
+// method exposes width sync channel, if sync is enabled.
type Syncable interface {
Syncable() (bool, chan int)
}
// OnCompleteMessenger interface.
-// Decorators implementing this interface suppose to return provided string on complete event.
+// Decorators implementing this interface are supposed to return the
+// provided string on the complete event.
type OnCompleteMessenger interface {
OnCompleteMessage(string)
}
// AmountReceiver interface.
-// If decorator needs to receive increment amount,
-// so this is the right interface to implement.
+// If a decorator needs to receive the increment amount, this is the
+// right interface to implement.
type AmountReceiver interface {
NextAmount(int, ...time.Duration)
}
// ShutdownListener interface.
-// If decorator needs to be notified once upon bar shutdown event,
-// so this is the right interface to implement.
+// If a decorator needs to be notified once upon the bar shutdown event,
+// this is the right interface to implement.
type ShutdownListener interface {
Shutdown()
}
@@ -90,6 +96,7 @@ var (
// WC is a struct with two public fields W and C, both of int type.
// W represents width and C represents bit set of width related config.
+// A decorator should embed WC, in order to become Syncable.
type WC struct {
W int
C int
@@ -126,12 +133,13 @@ func (wc *WC) Init() {
}
}
+// Syncable is implementation of Syncable interface.
func (wc *WC) Syncable() (bool, chan int) {
return (wc.C & DSyncWidth) != 0, wc.wsync
}
-// OnComplete returns decorator, which wraps provided decorator, with sole
-// purpose to display provided message on complete event.
+// OnComplete returns a decorator which wraps the provided decorator, with
+// the sole purpose of displaying the provided message on the complete event.
//
// `decorator` Decorator to wrap
//
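
Per the updated WC comment above, a custom decorator embeds decor.WC to become Syncable and implements Decor. A minimal sketch of a current/total ratio decorator; it assumes WC's FormatMsg helper, which the eta.go hunk below also relies on:

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

// ratioDecorator renders "current/total". Embedding decor.WC makes it
// Syncable, as recommended above.
type ratioDecorator struct {
	decor.WC
}

func newRatio(wcc ...decor.WC) decor.Decorator {
	var wc decor.WC
	for _, widthConf := range wcc {
		wc = widthConf
	}
	wc.Init()
	return &ratioDecorator{WC: wc}
}

func (d *ratioDecorator) Decor(st *decor.Statistics) string {
	return d.FormatMsg(fmt.Sprintf("%d/%d", st.Current, st.Total))
}

func main() {
	p := mpb.New()
	bar := p.AddBar(42, mpb.AppendDecorators(newRatio(decor.WC{W: 7})))
	for i := 0; i < 42; i++ {
		bar.Increment()
	}
	p.Wait()
}
```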
diff --git a/vendor/github.com/vbauerster/mpb/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/decor/elapsed.go
index 649d40a30..b2e75852c 100644
--- a/vendor/github.com/vbauerster/mpb/decor/elapsed.go
+++ b/vendor/github.com/vbauerster/mpb/decor/elapsed.go
@@ -10,7 +10,7 @@ import (
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `wcc` optional WC config
-func Elapsed(style int, wcc ...WC) Decorator {
+func Elapsed(style TimeStyle, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
@@ -26,7 +26,7 @@ func Elapsed(style int, wcc ...WC) Decorator {
type elapsedDecorator struct {
WC
- style int
+ style TimeStyle
startTime time.Time
msg string
completeMsg *string
diff --git a/vendor/github.com/vbauerster/mpb/decor/eta.go b/vendor/github.com/vbauerster/mpb/decor/eta.go
index 44a1f03ea..e8dc979b4 100644
--- a/vendor/github.com/vbauerster/mpb/decor/eta.go
+++ b/vendor/github.com/vbauerster/mpb/decor/eta.go
@@ -6,7 +6,6 @@ import (
"time"
"github.com/VividCortex/ewma"
- "github.com/vbauerster/mpb/internal"
)
type TimeNormalizer func(time.Duration) time.Duration
@@ -18,7 +17,7 @@ type TimeNormalizer func(time.Duration) time.Duration
// `age` is the previous N samples to average over.
//
// `wcc` optional WC config
-func EwmaETA(style int, age float64, wcc ...WC) Decorator {
+func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
return MovingAverageETA(style, ewma.NewMovingAverage(age), NopNormalizer(), wcc...)
}
@@ -31,7 +30,7 @@ func EwmaETA(style int, age float64, wcc ...WC) Decorator {
// `normalizer` available implementations are [NopNormalizer|FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
//
// `wcc` optional WC config
-func MovingAverageETA(style int, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
+func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
@@ -48,7 +47,7 @@ func MovingAverageETA(style int, average MovingAverage, normalizer TimeNormalize
type movingAverageETA struct {
WC
- style int
+ style TimeStyle
average ewma.MovingAverage
completeMsg *string
normalizer TimeNormalizer
@@ -59,7 +58,7 @@ func (d *movingAverageETA) Decor(st *Statistics) string {
return d.FormatMsg(*d.completeMsg)
}
- v := internal.Round(d.average.Value())
+ v := math.Round(d.average.Value())
remaining := d.normalizer(time.Duration((st.Total - st.Current) * int64(v)))
hours := int64((remaining / time.Hour) % 60)
minutes := int64((remaining / time.Minute) % 60)
@@ -105,7 +104,7 @@ func (d *movingAverageETA) OnCompleteMessage(msg string) {
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
//
// `wcc` optional WC config
-func AverageETA(style int, wcc ...WC) Decorator {
+func AverageETA(style TimeStyle, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
@@ -121,7 +120,7 @@ func AverageETA(style int, wcc ...WC) Decorator {
type averageETA struct {
WC
- style int
+ style TimeStyle
startTime time.Time
completeMsg *string
}
@@ -133,7 +132,7 @@ func (d *averageETA) Decor(st *Statistics) string {
var str string
timeElapsed := time.Since(d.startTime)
- v := internal.Round(float64(timeElapsed) / float64(st.Current))
+ v := math.Round(float64(timeElapsed) / float64(st.Current))
if math.IsInf(v, 0) || math.IsNaN(v) {
v = 0
}
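
The ewma based decorators above only produce sensible estimates when the bar is fed work durations; per the IncrBy comment in the bar.go hunk, time.Since(start) is passed along with each increment. A minimal sketch:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	total := 100
	p := mpb.New()
	bar := p.AddBar(int64(total),
		mpb.AppendDecorators(decor.EwmaETA(decor.ET_STYLE_GO, 60)),
	)

	for i := 0; i < total; i++ {
		start := time.Now()
		time.Sleep(20 * time.Millisecond) // the unit of work being measured
		// Feed the observed work duration to the ewma based decorator.
		bar.IncrBy(1, time.Since(start))
	}
	p.Wait()
}
```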
diff --git a/vendor/github.com/vbauerster/mpb/decor/moving-average.go b/vendor/github.com/vbauerster/mpb/decor/moving-average.go
index f9596a27f..fcd268923 100644
--- a/vendor/github.com/vbauerster/mpb/decor/moving-average.go
+++ b/vendor/github.com/vbauerster/mpb/decor/moving-average.go
@@ -6,9 +6,9 @@ import (
"github.com/VividCortex/ewma"
)
-// MovingAverage is the interface that computes a moving average over a time-
-// series stream of numbers. The average may be over a window or exponentially
-// decaying.
+// MovingAverage is the interface that computes a moving average over
+// a time-series stream of numbers. The average may be over a window
+// or exponentially decaying.
type MovingAverage interface {
Add(float64)
Value() float64
@@ -57,7 +57,8 @@ func (s *medianEwma) Add(v float64) {
s.count++
}
-// NewMedianEwma is ewma based MovingAverage, which gets its values from median MovingAverage.
+// NewMedianEwma is ewma based MovingAverage, which gets its values
+// from median MovingAverage.
func NewMedianEwma(age ...float64) MovingAverage {
return &medianEwma{
MovingAverage: ewma.NewMovingAverage(age...),
diff --git a/vendor/github.com/vbauerster/mpb/decor/speed.go b/vendor/github.com/vbauerster/mpb/decor/speed.go
index 395e5d04d..74658ce41 100644
--- a/vendor/github.com/vbauerster/mpb/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/decor/speed.go
@@ -137,7 +137,8 @@ func EwmaSpeed(unit int, unitFormat string, age float64, wcc ...WC) Decorator {
return MovingAverageSpeed(unit, unitFormat, ewma.NewMovingAverage(age), wcc...)
}
-// MovingAverageSpeed decorator relies on MovingAverage implementation to calculate its average.
+// MovingAverageSpeed decorator relies on MovingAverage implementation
+// to calculate its average.
//
// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
diff --git a/vendor/github.com/vbauerster/mpb/go.test.sh b/vendor/github.com/vbauerster/mpb/go.test.sh
new file mode 100644
index 000000000..34dbbfb31
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -race -coverprofile=profile.out -covermode=atomic $d
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/vbauerster/mpb/internal/percentage.go b/vendor/github.com/vbauerster/mpb/internal/percentage.go
index 3c8defb7d..0483d2598 100644
--- a/vendor/github.com/vbauerster/mpb/internal/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/internal/percentage.go
@@ -1,10 +1,12 @@
package internal
+import "math"
+
// Percentage is a helper function, to calculate percentage.
func Percentage(total, current, width int64) int64 {
if total <= 0 {
return 0
}
p := float64(width*current) / float64(total)
- return int64(Round(p))
+ return int64(math.Round(p))
}
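
The switch to math.Round (available since Go 1.10) does not change the helper's behavior: it still maps current/total onto the available width. A standalone restatement of the arithmetic for illustration; the real function lives in mpb's internal package:

```go
package main

import (
	"fmt"
	"math"
)

// percentage mirrors mpb's internal.Percentage after the change above.
func percentage(total, current, width int64) int64 {
	if total <= 0 {
		return 0
	}
	return int64(math.Round(float64(width*current) / float64(total)))
}

func main() {
	fmt.Println(percentage(200, 50, 80))  // 20: a quarter of an 80-cell bar
	fmt.Println(percentage(200, 199, 80)) // 80: 79.6 rounds up
	fmt.Println(percentage(0, 50, 80))    // 0: guard against total <= 0
}
```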
diff --git a/vendor/github.com/vbauerster/mpb/internal/round.go b/vendor/github.com/vbauerster/mpb/internal/round.go
deleted file mode 100644
index c54a789d2..000000000
--- a/vendor/github.com/vbauerster/mpb/internal/round.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package internal
-
-import "math"
-
-const (
- uvone = 0x3FF0000000000000
- mask = 0x7FF
- shift = 64 - 11 - 1
- bias = 1023
- signMask = 1 << 63
- fracMask = 1<<shift - 1
-)
-
-// Round returns the nearest integer, rounding half away from zero.
-//
-// Special cases are:
-// Round(±0) = ±0
-// Round(±Inf) = ±Inf
-// Round(NaN) = NaN
-func Round(x float64) float64 {
- // Round is a faster implementation of:
- //
- // func Round(x float64) float64 {
- // t := Trunc(x)
- // if Abs(x-t) >= 0.5 {
- // return t + Copysign(1, x)
- // }
- // return t
- // }
- bits := math.Float64bits(x)
- e := uint(bits>>shift) & mask
- if e < bias {
- // Round abs(x) < 1 including denormals.
- bits &= signMask // +-0
- if e == bias-1 {
- bits |= uvone // +-1
- }
- } else if e < bias+shift {
- // Round any abs(x) >= 1 containing a fractional component [0,1).
- //
- // Numbers with larger exponents are returned unchanged since they
- // must be either an integer, infinity, or NaN.
- const half = 1 << (shift - 1)
- e -= bias
- bits += half >> e
- bits &^= fracMask >> e
- }
- return math.Float64frombits(bits)
-}
diff --git a/vendor/github.com/vbauerster/mpb/options.go b/vendor/github.com/vbauerster/mpb/options.go
index 05d2ecf1f..44a6ee3f3 100644
--- a/vendor/github.com/vbauerster/mpb/options.go
+++ b/vendor/github.com/vbauerster/mpb/options.go
@@ -1,29 +1,30 @@
package mpb
import (
+ "context"
"io"
"sync"
"time"
- "unicode/utf8"
"github.com/vbauerster/mpb/cwriter"
)
-// ProgressOption is a function option which changes the default behavior of
-// progress pool, if passed to mpb.New(...ProgressOption)
+// ProgressOption is a function option which changes the default
+// behavior of progress pool, if passed to mpb.New(...ProgressOption).
type ProgressOption func(*pState)
-// WithWaitGroup provides means to have a single joint point.
-// If *sync.WaitGroup is provided, you can safely call just p.Wait()
-// without calling Wait() on provided *sync.WaitGroup.
-// Makes sense when there are more than one bar to render.
+// WithWaitGroup provides a means to have a single joint point. If a
+// *sync.WaitGroup is provided, you can safely call just p.Wait()
+// without calling Wait() on the provided *sync.WaitGroup. Makes sense
+// when there is more than one bar to render.
func WithWaitGroup(wg *sync.WaitGroup) ProgressOption {
return func(s *pState) {
s.uwg = wg
}
}
-// WithWidth overrides default width 80
+// WithWidth sets container width. Default is 80. Bars inherit this
+// width, as long as no BarWidth is applied.
func WithWidth(w int) ProgressOption {
return func(s *pState) {
if w >= 0 {
@@ -32,16 +33,7 @@ func WithWidth(w int) ProgressOption {
}
}
-// WithFormat overrides default bar format "[=>-]"
-func WithFormat(format string) ProgressOption {
- return func(s *pState) {
- if utf8.RuneCountInString(format) == formatLen {
- s.format = format
- }
- }
-}
-
-// WithRefreshRate overrides default 120ms refresh rate
+// WithRefreshRate overrides default 120ms refresh rate.
func WithRefreshRate(d time.Duration) ProgressOption {
return func(s *pState) {
if d < 10*time.Millisecond {
@@ -59,22 +51,25 @@ func WithManualRefresh(ch <-chan time.Time) ProgressOption {
}
}
-// WithCancel provide your cancel channel,
-// which you plan to close at some point.
-func WithCancel(ch <-chan struct{}) ProgressOption {
+// WithContext sets the context that will be used for cancellation purposes.
+func WithContext(ctx context.Context) ProgressOption {
return func(s *pState) {
- s.cancel = ch
+ if ctx == nil {
+ return
+ }
+ s.ctx = ctx
}
}
-// WithShutdownNotifier provided chanel will be closed, after all bars have been rendered.
+// WithShutdownNotifier sets a channel which will be closed after all
+// bars have been rendered.
func WithShutdownNotifier(ch chan struct{}) ProgressOption {
return func(s *pState) {
s.shutdownNotifier = ch
}
}
-// WithOutput overrides default output os.Stdout
+// WithOutput overrides default output os.Stdout.
func WithOutput(w io.Writer) ProgressOption {
return func(s *pState) {
if w == nil {
diff --git a/vendor/github.com/vbauerster/mpb/options_go1.7.go b/vendor/github.com/vbauerster/mpb/options_go1.7.go
deleted file mode 100644
index ca9a5bad8..000000000
--- a/vendor/github.com/vbauerster/mpb/options_go1.7.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//+build go1.7
-
-package mpb
-
-import "context"
-
-// WithContext provided context will be used for cancellation purposes
-func WithContext(ctx context.Context) ProgressOption {
- return func(s *pState) {
- if ctx == nil {
- panic("ctx must not be nil")
- }
- s.cancel = ctx.Done()
- }
-}
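
With the go1.7-gated file above deleted, WithContext now lives in options.go and simply stores the context; each bar's serve goroutine watches ctx.Done() (see the bar.go hunk). A minimal sketch of wiring a cancellable context into the container:

```go
package main

import (
	"context"
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	// Cancelling this context is the way to tear the container down early;
	// every bar's serve goroutine consults ctx.Done().
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p := mpb.New(mpb.WithContext(ctx))

	total := 100
	bar := p.AddBar(int64(total))
	for i := 0; i < total; i++ {
		time.Sleep(5 * time.Millisecond) // simulate work
		bar.Increment()
	}
	p.Wait()
}
```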
diff --git a/vendor/github.com/vbauerster/mpb/progress.go b/vendor/github.com/vbauerster/mpb/progress.go
index d95fe45b7..f9e25af79 100644
--- a/vendor/github.com/vbauerster/mpb/progress.go
+++ b/vendor/github.com/vbauerster/mpb/progress.go
@@ -2,6 +2,7 @@ package mpb
import (
"container/heap"
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -17,8 +18,6 @@ const (
prr = 120 * time.Millisecond
// default width
pwidth = 80
- // default format
- pformat = "[=>-]"
)
// Progress represents the container that renders Progress bars
@@ -42,24 +41,24 @@ type pState struct {
pMatrix map[int][]chan int
aMatrix map[int][]chan int
- // following are provided by user
+ // following are provided/overridden by user
+ ctx context.Context
uwg *sync.WaitGroup
manualRefreshCh <-chan time.Time
- cancel <-chan struct{}
shutdownNotifier chan struct{}
waitBars map[*Bar]*Bar
debugOut io.Writer
}
-// New creates new Progress instance, which orchestrates bars rendering process.
-// Accepts mpb.ProgressOption funcs for customization.
+// New creates a new Progress instance, which orchestrates the bar
+// rendering process. Accepts mpb.ProgressOption funcs for customization.
func New(options ...ProgressOption) *Progress {
pq := make(priorityQueue, 0)
heap.Init(&pq)
s := &pState{
+ ctx: context.Background(),
bHeap: &pq,
width: pwidth,
- format: pformat,
cw: cwriter.New(os.Stdout),
rr: prr,
waitBars: make(map[*Bar]*Bar),
@@ -84,12 +83,28 @@ func New(options ...ProgressOption) *Progress {
// AddBar creates a new progress bar and adds to the container.
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
+ return p.Add(total, newDefaultBarFiller(), options...)
+}
+
+// AddSpinner creates a new spinner bar and adds to the container.
+func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {
+ filler := &spinnerFiller{
+ frames: defaultSpinnerStyle,
+ alignment: alignment,
+ }
+ return p.Add(total, filler, options...)
+}
+
+// Add creates a bar which renders itself by provided filler.
+func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {
+ if filler == nil {
+ filler = newDefaultBarFiller()
+ }
p.wg.Add(1)
result := make(chan *Bar)
select {
case p.operateState <- func(s *pState) {
- options = append(options, barWidth(s.width), barFormat(s.format))
- b := newBar(p.wg, s.idCounter, total, s.cancel, options...)
+ b := newBar(s.ctx, p.wg, filler, s.idCounter, s.width, total, options...)
if b.runningBar != nil {
s.waitBars[b.runningBar] = b
} else {
@@ -106,10 +121,10 @@ func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
}
}
-// Abort is only effective while bar progress is running,
-// it means remove bar now without waiting for its completion.
-// If bar is already completed, there is nothing to abort.
-// If you need to remove bar after completion, use BarRemoveOnComplete BarOption.
+// Abort is only effective while the bar is running; it removes the bar
+// now, without waiting for its completion. If the bar is already
+// completed, there is nothing to abort. If you need to remove a bar
+// after completion, use the BarRemoveOnComplete BarOption.
func (p *Progress) Abort(b *Bar, remove bool) {
select {
case p.operateState <- func(s *pState) {
@@ -145,9 +160,10 @@ func (p *Progress) BarCount() int {
}
}
-// Wait first waits for user provided *sync.WaitGroup, if any,
-// then waits far all bars to complete and finally shutdowns master goroutine.
-// After this method has been called, there is no way to reuse *Progress instance.
+// Wait first waits for the user provided *sync.WaitGroup, if any, then
+// waits for all bars to complete and finally shuts down the master
+// goroutine. After this method has been called, there is no way to
+// reuse the *Progress instance.
func (p *Progress) Wait() {
if p.uwg != nil {
p.uwg.Wait()
@@ -205,8 +221,8 @@ func (s *pState) flush(lineCount int) error {
defer func() {
if frameReader.toShutdown {
// shutdown at next flush, in other words decrement underlying WaitGroup
- // only after the bar with completed state has been flushed.
- // this ensures no bar ends up with less than 100% rendered.
+ // only after the bar with completed state has been flushed. this
+ // ensures no bar ends up with less than 100% rendered.
s.shutdownPending = append(s.shutdownPending, bar)
if replacementBar, ok := s.waitBars[bar]; ok {
heap.Push(s.bHeap, replacementBar)
diff --git a/vendor/github.com/vbauerster/mpb/spinner_filler.go b/vendor/github.com/vbauerster/mpb/spinner_filler.go
new file mode 100644
index 000000000..36299fef0
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/spinner_filler.go
@@ -0,0 +1,48 @@
+package mpb
+
+import (
+ "io"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/decor"
+)
+
+// SpinnerAlignment enum.
+type SpinnerAlignment int
+
+// SpinnerAlignment kinds.
+const (
+ SpinnerOnLeft SpinnerAlignment = iota
+ SpinnerOnMiddle
+ SpinnerOnRight
+)
+
+var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
+
+type spinnerFiller struct {
+ frames []string
+ count uint
+ alignment SpinnerAlignment
+}
+
+func (s *spinnerFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
+
+ frame := s.frames[s.count%uint(len(s.frames))]
+ frameWidth := utf8.RuneCountInString(frame)
+
+ if width < frameWidth {
+ return
+ }
+
+ switch rest := width - frameWidth; s.alignment {
+ case SpinnerOnLeft:
+ io.WriteString(w, frame+strings.Repeat(" ", rest))
+ case SpinnerOnMiddle:
+ str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
+ io.WriteString(w, str)
+ case SpinnerOnRight:
+ io.WriteString(w, strings.Repeat(" ", rest)+frame)
+ }
+ s.count++
+}
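
AddSpinner (from the progress.go hunk) plugs a spinnerFiller like the one above into the same rendering machinery, and SpinnerStyle from bar_option.go swaps the frame set. A minimal sketch:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	p := mpb.New()

	total := 40
	spinner := p.AddSpinner(int64(total), mpb.SpinnerOnMiddle,
		// Optional: replace the default braille frames.
		mpb.SpinnerStyle([]string{"-", "\\", "|", "/"}),
	)

	for i := 0; i < total; i++ {
		time.Sleep(25 * time.Millisecond) // simulate work
		spinner.Increment()
	}
	p.Wait()
}
```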
diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml
new file mode 100644
index 000000000..f5c0b3eb5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+before_script:
+ # make sure we keep path intact when we sudo
+ - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers
+ # modprobe ip_gre or else the first gre device can't be deleted
+ - sudo modprobe ip_gre
+ # modprobe nf_conntrack for the conntrack testing
+ - sudo modprobe nf_conntrack
+ - sudo modprobe nf_conntrack_netlink
+ - sudo modprobe nf_conntrack_ipv4
+ - sudo modprobe nf_conntrack_ipv6
+install:
+ - go get github.com/vishvananda/netns
diff --git a/vendor/github.com/vishvananda/netlink/CHANGELOG.md b/vendor/github.com/vishvananda/netlink/CHANGELOG.md
new file mode 100644
index 000000000..b11e59ff6
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Changelog
+
+## 1.0.0 (2018-03-15)
+
+Initial release tagging \ No newline at end of file
diff --git a/vendor/github.com/vishvananda/netlink/Makefile b/vendor/github.com/vishvananda/netlink/Makefile
new file mode 100644
index 000000000..a0e68e7a9
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/Makefile
@@ -0,0 +1,30 @@
+DIRS := \
+ . \
+ nl
+
+DEPS = \
+ github.com/vishvananda/netns \
+ golang.org/x/sys/unix
+
+uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go))))
+goroot = $(addprefix ../../../,$(1))
+unroot = $(subst ../../../,,$(1))
+fmt = $(addprefix fmt-,$(1))
+
+all: test
+
+$(call goroot,$(DEPS)):
+ go get $(call unroot,$@)
+
+.PHONY: $(call testdirs,$(DIRS))
+$(call testdirs,$(DIRS)):
+ go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@
+
+$(call fmt,$(call testdirs,$(DIRS))):
+ ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q .
+
+.PHONY: fmt
+fmt: $(call fmt,$(call testdirs,$(DIRS)))
+
+test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS))
diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore
new file mode 100644
index 000000000..68e993ce3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore
@@ -0,0 +1,3 @@
+*.sw[nop]
+*.iml
+.vscode/
diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml
new file mode 100644
index 000000000..09ee789ca
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+ - "1.5"
+ - "1.6"
+ - "1.7"
+ - "1.8"
+ - "1.9"
+ - "1.10"
+before_install:
+ - go get github.com/xeipuuv/gojsonreference
+ - go get github.com/xeipuuv/gojsonpointer
+ - go get github.com/stretchr/testify/assert
diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml
new file mode 100644
index 000000000..ab6fb867c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml
@@ -0,0 +1,13 @@
+package: github.com/xeipuuv/gojsonschema
+license: Apache 2.0
+import:
+- package: github.com/xeipuuv/gojsonschema
+
+- package: github.com/xeipuuv/gojsonpointer
+
+- package: github.com/xeipuuv/gojsonreference
+
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
new file mode 100644
index 000000000..2b00ddba0
--- /dev/null
+++ b/vendor/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 000000000..1fbd3e976
--- /dev/null
+++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md
deleted file mode 100644
index c9d6fecd1..000000000
--- a/vendor/golang.org/x/crypto/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Go Cryptography
-
-This repository holds supplementary Go cryptography libraries.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
-can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the crypto repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
-subject line, so it is easy to find.
-
-Note that contributions to the cryptography package receive additional scrutiny
-due to their sensitive nature. Patches may take longer than normal to receive
-feedback.
diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
index 0b4af37bd..ddcbeb6f2 100644
--- a/vendor/golang.org/x/crypto/cast5/cast5.go
+++ b/vendor/golang.org/x/crypto/cast5/cast5.go
@@ -2,8 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
-// OpenPGP cipher.
+// Package cast5 implements CAST5, as defined in RFC 2144.
+//
+// CAST5 is a legacy cipher and its short block size makes it vulnerable to
+// birthday bound attacks (see https://sweet32.info). It should only be used
+// where compatibility with legacy systems, not security, is the goal.
+//
+// Deprecated: any new system should use AES (from crypto/aes, if necessary in
+// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
+// golang.org/x/crypto/chacha20poly1305).
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
deleted file mode 100644
index b3f74162f..000000000
--- a/vendor/golang.org/x/crypto/curve25519/const_amd64.h
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-#define REDMASK51 0x0007FFFFFFFFFFFF
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
deleted file mode 100644
index ee7b4bd5f..000000000
--- a/vendor/golang.org/x/crypto/curve25519/const_amd64.s
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// These constants cannot be encoded in non-MOVQ immediates.
-// We access them directly from memory instead.
-
-DATA ·_121666_213(SB)/8, $996687872
-GLOBL ·_121666_213(SB), 8, $8
-
-DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
-GLOBL ·_2P0(SB), 8, $8
-
-DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
-GLOBL ·_2P1234(SB), 8, $8
diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
deleted file mode 100644
index cd793a5b5..000000000
--- a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!gccgo,!appengine
-
-// func cswap(inout *[4][5]uint64, v uint64)
-TEXT ·cswap(SB),7,$0
- MOVQ inout+0(FP),DI
- MOVQ v+8(FP),SI
-
- SUBQ $1, SI
- NOTQ SI
- MOVQ SI, X15
- PSHUFD $0x44, X15, X15
-
- MOVOU 0(DI), X0
- MOVOU 16(DI), X2
- MOVOU 32(DI), X4
- MOVOU 48(DI), X6
- MOVOU 64(DI), X8
- MOVOU 80(DI), X1
- MOVOU 96(DI), X3
- MOVOU 112(DI), X5
- MOVOU 128(DI), X7
- MOVOU 144(DI), X9
-
- MOVO X1, X10
- MOVO X3, X11
- MOVO X5, X12
- MOVO X7, X13
- MOVO X9, X14
-
- PXOR X0, X10
- PXOR X2, X11
- PXOR X4, X12
- PXOR X6, X13
- PXOR X8, X14
- PAND X15, X10
- PAND X15, X11
- PAND X15, X12
- PAND X15, X13
- PAND X15, X14
- PXOR X10, X0
- PXOR X10, X1
- PXOR X11, X2
- PXOR X11, X3
- PXOR X12, X4
- PXOR X12, X5
- PXOR X13, X6
- PXOR X13, X7
- PXOR X14, X8
- PXOR X14, X9
-
- MOVOU X0, 0(DI)
- MOVOU X2, 16(DI)
- MOVOU X4, 32(DI)
- MOVOU X6, 48(DI)
- MOVOU X8, 64(DI)
- MOVOU X1, 80(DI)
- MOVOU X3, 96(DI)
- MOVOU X5, 112(DI)
- MOVOU X7, 128(DI)
- MOVOU X9, 144(DI)
- RET
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
deleted file mode 100644
index cb8fbc57b..000000000
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// We have an implementation in amd64 assembly so this code is only run on
-// non-amd64 platforms. The amd64 assembly does not support gccgo.
-// +build !amd64 gccgo appengine
-
-package curve25519
-
-import (
- "encoding/binary"
-)
-
-// This code is a port of the public domain, "ref10" implementation of
-// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
-
-// fieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type fieldElement [10]int32
-
-func feZero(fe *fieldElement) {
- for i := range fe {
- fe[i] = 0
- }
-}
-
-func feOne(fe *fieldElement) {
- feZero(fe)
- fe[0] = 1
-}
-
-func feAdd(dst, a, b *fieldElement) {
- for i := range dst {
- dst[i] = a[i] + b[i]
- }
-}
-
-func feSub(dst, a, b *fieldElement) {
- for i := range dst {
- dst[i] = a[i] - b[i]
- }
-}
-
-func feCopy(dst, src *fieldElement) {
- for i := range dst {
- dst[i] = src[i]
- }
-}
-
-// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func feCSwap(f, g *fieldElement, b int32) {
- b = -b
- for i := range f {
- t := b & (f[i] ^ g[i])
- f[i] ^= t
- g[i] ^= t
- }
-}
-
-// load3 reads a 24-bit, little-endian value from in.
-func load3(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-// load4 reads a 32-bit, little-endian value from in.
-func load4(in []byte) int64 {
- return int64(binary.LittleEndian.Uint32(in))
-}
-
-func feFromBytes(dst *fieldElement, src *[32]byte) {
- h0 := load4(src[:])
- h1 := load3(src[4:]) << 6
- h2 := load3(src[7:]) << 5
- h3 := load3(src[10:]) << 3
- h4 := load3(src[13:]) << 2
- h5 := load4(src[16:])
- h6 := load3(src[20:]) << 7
- h7 := load3(src[23:]) << 5
- h8 := load3(src[26:]) << 4
- h9 := load3(src[29:]) << 2
-
- var carry [10]int64
- carry[9] = (h9 + 1<<24) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- carry[1] = (h1 + 1<<24) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[3] = (h3 + 1<<24) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[5] = (h5 + 1<<24) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- carry[7] = (h7 + 1<<24) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[0] = (h0 + 1<<25) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[2] = (h2 + 1<<25) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[4] = (h4 + 1<<25) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[6] = (h6 + 1<<25) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- carry[8] = (h8 + 1<<25) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- dst[0] = int32(h0)
- dst[1] = int32(h1)
- dst[2] = int32(h2)
- dst[3] = int32(h3)
- dst[4] = int32(h4)
- dst[5] = int32(h5)
- dst[6] = int32(h6)
- dst[7] = int32(h7)
- dst[8] = int32(h8)
- dst[9] = int32(h9)
-}
-
-// feToBytes marshals h to s.
-// Preconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-// Then 0<y<1.
-//
-// Write r=h-pq.
-// Have 0<=r<=p-1=2^255-20.
-// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
-//
-// Write x=r+19(2^-255)r+y.
-// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func feToBytes(s *[32]byte, h *fieldElement) {
- var carry [10]int32
-
- q := (19*h[9] + (1 << 24)) >> 25
- q = (h[0] + q) >> 26
- q = (h[1] + q) >> 25
- q = (h[2] + q) >> 26
- q = (h[3] + q) >> 25
- q = (h[4] + q) >> 26
- q = (h[5] + q) >> 25
- q = (h[6] + q) >> 26
- q = (h[7] + q) >> 25
- q = (h[8] + q) >> 26
- q = (h[9] + q) >> 25
-
- // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
- h[0] += 19 * q
- // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
-
- carry[0] = h[0] >> 26
- h[1] += carry[0]
- h[0] -= carry[0] << 26
- carry[1] = h[1] >> 25
- h[2] += carry[1]
- h[1] -= carry[1] << 25
- carry[2] = h[2] >> 26
- h[3] += carry[2]
- h[2] -= carry[2] << 26
- carry[3] = h[3] >> 25
- h[4] += carry[3]
- h[3] -= carry[3] << 25
- carry[4] = h[4] >> 26
- h[5] += carry[4]
- h[4] -= carry[4] << 26
- carry[5] = h[5] >> 25
- h[6] += carry[5]
- h[5] -= carry[5] << 25
- carry[6] = h[6] >> 26
- h[7] += carry[6]
- h[6] -= carry[6] << 26
- carry[7] = h[7] >> 25
- h[8] += carry[7]
- h[7] -= carry[7] << 25
- carry[8] = h[8] >> 26
- h[9] += carry[8]
- h[8] -= carry[8] << 26
- carry[9] = h[9] >> 25
- h[9] -= carry[9] << 25
- // h10 = carry9
-
- // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
- // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
- // evidently 2^255 h10-2^255 q = 0.
- // Goal: Output h[0]+...+2^230 h[9].
-
- s[0] = byte(h[0] >> 0)
- s[1] = byte(h[0] >> 8)
- s[2] = byte(h[0] >> 16)
- s[3] = byte((h[0] >> 24) | (h[1] << 2))
- s[4] = byte(h[1] >> 6)
- s[5] = byte(h[1] >> 14)
- s[6] = byte((h[1] >> 22) | (h[2] << 3))
- s[7] = byte(h[2] >> 5)
- s[8] = byte(h[2] >> 13)
- s[9] = byte((h[2] >> 21) | (h[3] << 5))
- s[10] = byte(h[3] >> 3)
- s[11] = byte(h[3] >> 11)
- s[12] = byte((h[3] >> 19) | (h[4] << 6))
- s[13] = byte(h[4] >> 2)
- s[14] = byte(h[4] >> 10)
- s[15] = byte(h[4] >> 18)
- s[16] = byte(h[5] >> 0)
- s[17] = byte(h[5] >> 8)
- s[18] = byte(h[5] >> 16)
- s[19] = byte((h[5] >> 24) | (h[6] << 1))
- s[20] = byte(h[6] >> 7)
- s[21] = byte(h[6] >> 15)
- s[22] = byte((h[6] >> 23) | (h[7] << 3))
- s[23] = byte(h[7] >> 5)
- s[24] = byte(h[7] >> 13)
- s[25] = byte((h[7] >> 21) | (h[8] << 4))
- s[26] = byte(h[8] >> 4)
- s[27] = byte(h[8] >> 12)
- s[28] = byte((h[8] >> 20) | (h[9] << 6))
- s[29] = byte(h[9] >> 2)
- s[30] = byte(h[9] >> 10)
- s[31] = byte(h[9] >> 18)
-}
-
-// feMul calculates h = f * g
-// Can overlap h with f or g.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Notes on implementation strategy:
-//
-// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models.
-//
-// Most multiplications by 2 and 19 are 32-bit precomputations;
-// cheaper than 64-bit postcomputations.
-//
-// There is one remaining multiplication by 19 in the carry chain;
-// one *19 precomputation can be merged into this,
-// but the resulting data flow is considerably less clean.
-//
-// There are 12 carries below.
-// 10 of them are 2-way parallelizable and vectorizable.
-// Can get away with 11 carries, but then data flow is much deeper.
-//
-// With tighter constraints on inputs can squeeze carries into int32.
-func feMul(h, f, g *fieldElement) {
- f0 := f[0]
- f1 := f[1]
- f2 := f[2]
- f3 := f[3]
- f4 := f[4]
- f5 := f[5]
- f6 := f[6]
- f7 := f[7]
- f8 := f[8]
- f9 := f[9]
- g0 := g[0]
- g1 := g[1]
- g2 := g[2]
- g3 := g[3]
- g4 := g[4]
- g5 := g[5]
- g6 := g[6]
- g7 := g[7]
- g8 := g[8]
- g9 := g[9]
- g1_19 := 19 * g1 // 1.4*2^29
- g2_19 := 19 * g2 // 1.4*2^30; still ok
- g3_19 := 19 * g3
- g4_19 := 19 * g4
- g5_19 := 19 * g5
- g6_19 := 19 * g6
- g7_19 := 19 * g7
- g8_19 := 19 * g8
- g9_19 := 19 * g9
- f1_2 := 2 * f1
- f3_2 := 2 * f3
- f5_2 := 2 * f5
- f7_2 := 2 * f7
- f9_2 := 2 * f9
- f0g0 := int64(f0) * int64(g0)
- f0g1 := int64(f0) * int64(g1)
- f0g2 := int64(f0) * int64(g2)
- f0g3 := int64(f0) * int64(g3)
- f0g4 := int64(f0) * int64(g4)
- f0g5 := int64(f0) * int64(g5)
- f0g6 := int64(f0) * int64(g6)
- f0g7 := int64(f0) * int64(g7)
- f0g8 := int64(f0) * int64(g8)
- f0g9 := int64(f0) * int64(g9)
- f1g0 := int64(f1) * int64(g0)
- f1g1_2 := int64(f1_2) * int64(g1)
- f1g2 := int64(f1) * int64(g2)
- f1g3_2 := int64(f1_2) * int64(g3)
- f1g4 := int64(f1) * int64(g4)
- f1g5_2 := int64(f1_2) * int64(g5)
- f1g6 := int64(f1) * int64(g6)
- f1g7_2 := int64(f1_2) * int64(g7)
- f1g8 := int64(f1) * int64(g8)
- f1g9_38 := int64(f1_2) * int64(g9_19)
- f2g0 := int64(f2) * int64(g0)
- f2g1 := int64(f2) * int64(g1)
- f2g2 := int64(f2) * int64(g2)
- f2g3 := int64(f2) * int64(g3)
- f2g4 := int64(f2) * int64(g4)
- f2g5 := int64(f2) * int64(g5)
- f2g6 := int64(f2) * int64(g6)
- f2g7 := int64(f2) * int64(g7)
- f2g8_19 := int64(f2) * int64(g8_19)
- f2g9_19 := int64(f2) * int64(g9_19)
- f3g0 := int64(f3) * int64(g0)
- f3g1_2 := int64(f3_2) * int64(g1)
- f3g2 := int64(f3) * int64(g2)
- f3g3_2 := int64(f3_2) * int64(g3)
- f3g4 := int64(f3) * int64(g4)
- f3g5_2 := int64(f3_2) * int64(g5)
- f3g6 := int64(f3) * int64(g6)
- f3g7_38 := int64(f3_2) * int64(g7_19)
- f3g8_19 := int64(f3) * int64(g8_19)
- f3g9_38 := int64(f3_2) * int64(g9_19)
- f4g0 := int64(f4) * int64(g0)
- f4g1 := int64(f4) * int64(g1)
- f4g2 := int64(f4) * int64(g2)
- f4g3 := int64(f4) * int64(g3)
- f4g4 := int64(f4) * int64(g4)
- f4g5 := int64(f4) * int64(g5)
- f4g6_19 := int64(f4) * int64(g6_19)
- f4g7_19 := int64(f4) * int64(g7_19)
- f4g8_19 := int64(f4) * int64(g8_19)
- f4g9_19 := int64(f4) * int64(g9_19)
- f5g0 := int64(f5) * int64(g0)
- f5g1_2 := int64(f5_2) * int64(g1)
- f5g2 := int64(f5) * int64(g2)
- f5g3_2 := int64(f5_2) * int64(g3)
- f5g4 := int64(f5) * int64(g4)
- f5g5_38 := int64(f5_2) * int64(g5_19)
- f5g6_19 := int64(f5) * int64(g6_19)
- f5g7_38 := int64(f5_2) * int64(g7_19)
- f5g8_19 := int64(f5) * int64(g8_19)
- f5g9_38 := int64(f5_2) * int64(g9_19)
- f6g0 := int64(f6) * int64(g0)
- f6g1 := int64(f6) * int64(g1)
- f6g2 := int64(f6) * int64(g2)
- f6g3 := int64(f6) * int64(g3)
- f6g4_19 := int64(f6) * int64(g4_19)
- f6g5_19 := int64(f6) * int64(g5_19)
- f6g6_19 := int64(f6) * int64(g6_19)
- f6g7_19 := int64(f6) * int64(g7_19)
- f6g8_19 := int64(f6) * int64(g8_19)
- f6g9_19 := int64(f6) * int64(g9_19)
- f7g0 := int64(f7) * int64(g0)
- f7g1_2 := int64(f7_2) * int64(g1)
- f7g2 := int64(f7) * int64(g2)
- f7g3_38 := int64(f7_2) * int64(g3_19)
- f7g4_19 := int64(f7) * int64(g4_19)
- f7g5_38 := int64(f7_2) * int64(g5_19)
- f7g6_19 := int64(f7) * int64(g6_19)
- f7g7_38 := int64(f7_2) * int64(g7_19)
- f7g8_19 := int64(f7) * int64(g8_19)
- f7g9_38 := int64(f7_2) * int64(g9_19)
- f8g0 := int64(f8) * int64(g0)
- f8g1 := int64(f8) * int64(g1)
- f8g2_19 := int64(f8) * int64(g2_19)
- f8g3_19 := int64(f8) * int64(g3_19)
- f8g4_19 := int64(f8) * int64(g4_19)
- f8g5_19 := int64(f8) * int64(g5_19)
- f8g6_19 := int64(f8) * int64(g6_19)
- f8g7_19 := int64(f8) * int64(g7_19)
- f8g8_19 := int64(f8) * int64(g8_19)
- f8g9_19 := int64(f8) * int64(g9_19)
- f9g0 := int64(f9) * int64(g0)
- f9g1_38 := int64(f9_2) * int64(g1_19)
- f9g2_19 := int64(f9) * int64(g2_19)
- f9g3_38 := int64(f9_2) * int64(g3_19)
- f9g4_19 := int64(f9) * int64(g4_19)
- f9g5_38 := int64(f9_2) * int64(g5_19)
- f9g6_19 := int64(f9) * int64(g6_19)
- f9g7_38 := int64(f9_2) * int64(g7_19)
- f9g8_19 := int64(f9) * int64(g8_19)
- f9g9_38 := int64(f9_2) * int64(g9_19)
- h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
- h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
- h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
- h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
- h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
- h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
- h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
- h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
- h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
- h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
- var carry [10]int64
-
- // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
- // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
- // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
- // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- // |h0| <= 2^25
- // |h4| <= 2^25
- // |h1| <= 1.51*2^58
- // |h5| <= 1.51*2^58
-
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- // |h1| <= 2^24; from now on fits into int32
- // |h5| <= 2^24; from now on fits into int32
- // |h2| <= 1.21*2^59
- // |h6| <= 1.21*2^59
-
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- // |h2| <= 2^25; from now on fits into int32 unchanged
- // |h6| <= 2^25; from now on fits into int32 unchanged
- // |h3| <= 1.51*2^58
- // |h7| <= 1.51*2^58
-
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
- // |h3| <= 2^24; from now on fits into int32 unchanged
- // |h7| <= 2^24; from now on fits into int32 unchanged
- // |h4| <= 1.52*2^33
- // |h8| <= 1.52*2^33
-
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
- // |h4| <= 2^25; from now on fits into int32 unchanged
- // |h8| <= 2^25; from now on fits into int32 unchanged
- // |h5| <= 1.01*2^24
- // |h9| <= 1.51*2^58
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- // |h9| <= 2^24; from now on fits into int32 unchanged
- // |h0| <= 1.8*2^37
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- // |h0| <= 2^25; from now on fits into int32 unchanged
- // |h1| <= 1.01*2^24
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feSquare calculates h = f*f. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feSquare(h, f *fieldElement) {
- f0 := f[0]
- f1 := f[1]
- f2 := f[2]
- f3 := f[3]
- f4 := f[4]
- f5 := f[5]
- f6 := f[6]
- f7 := f[7]
- f8 := f[8]
- f9 := f[9]
- f0_2 := 2 * f0
- f1_2 := 2 * f1
- f2_2 := 2 * f2
- f3_2 := 2 * f3
- f4_2 := 2 * f4
- f5_2 := 2 * f5
- f6_2 := 2 * f6
- f7_2 := 2 * f7
- f5_38 := 38 * f5 // 1.31*2^30
- f6_19 := 19 * f6 // 1.31*2^30
- f7_38 := 38 * f7 // 1.31*2^30
- f8_19 := 19 * f8 // 1.31*2^30
- f9_38 := 38 * f9 // 1.31*2^30
- f0f0 := int64(f0) * int64(f0)
- f0f1_2 := int64(f0_2) * int64(f1)
- f0f2_2 := int64(f0_2) * int64(f2)
- f0f3_2 := int64(f0_2) * int64(f3)
- f0f4_2 := int64(f0_2) * int64(f4)
- f0f5_2 := int64(f0_2) * int64(f5)
- f0f6_2 := int64(f0_2) * int64(f6)
- f0f7_2 := int64(f0_2) * int64(f7)
- f0f8_2 := int64(f0_2) * int64(f8)
- f0f9_2 := int64(f0_2) * int64(f9)
- f1f1_2 := int64(f1_2) * int64(f1)
- f1f2_2 := int64(f1_2) * int64(f2)
- f1f3_4 := int64(f1_2) * int64(f3_2)
- f1f4_2 := int64(f1_2) * int64(f4)
- f1f5_4 := int64(f1_2) * int64(f5_2)
- f1f6_2 := int64(f1_2) * int64(f6)
- f1f7_4 := int64(f1_2) * int64(f7_2)
- f1f8_2 := int64(f1_2) * int64(f8)
- f1f9_76 := int64(f1_2) * int64(f9_38)
- f2f2 := int64(f2) * int64(f2)
- f2f3_2 := int64(f2_2) * int64(f3)
- f2f4_2 := int64(f2_2) * int64(f4)
- f2f5_2 := int64(f2_2) * int64(f5)
- f2f6_2 := int64(f2_2) * int64(f6)
- f2f7_2 := int64(f2_2) * int64(f7)
- f2f8_38 := int64(f2_2) * int64(f8_19)
- f2f9_38 := int64(f2) * int64(f9_38)
- f3f3_2 := int64(f3_2) * int64(f3)
- f3f4_2 := int64(f3_2) * int64(f4)
- f3f5_4 := int64(f3_2) * int64(f5_2)
- f3f6_2 := int64(f3_2) * int64(f6)
- f3f7_76 := int64(f3_2) * int64(f7_38)
- f3f8_38 := int64(f3_2) * int64(f8_19)
- f3f9_76 := int64(f3_2) * int64(f9_38)
- f4f4 := int64(f4) * int64(f4)
- f4f5_2 := int64(f4_2) * int64(f5)
- f4f6_38 := int64(f4_2) * int64(f6_19)
- f4f7_38 := int64(f4) * int64(f7_38)
- f4f8_38 := int64(f4_2) * int64(f8_19)
- f4f9_38 := int64(f4) * int64(f9_38)
- f5f5_38 := int64(f5) * int64(f5_38)
- f5f6_38 := int64(f5_2) * int64(f6_19)
- f5f7_76 := int64(f5_2) * int64(f7_38)
- f5f8_38 := int64(f5_2) * int64(f8_19)
- f5f9_76 := int64(f5_2) * int64(f9_38)
- f6f6_19 := int64(f6) * int64(f6_19)
- f6f7_38 := int64(f6) * int64(f7_38)
- f6f8_38 := int64(f6_2) * int64(f8_19)
- f6f9_38 := int64(f6) * int64(f9_38)
- f7f7_38 := int64(f7) * int64(f7_38)
- f7f8_38 := int64(f7_2) * int64(f8_19)
- f7f9_76 := int64(f7_2) * int64(f9_38)
- f8f8_19 := int64(f8) * int64(f8_19)
- f8f9_38 := int64(f8) * int64(f9_38)
- f9f9_38 := int64(f9) * int64(f9_38)
- h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
- h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
- h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
- h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
- h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
- h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
- h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
- h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
- h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
- h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
- var carry [10]int64
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
-
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
-
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
-
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feMul121666 calculates h = f * 121666. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feMul121666(h, f *fieldElement) {
- h0 := int64(f[0]) * 121666
- h1 := int64(f[1]) * 121666
- h2 := int64(f[2]) * 121666
- h3 := int64(f[3]) * 121666
- h4 := int64(f[4]) * 121666
- h5 := int64(f[5]) * 121666
- h6 := int64(f[6]) * 121666
- h7 := int64(f[7]) * 121666
- h8 := int64(f[8]) * 121666
- h9 := int64(f[9]) * 121666
- var carry [10]int64
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feInvert sets out = z^-1.
-func feInvert(out, z *fieldElement) {
- var t0, t1, t2, t3 fieldElement
- var i int
-
- feSquare(&t0, z)
- for i = 1; i < 1; i++ {
- feSquare(&t0, &t0)
- }
- feSquare(&t1, &t0)
- for i = 1; i < 2; i++ {
- feSquare(&t1, &t1)
- }
- feMul(&t1, z, &t1)
- feMul(&t0, &t0, &t1)
- feSquare(&t2, &t0)
- for i = 1; i < 1; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t1, &t2)
- feSquare(&t2, &t1)
- for i = 1; i < 5; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t2, &t1)
- for i = 1; i < 10; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t2, &t2, &t1)
- feSquare(&t3, &t2)
- for i = 1; i < 20; i++ {
- feSquare(&t3, &t3)
- }
- feMul(&t2, &t3, &t2)
- feSquare(&t2, &t2)
- for i = 1; i < 10; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t2, &t1)
- for i = 1; i < 50; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t2, &t2, &t1)
- feSquare(&t3, &t2)
- for i = 1; i < 100; i++ {
- feSquare(&t3, &t3)
- }
- feMul(&t2, &t3, &t2)
- feSquare(&t2, &t2)
- for i = 1; i < 50; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t1, &t1)
- for i = 1; i < 5; i++ {
- feSquare(&t1, &t1)
- }
- feMul(out, &t1, &t0)
-}
-
-func scalarMult(out, in, base *[32]byte) {
- var e [32]byte
-
- copy(e[:], in[:])
- e[0] &= 248
- e[31] &= 127
- e[31] |= 64
-
- var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
- feFromBytes(&x1, base)
- feOne(&x2)
- feCopy(&x3, &x1)
- feOne(&z3)
-
- swap := int32(0)
- for pos := 254; pos >= 0; pos-- {
- b := e[pos/8] >> uint(pos&7)
- b &= 1
- swap ^= int32(b)
- feCSwap(&x2, &x3, swap)
- feCSwap(&z2, &z3, swap)
- swap = int32(b)
-
- feSub(&tmp0, &x3, &z3)
- feSub(&tmp1, &x2, &z2)
- feAdd(&x2, &x2, &z2)
- feAdd(&z2, &x3, &z3)
- feMul(&z3, &tmp0, &x2)
- feMul(&z2, &z2, &tmp1)
- feSquare(&tmp0, &tmp1)
- feSquare(&tmp1, &x2)
- feAdd(&x3, &z3, &z2)
- feSub(&z2, &z3, &z2)
- feMul(&x2, &tmp1, &tmp0)
- feSub(&tmp1, &tmp1, &tmp0)
- feSquare(&z2, &z2)
- feMul121666(&z3, &tmp1)
- feSquare(&x3, &x3)
- feAdd(&tmp0, &tmp0, &z3)
- feMul(&z3, &x1, &z2)
- feMul(&z2, &tmp1, &tmp0)
- }
-
- feCSwap(&x2, &x3, swap)
- feCSwap(&z2, &z3, swap)
-
- feInvert(&z2, &z2)
- feMul(&x2, &x2, &z2)
- feToBytes(out, &x2)
-}
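
One note on the portable implementation deleted above: feInvert uses only squarings and multiplications because, in the prime field underlying curve25519, inversion can be done by exponentiation (Fermat's little theorem). In LaTeX form:

    z^{-1} \equiv z^{p-2} \pmod{p}, \qquad p = 2^{255} - 19

so the addition chain in feInvert raises z to the power 2^{255} - 21, and the Montgomery ladder in scalarMult needs only one such inversion at the end.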
diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
deleted file mode 100644
index da9b10d9c..000000000
--- a/vendor/golang.org/x/crypto/curve25519/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package curve25519 provides an implementation of scalar multiplication on
-// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
-package curve25519 // import "golang.org/x/crypto/curve25519"
-
-// basePoint is the x coordinate of the generator of the curve.
-var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-
-// ScalarMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points and all values are in little-endian form.
-func ScalarMult(dst, in, base *[32]byte) {
- scalarMult(dst, in, base)
-}
-
-// ScalarBaseMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points, base is the standard generator and all values
-// are in little-endian form.
-func ScalarBaseMult(dst, in *[32]byte) {
- ScalarMult(dst, in, &basePoint)
-}
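
The doc.go removed above documented the package-level ScalarMult/ScalarBaseMult API. A rough usage sketch of that API, as it was documented there, for a Diffie-Hellman style exchange; the peer public key below is a zero-filled placeholder and would come from the other party in real code.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var priv, pub [32]byte
	if _, err := rand.Read(priv[:]); err != nil {
		panic(err)
	}
	curve25519.ScalarBaseMult(&pub, &priv) // pub = priv * base point

	var peerPub [32]byte // placeholder: the other party's public key
	var shared [32]byte
	curve25519.ScalarMult(&shared, &priv, &peerPub) // shared = priv * peerPub
	fmt.Printf("public: %x\n", pub[:8])
	fmt.Printf("shared: %x\n", shared[:8])
}

Later releases of the package also expose an X25519 helper that returns an error for degenerate inputs; where available it is generally preferred over these fixed-array functions.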
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
deleted file mode 100644
index 390816106..000000000
--- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func freeze(inout *[5]uint64)
-TEXT ·freeze(SB),7,$0-8
- MOVQ inout+0(FP), DI
-
- MOVQ 0(DI),SI
- MOVQ 8(DI),DX
- MOVQ 16(DI),CX
- MOVQ 24(DI),R8
- MOVQ 32(DI),R9
- MOVQ $REDMASK51,AX
- MOVQ AX,R10
- SUBQ $18,R10
- MOVQ $3,R11
-REDUCELOOP:
- MOVQ SI,R12
- SHRQ $51,R12
- ANDQ AX,SI
- ADDQ R12,DX
- MOVQ DX,R12
- SHRQ $51,R12
- ANDQ AX,DX
- ADDQ R12,CX
- MOVQ CX,R12
- SHRQ $51,R12
- ANDQ AX,CX
- ADDQ R12,R8
- MOVQ R8,R12
- SHRQ $51,R12
- ANDQ AX,R8
- ADDQ R12,R9
- MOVQ R9,R12
- SHRQ $51,R12
- ANDQ AX,R9
- IMUL3Q $19,R12,R12
- ADDQ R12,SI
- SUBQ $1,R11
- JA REDUCELOOP
- MOVQ $1,R12
- CMPQ R10,SI
- CMOVQLT R11,R12
- CMPQ AX,DX
- CMOVQNE R11,R12
- CMPQ AX,CX
- CMOVQNE R11,R12
- CMPQ AX,R8
- CMOVQNE R11,R12
- CMPQ AX,R9
- CMOVQNE R11,R12
- NEGQ R12
- ANDQ R12,AX
- ANDQ R12,R10
- SUBQ R10,SI
- SUBQ AX,DX
- SUBQ AX,CX
- SUBQ AX,R8
- SUBQ AX,R9
- MOVQ SI,0(DI)
- MOVQ DX,8(DI)
- MOVQ CX,16(DI)
- MOVQ R8,24(DI)
- MOVQ R9,32(DI)
- RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
deleted file mode 100644
index 9e9040b25..000000000
--- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
+++ /dev/null
@@ -1,1377 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func ladderstep(inout *[5][5]uint64)
-TEXT ·ladderstep(SB),0,$296-8
- MOVQ inout+0(FP),DI
-
- MOVQ 40(DI),SI
- MOVQ 48(DI),DX
- MOVQ 56(DI),CX
- MOVQ 64(DI),R8
- MOVQ 72(DI),R9
- MOVQ SI,AX
- MOVQ DX,R10
- MOVQ CX,R11
- MOVQ R8,R12
- MOVQ R9,R13
- ADDQ ·_2P0(SB),AX
- ADDQ ·_2P1234(SB),R10
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 80(DI),SI
- ADDQ 88(DI),DX
- ADDQ 96(DI),CX
- ADDQ 104(DI),R8
- ADDQ 112(DI),R9
- SUBQ 80(DI),AX
- SUBQ 88(DI),R10
- SUBQ 96(DI),R11
- SUBQ 104(DI),R12
- SUBQ 112(DI),R13
- MOVQ SI,0(SP)
- MOVQ DX,8(SP)
- MOVQ CX,16(SP)
- MOVQ R8,24(SP)
- MOVQ R9,32(SP)
- MOVQ AX,40(SP)
- MOVQ R10,48(SP)
- MOVQ R11,56(SP)
- MOVQ R12,64(SP)
- MOVQ R13,72(SP)
- MOVQ 40(SP),AX
- MULQ 40(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 48(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 56(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 64(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 72(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 48(SP),AX
- MULQ 48(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 48(SP),AX
- SHLQ $1,AX
- MULQ 56(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 48(SP),AX
- SHLQ $1,AX
- MULQ 64(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 48(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 56(SP),AX
- MULQ 56(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 56(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 64(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 56(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 64(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 64(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 64(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 72(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,80(SP)
- MOVQ R8,88(SP)
- MOVQ R9,96(SP)
- MOVQ AX,104(SP)
- MOVQ R10,112(SP)
- MOVQ 0(SP),AX
- MULQ 0(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 8(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 16(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 24(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 32(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 8(SP),AX
- MULQ 8(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- SHLQ $1,AX
- MULQ 16(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SP),AX
- SHLQ $1,AX
- MULQ 24(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 16(SP),AX
- MULQ 16(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 16(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 24(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 16(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 24(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 24(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 32(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,120(SP)
- MOVQ R8,128(SP)
- MOVQ R9,136(SP)
- MOVQ AX,144(SP)
- MOVQ R10,152(SP)
- MOVQ SI,SI
- MOVQ R8,DX
- MOVQ R9,CX
- MOVQ AX,R8
- MOVQ R10,R9
- ADDQ ·_2P0(SB),SI
- ADDQ ·_2P1234(SB),DX
- ADDQ ·_2P1234(SB),CX
- ADDQ ·_2P1234(SB),R8
- ADDQ ·_2P1234(SB),R9
- SUBQ 80(SP),SI
- SUBQ 88(SP),DX
- SUBQ 96(SP),CX
- SUBQ 104(SP),R8
- SUBQ 112(SP),R9
- MOVQ SI,160(SP)
- MOVQ DX,168(SP)
- MOVQ CX,176(SP)
- MOVQ R8,184(SP)
- MOVQ R9,192(SP)
- MOVQ 120(DI),SI
- MOVQ 128(DI),DX
- MOVQ 136(DI),CX
- MOVQ 144(DI),R8
- MOVQ 152(DI),R9
- MOVQ SI,AX
- MOVQ DX,R10
- MOVQ CX,R11
- MOVQ R8,R12
- MOVQ R9,R13
- ADDQ ·_2P0(SB),AX
- ADDQ ·_2P1234(SB),R10
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 160(DI),SI
- ADDQ 168(DI),DX
- ADDQ 176(DI),CX
- ADDQ 184(DI),R8
- ADDQ 192(DI),R9
- SUBQ 160(DI),AX
- SUBQ 168(DI),R10
- SUBQ 176(DI),R11
- SUBQ 184(DI),R12
- SUBQ 192(DI),R13
- MOVQ SI,200(SP)
- MOVQ DX,208(SP)
- MOVQ CX,216(SP)
- MOVQ R8,224(SP)
- MOVQ R9,232(SP)
- MOVQ AX,240(SP)
- MOVQ R10,248(SP)
- MOVQ R11,256(SP)
- MOVQ R12,264(SP)
- MOVQ R13,272(SP)
- MOVQ 224(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,280(SP)
- MULQ 56(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 232(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,288(SP)
- MULQ 48(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 200(SP),AX
- MULQ 40(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 200(SP),AX
- MULQ 48(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 200(SP),AX
- MULQ 56(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 200(SP),AX
- MULQ 64(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 200(SP),AX
- MULQ 72(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 208(SP),AX
- MULQ 40(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 208(SP),AX
- MULQ 48(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 208(SP),AX
- MULQ 56(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 208(SP),AX
- MULQ 64(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 208(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 216(SP),AX
- MULQ 40(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 216(SP),AX
- MULQ 48(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 216(SP),AX
- MULQ 56(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 216(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 64(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 216(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 224(SP),AX
- MULQ 40(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 224(SP),AX
- MULQ 48(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 280(SP),AX
- MULQ 64(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 280(SP),AX
- MULQ 72(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 232(SP),AX
- MULQ 40(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 288(SP),AX
- MULQ 56(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 288(SP),AX
- MULQ 64(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 288(SP),AX
- MULQ 72(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,40(SP)
- MOVQ R8,48(SP)
- MOVQ R9,56(SP)
- MOVQ AX,64(SP)
- MOVQ R10,72(SP)
- MOVQ 264(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,200(SP)
- MULQ 16(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 272(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,208(SP)
- MULQ 8(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 240(SP),AX
- MULQ 0(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 240(SP),AX
- MULQ 8(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 240(SP),AX
- MULQ 16(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 240(SP),AX
- MULQ 24(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 240(SP),AX
- MULQ 32(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 248(SP),AX
- MULQ 0(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 248(SP),AX
- MULQ 8(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 248(SP),AX
- MULQ 16(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 248(SP),AX
- MULQ 24(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 248(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 256(SP),AX
- MULQ 0(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 256(SP),AX
- MULQ 8(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 256(SP),AX
- MULQ 16(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 256(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 256(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 264(SP),AX
- MULQ 0(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 264(SP),AX
- MULQ 8(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 200(SP),AX
- MULQ 24(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 200(SP),AX
- MULQ 32(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 272(SP),AX
- MULQ 0(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 208(SP),AX
- MULQ 16(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 208(SP),AX
- MULQ 24(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 208(SP),AX
- MULQ 32(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,DX
- MOVQ R8,CX
- MOVQ R9,R11
- MOVQ AX,R12
- MOVQ R10,R13
- ADDQ ·_2P0(SB),DX
- ADDQ ·_2P1234(SB),CX
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 40(SP),SI
- ADDQ 48(SP),R8
- ADDQ 56(SP),R9
- ADDQ 64(SP),AX
- ADDQ 72(SP),R10
- SUBQ 40(SP),DX
- SUBQ 48(SP),CX
- SUBQ 56(SP),R11
- SUBQ 64(SP),R12
- SUBQ 72(SP),R13
- MOVQ SI,120(DI)
- MOVQ R8,128(DI)
- MOVQ R9,136(DI)
- MOVQ AX,144(DI)
- MOVQ R10,152(DI)
- MOVQ DX,160(DI)
- MOVQ CX,168(DI)
- MOVQ R11,176(DI)
- MOVQ R12,184(DI)
- MOVQ R13,192(DI)
- MOVQ 120(DI),AX
- MULQ 120(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 128(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 136(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 144(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 152(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 128(DI),AX
- MULQ 128(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 128(DI),AX
- SHLQ $1,AX
- MULQ 136(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 128(DI),AX
- SHLQ $1,AX
- MULQ 144(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 128(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(DI),AX
- MULQ 136(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 136(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 144(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 144(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 152(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 152(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,120(DI)
- MOVQ R8,128(DI)
- MOVQ R9,136(DI)
- MOVQ AX,144(DI)
- MOVQ R10,152(DI)
- MOVQ 160(DI),AX
- MULQ 160(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 168(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 176(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 184(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 192(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 168(DI),AX
- MULQ 168(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 168(DI),AX
- SHLQ $1,AX
- MULQ 176(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 168(DI),AX
- SHLQ $1,AX
- MULQ 184(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 168(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),AX
- MULQ 176(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 176(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 184(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 184(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 192(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,160(DI)
- MOVQ R8,168(DI)
- MOVQ R9,176(DI)
- MOVQ AX,184(DI)
- MOVQ R10,192(DI)
- MOVQ 184(DI),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 16(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 192(DI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 8(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 160(DI),AX
- MULQ 0(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 160(DI),AX
- MULQ 8(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 160(DI),AX
- MULQ 16(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 160(DI),AX
- MULQ 24(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 160(DI),AX
- MULQ 32(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 168(DI),AX
- MULQ 0(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 168(DI),AX
- MULQ 8(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 168(DI),AX
- MULQ 16(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 168(DI),AX
- MULQ 24(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 168(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),AX
- MULQ 0(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 176(DI),AX
- MULQ 8(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 176(DI),AX
- MULQ 16(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 176(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),AX
- MULQ 0(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 184(DI),AX
- MULQ 8(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 24(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 32(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 192(DI),AX
- MULQ 0(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 16(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 24(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 32(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,160(DI)
- MOVQ R8,168(DI)
- MOVQ R9,176(DI)
- MOVQ AX,184(DI)
- MOVQ R10,192(DI)
- MOVQ 144(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 96(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 152(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 88(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 120(SP),AX
- MULQ 80(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 120(SP),AX
- MULQ 88(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 120(SP),AX
- MULQ 96(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 120(SP),AX
- MULQ 104(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 120(SP),AX
- MULQ 112(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 128(SP),AX
- MULQ 80(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 128(SP),AX
- MULQ 88(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 128(SP),AX
- MULQ 96(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 128(SP),AX
- MULQ 104(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 128(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 112(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(SP),AX
- MULQ 80(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 136(SP),AX
- MULQ 88(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 136(SP),AX
- MULQ 96(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 136(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 104(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 112(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(SP),AX
- MULQ 80(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 144(SP),AX
- MULQ 88(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 104(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 112(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 152(SP),AX
- MULQ 80(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 96(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 104(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 112(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,40(DI)
- MOVQ R8,48(DI)
- MOVQ R9,56(DI)
- MOVQ AX,64(DI)
- MOVQ R10,72(DI)
- MOVQ 160(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 168(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,CX
- MOVQ DX,R8
- MOVQ 176(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R8
- MOVQ DX,R9
- MOVQ 184(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R9
- MOVQ DX,R10
- MOVQ 192(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R10
- IMUL3Q $19,DX,DX
- ADDQ DX,SI
- ADDQ 80(SP),SI
- ADDQ 88(SP),CX
- ADDQ 96(SP),R8
- ADDQ 104(SP),R9
- ADDQ 112(SP),R10
- MOVQ SI,80(DI)
- MOVQ CX,88(DI)
- MOVQ R8,96(DI)
- MOVQ R9,104(DI)
- MOVQ R10,112(DI)
- MOVQ 104(DI),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 176(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 112(DI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 168(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 80(DI),AX
- MULQ 160(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 80(DI),AX
- MULQ 168(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 80(DI),AX
- MULQ 176(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 80(DI),AX
- MULQ 184(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 80(DI),AX
- MULQ 192(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 88(DI),AX
- MULQ 160(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 88(DI),AX
- MULQ 168(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 88(DI),AX
- MULQ 176(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 88(DI),AX
- MULQ 184(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 88(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 96(DI),AX
- MULQ 160(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 96(DI),AX
- MULQ 168(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 96(DI),AX
- MULQ 176(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 96(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 184(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 96(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 104(DI),AX
- MULQ 160(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 104(DI),AX
- MULQ 168(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 184(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 192(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 112(DI),AX
- MULQ 160(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 176(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 184(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 192(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,80(DI)
- MOVQ R8,88(DI)
- MOVQ R9,96(DI)
- MOVQ AX,104(DI)
- MOVQ R10,112(DI)
- RET
diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
deleted file mode 100644
index 5822bd533..000000000
--- a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!gccgo,!appengine
-
-package curve25519
-
-// These functions are implemented in the .s files. The names of the functions
-// in the rest of the file are also taken from the SUPERCOP sources to help
-// people following along.
-
-//go:noescape
-
-func cswap(inout *[5]uint64, v uint64)
-
-//go:noescape
-
-func ladderstep(inout *[5][5]uint64)
-
-//go:noescape
-
-func freeze(inout *[5]uint64)
-
-//go:noescape
-
-func mul(dest, a, b *[5]uint64)
-
-//go:noescape
-
-func square(out, in *[5]uint64)
-
-// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
-func mladder(xr, zr *[5]uint64, s *[32]byte) {
- var work [5][5]uint64
-
- work[0] = *xr
- setint(&work[1], 1)
- setint(&work[2], 0)
- work[3] = *xr
- setint(&work[4], 1)
-
- j := uint(6)
- var prevbit byte
-
- for i := 31; i >= 0; i-- {
- for j < 8 {
- bit := ((*s)[i] >> j) & 1
- swap := bit ^ prevbit
- prevbit = bit
- cswap(&work[1], uint64(swap))
- ladderstep(&work)
- j--
- }
- j = 7
- }
-
- *xr = work[1]
- *zr = work[2]
-}
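
The mladder comment above describes a Montgomery ladder; the inner loop terminates through uint wrap-around when j decrements past zero, so the bit-scanning order can be easy to miss. Below is a minimal standalone sketch (illustration only, not part of the deleted file) that reproduces the same order: bit 254 of the clamped scalar down to bit 0, one cswap/ladderstep pair per bit.

package main

import "fmt"

// Sketch only: the bit order mladder walks. The top byte is scanned from
// bit 6 (bit 255 is always clear after clamping), then bits 7..0 of every
// lower byte, most significant byte first.
func main() {
	var visited []int
	j := 6
	for i := 31; i >= 0; i-- {
		for ; j >= 0; j-- {
			visited = append(visited, i*8+j)
		}
		j = 7
	}
	fmt.Println(len(visited), "ladder steps; first bit", visited[0], "last bit", visited[len(visited)-1])
	// Output: 255 ladder steps; first bit 254 last bit 0
}
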
-
-func scalarMult(out, in, base *[32]byte) {
- var e [32]byte
- copy(e[:], (*in)[:])
- e[0] &= 248
- e[31] &= 127
- e[31] |= 64
-
- var t, z [5]uint64
- unpack(&t, base)
- mladder(&t, &z, &e)
- invert(&z, &z)
- mul(&t, &t, &z)
- pack(out, &t)
-}
-
-func setint(r *[5]uint64, v uint64) {
- r[0] = v
- r[1] = 0
- r[2] = 0
- r[3] = 0
- r[4] = 0
-}
-
-// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
-// order.
-func unpack(r *[5]uint64, x *[32]byte) {
- r[0] = uint64(x[0]) |
- uint64(x[1])<<8 |
- uint64(x[2])<<16 |
- uint64(x[3])<<24 |
- uint64(x[4])<<32 |
- uint64(x[5])<<40 |
- uint64(x[6]&7)<<48
-
- r[1] = uint64(x[6])>>3 |
- uint64(x[7])<<5 |
- uint64(x[8])<<13 |
- uint64(x[9])<<21 |
- uint64(x[10])<<29 |
- uint64(x[11])<<37 |
- uint64(x[12]&63)<<45
-
- r[2] = uint64(x[12])>>6 |
- uint64(x[13])<<2 |
- uint64(x[14])<<10 |
- uint64(x[15])<<18 |
- uint64(x[16])<<26 |
- uint64(x[17])<<34 |
- uint64(x[18])<<42 |
- uint64(x[19]&1)<<50
-
- r[3] = uint64(x[19])>>1 |
- uint64(x[20])<<7 |
- uint64(x[21])<<15 |
- uint64(x[22])<<23 |
- uint64(x[23])<<31 |
- uint64(x[24])<<39 |
- uint64(x[25]&15)<<47
-
- r[4] = uint64(x[25])>>4 |
- uint64(x[26])<<4 |
- uint64(x[27])<<12 |
- uint64(x[28])<<20 |
- uint64(x[29])<<28 |
- uint64(x[30])<<36 |
- uint64(x[31]&127)<<44
-}
-
-// pack sets out = x where out is the usual, little-endian form of the 5,
-// 51-bit limbs in x.
-func pack(out *[32]byte, x *[5]uint64) {
- t := *x
- freeze(&t)
-
- out[0] = byte(t[0])
- out[1] = byte(t[0] >> 8)
- out[2] = byte(t[0] >> 16)
- out[3] = byte(t[0] >> 24)
- out[4] = byte(t[0] >> 32)
- out[5] = byte(t[0] >> 40)
- out[6] = byte(t[0] >> 48)
-
- out[6] ^= byte(t[1]<<3) & 0xf8
- out[7] = byte(t[1] >> 5)
- out[8] = byte(t[1] >> 13)
- out[9] = byte(t[1] >> 21)
- out[10] = byte(t[1] >> 29)
- out[11] = byte(t[1] >> 37)
- out[12] = byte(t[1] >> 45)
-
- out[12] ^= byte(t[2]<<6) & 0xc0
- out[13] = byte(t[2] >> 2)
- out[14] = byte(t[2] >> 10)
- out[15] = byte(t[2] >> 18)
- out[16] = byte(t[2] >> 26)
- out[17] = byte(t[2] >> 34)
- out[18] = byte(t[2] >> 42)
- out[19] = byte(t[2] >> 50)
-
- out[19] ^= byte(t[3]<<1) & 0xfe
- out[20] = byte(t[3] >> 7)
- out[21] = byte(t[3] >> 15)
- out[22] = byte(t[3] >> 23)
- out[23] = byte(t[3] >> 31)
- out[24] = byte(t[3] >> 39)
- out[25] = byte(t[3] >> 47)
-
- out[25] ^= byte(t[4]<<4) & 0xf0
- out[26] = byte(t[4] >> 4)
- out[27] = byte(t[4] >> 12)
- out[28] = byte(t[4] >> 20)
- out[29] = byte(t[4] >> 28)
- out[30] = byte(t[4] >> 36)
- out[31] = byte(t[4] >> 44)
-}
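
unpack and pack above convert between the 32-byte little-endian encoding and five 51-bit limbs, limb k carrying weight 2^(51k). The following big.Int round-trip sketch (an arbitrary 255-bit value with the top bit cleared, as scalarMult assumes) checks that splitting at 51-bit boundaries and reassembling recovers the original value; it is a cross-check of the limb layout, not a reimplementation of the routines.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// A throwaway 32-byte little-endian value with the top bit cleared.
	var x [32]byte
	for i := range x {
		x[i] = byte(i * 7)
	}
	x[31] &= 127

	// Reference value: big.Int wants big-endian bytes, so reverse.
	be := make([]byte, 32)
	for i := range be {
		be[i] = x[31-i]
	}
	v := new(big.Int).SetBytes(be)

	// Split into five 51-bit limbs (limb k has weight 2^(51k)) and
	// reassemble with Horner's rule; the result must equal v.
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 51), big.NewInt(1))
	var limbs [5]*big.Int
	t := new(big.Int).Set(v)
	for k := 0; k < 5; k++ {
		limbs[k] = new(big.Int).And(t, mask)
		t.Rsh(t, 51)
	}
	sum := new(big.Int)
	for k := 4; k >= 0; k-- {
		sum.Lsh(sum, 51)
		sum.Add(sum, limbs[k])
	}
	fmt.Println("round trip ok:", sum.Cmp(v) == 0)
}
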
-
-// invert calculates r = x^-1 mod p using Fermat's little theorem.
-func invert(r *[5]uint64, x *[5]uint64) {
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
-
- square(&z2, x) /* 2 */
- square(&t, &z2) /* 4 */
- square(&t, &t) /* 8 */
- mul(&z9, &t, x) /* 9 */
- mul(&z11, &z9, &z2) /* 11 */
- square(&t, &z11) /* 22 */
- mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
-
- square(&t, &z2_5_0) /* 2^6 - 2^1 */
- for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
- square(&t, &t)
- }
- mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
-
- square(&t, &z2_10_0) /* 2^11 - 2^1 */
- for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
- square(&t, &t)
- }
- mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
-
- square(&t, &z2_20_0) /* 2^21 - 2^1 */
- for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
-
- square(&t, &t) /* 2^41 - 2^1 */
- for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
- square(&t, &t)
- }
- mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
-
- square(&t, &z2_50_0) /* 2^51 - 2^1 */
- for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
- square(&t, &t)
- }
- mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
-
- square(&t, &z2_100_0) /* 2^101 - 2^1 */
- for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
-
- square(&t, &t) /* 2^201 - 2^1 */
- for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
-
- square(&t, &t) /* 2^251 - 2^1 */
- square(&t, &t) /* 2^252 - 2^2 */
- square(&t, &t) /* 2^253 - 2^3 */
-
- square(&t, &t) /* 2^254 - 2^4 */
-
- square(&t, &t) /* 2^255 - 2^5 */
- mul(r, &t, &z11) /* 2^255 - 21 */
-}
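
The addition chain in invert evaluates x^(2^255 - 21) = x^(p-2) with p = 2^255 - 19, which is x^-1 by Fermat's little theorem. A small math/big cross-check of that identity for one arbitrary value (sketch only):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// Fermat: x^(p-2) = x^-1 (mod p) for x != 0, and p-2 = 2^255 - 21,
	// the exponent the comments above count out step by step.
	x := big.NewInt(123456789)
	viaFermat := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
	viaEuclid := new(big.Int).ModInverse(x, p)
	fmt.Println("x^(p-2) == x^-1 mod p:", viaFermat.Cmp(viaEuclid) == 0)
}
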
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
deleted file mode 100644
index 5ce80a2e5..000000000
--- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func mul(dest, a, b *[5]uint64)
-TEXT ·mul(SB),0,$16-24
- MOVQ dest+0(FP), DI
- MOVQ a+8(FP), SI
- MOVQ b+16(FP), DX
-
- MOVQ DX,CX
- MOVQ 24(SI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,0(SP)
- MULQ 16(CX)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 32(SI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 8(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SI),AX
- MULQ 0(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SI),AX
- MULQ 8(CX)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 0(SI),AX
- MULQ 16(CX)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 0(SI),AX
- MULQ 24(CX)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 0(SI),AX
- MULQ 32(CX)
- MOVQ AX,BX
- MOVQ DX,BP
- MOVQ 8(SI),AX
- MULQ 0(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SI),AX
- MULQ 8(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SI),AX
- MULQ 16(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SI),AX
- MULQ 24(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 8(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 16(SI),AX
- MULQ 0(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 16(SI),AX
- MULQ 8(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 16(SI),AX
- MULQ 16(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 16(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 16(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 24(SI),AX
- MULQ 0(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 24(SI),AX
- MULQ 8(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 0(SP),AX
- MULQ 24(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 0(SP),AX
- MULQ 32(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 32(SI),AX
- MULQ 0(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 8(SP),AX
- MULQ 16(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 24(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SP),AX
- MULQ 32(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ $REDMASK51,SI
- SHLQ $13,R9:R8
- ANDQ SI,R8
- SHLQ $13,R11:R10
- ANDQ SI,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ SI,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ SI,R14
- ADDQ R13,R14
- SHLQ $13,BP:BX
- ANDQ SI,BX
- ADDQ R15,BX
- IMUL3Q $19,BP,DX
- ADDQ DX,R8
- MOVQ R8,DX
- SHRQ $51,DX
- ADDQ R10,DX
- MOVQ DX,CX
- SHRQ $51,DX
- ANDQ SI,R8
- ADDQ R12,DX
- MOVQ DX,R9
- SHRQ $51,DX
- ANDQ SI,CX
- ADDQ R14,DX
- MOVQ DX,AX
- SHRQ $51,DX
- ANDQ SI,R9
- ADDQ BX,DX
- MOVQ DX,R10
- SHRQ $51,DX
- ANDQ SI,AX
- IMUL3Q $19,DX,DX
- ADDQ DX,R8
- ANDQ SI,R10
- MOVQ R8,0(DI)
- MOVQ CX,8(DI)
- MOVQ R9,16(DI)
- MOVQ AX,24(DI)
- MOVQ R10,32(DI)
- RET
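
The IMUL3Q $19 steps in ·mul (and the $38 = 2·19 variants in ·square in the next file) fold high partial products back into range because the limbs have weights 2^(51k) and 2^255 is congruent to 19 modulo p. A big.Int sketch of that congruence, independent of the assembly:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	nineteen := big.NewInt(19)
	two255 := new(big.Int).Lsh(big.NewInt(1), 255)
	p := new(big.Int).Sub(two255, nineteen) // p = 2^255 - 19

	// Limbs carry weights 2^0, 2^51, ..., 2^204, so a cross product of
	// limbs i and j with i+j >= 5 lands at weight 2^255 or above and can
	// be pulled back down by a factor of 19 (or 38 for doubled terms).
	fmt.Println("2^255 mod p =", new(big.Int).Mod(two255, p)) // prints 19

	a := big.NewInt(987654321)
	lhs := new(big.Int).Mod(new(big.Int).Mul(a, two255), p)
	rhs := new(big.Int).Mod(new(big.Int).Mul(a, nineteen), p)
	fmt.Println("a*2^255 == a*19 mod p:", lhs.Cmp(rhs) == 0)
}
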
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
deleted file mode 100644
index 12f73734f..000000000
--- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func square(out, in *[5]uint64)
-TEXT ·square(SB),7,$0-16
- MOVQ out+0(FP), DI
- MOVQ in+8(FP), SI
-
- MOVQ 0(SI),AX
- MULQ 0(SI)
- MOVQ AX,CX
- MOVQ DX,R8
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 8(SI)
- MOVQ AX,R9
- MOVQ DX,R10
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 16(SI)
- MOVQ AX,R11
- MOVQ DX,R12
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 24(SI)
- MOVQ AX,R13
- MOVQ DX,R14
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 32(SI)
- MOVQ AX,R15
- MOVQ DX,BX
- MOVQ 8(SI),AX
- MULQ 8(SI)
- ADDQ AX,R11
- ADCQ DX,R12
- MOVQ 8(SI),AX
- SHLQ $1,AX
- MULQ 16(SI)
- ADDQ AX,R13
- ADCQ DX,R14
- MOVQ 8(SI),AX
- SHLQ $1,AX
- MULQ 24(SI)
- ADDQ AX,R15
- ADCQ DX,BX
- MOVQ 8(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,CX
- ADCQ DX,R8
- MOVQ 16(SI),AX
- MULQ 16(SI)
- ADDQ AX,R15
- ADCQ DX,BX
- MOVQ 16(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 24(SI)
- ADDQ AX,CX
- ADCQ DX,R8
- MOVQ 16(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,R9
- ADCQ DX,R10
- MOVQ 24(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SI)
- ADDQ AX,R9
- ADCQ DX,R10
- MOVQ 24(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,R11
- ADCQ DX,R12
- MOVQ 32(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SI)
- ADDQ AX,R13
- ADCQ DX,R14
- MOVQ $REDMASK51,SI
- SHLQ $13,R8:CX
- ANDQ SI,CX
- SHLQ $13,R10:R9
- ANDQ SI,R9
- ADDQ R8,R9
- SHLQ $13,R12:R11
- ANDQ SI,R11
- ADDQ R10,R11
- SHLQ $13,R14:R13
- ANDQ SI,R13
- ADDQ R12,R13
- SHLQ $13,BX:R15
- ANDQ SI,R15
- ADDQ R14,R15
- IMUL3Q $19,BX,DX
- ADDQ DX,CX
- MOVQ CX,DX
- SHRQ $51,DX
- ADDQ R9,DX
- ANDQ SI,CX
- MOVQ DX,R8
- SHRQ $51,DX
- ADDQ R11,DX
- ANDQ SI,R8
- MOVQ DX,R9
- SHRQ $51,DX
- ADDQ R13,DX
- ANDQ SI,R9
- MOVQ DX,AX
- SHRQ $51,DX
- ADDQ R15,DX
- ANDQ SI,AX
- MOVQ DX,R10
- SHRQ $51,DX
- IMUL3Q $19,DX,DX
- ADDQ DX,CX
- ANDQ SI,R10
- MOVQ CX,0(DI)
- MOVQ R8,8(DI)
- MOVQ R9,16(DI)
- MOVQ AX,24(DI)
- MOVQ R10,32(DI)
- RET
diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go
index 3e2518600..faa2fb369 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -504,7 +504,7 @@ const defaultRSAKeyBits = 2048
// which may be empty but must not contain any of "()<>\x00".
// If config is nil, sensible defaults will be used.
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
- currentTime := config.Now()
+ creationTime := config.Now()
bits := defaultRSAKeyBits
if config != nil && config.RSABits != 0 {
@@ -525,8 +525,8 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
e := &Entity{
- PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
- PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
+ PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
+ PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
Identities: make(map[string]*Identity),
}
isPrimaryId := true
@@ -534,7 +534,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
- CreationTime: currentTime,
+ CreationTime: creationTime,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
@@ -563,10 +563,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
- PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
- PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
+ PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
+ PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
Sig: &packet.Signature{
- CreationTime: currentTime,
+ CreationTime: creationTime,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
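
The keys.go hunk above only renames currentTime to creationTime inside NewEntity; behaviour is unchanged and the value still comes from config.Now(). A hedged usage sketch follows (the name and email are made up, and it assumes packet.Config's Time hook, which is what config.Now() reads) showing how a caller pins that creation time.

package main

import (
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Pin the clock that config.Now() reads; the key and self-signature
	// creation times then come out deterministic. Name and email are
	// placeholders for illustration only.
	cfg := &packet.Config{
		Time: func() time.Time {
			return time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
		},
	}
	entity, err := openpgp.NewEntity("Example User", "", "user@example.com", cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("primary key created at:", entity.PrimaryKey.CreationTime)
}
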
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index bd31cceac..6f8ec0938 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -36,49 +36,49 @@ type PrivateKey struct {
iv []byte
}
-func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
- pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
+ pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
-func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
- pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
+ pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
-func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
- pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
+ pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
-func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
+func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
- pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
+ pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
-func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
+func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
// In general, the public Keys should be used as pointers. We still
// type-switch on the values, for backwards-compatibility.
switch pubkey := signer.Public().(type) {
case *rsa.PublicKey:
- pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
+ pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
case rsa.PublicKey:
- pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
+ pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
case *ecdsa.PublicKey:
- pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
+ pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
case ecdsa.PublicKey:
- pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
+ pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
default:
panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
}
diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c
deleted file mode 100644
index b3ca072d4..000000000
--- a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code can be compiled and used to test the otr package against libotr.
-// See otr_test.go.
-
-// +build ignore
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <proto.h>
-#include <message.h>
-#include <privkey.h>
-
-static int g_session_established = 0;
-
-OtrlPolicy policy(void *opdata, ConnContext *context) {
- return OTRL_POLICY_ALWAYS;
-}
-
-int is_logged_in(void *opdata, const char *accountname, const char *protocol,
- const char *recipient) {
- return 1;
-}
-
-void inject_message(void *opdata, const char *accountname, const char *protocol,
- const char *recipient, const char *message) {
- printf("%s\n", message);
- fflush(stdout);
- fprintf(stderr, "libotr helper sent: %s\n", message);
-}
-
-void update_context_list(void *opdata) {}
-
-void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname,
- const char *protocol, const char *username,
- unsigned char fingerprint[20]) {
- fprintf(stderr, "NEW FINGERPRINT\n");
- g_session_established = 1;
-}
-
-void write_fingerprints(void *opdata) {}
-
-void gone_secure(void *opdata, ConnContext *context) {}
-
-void gone_insecure(void *opdata, ConnContext *context) {}
-
-void still_secure(void *opdata, ConnContext *context, int is_reply) {}
-
-int max_message_size(void *opdata, ConnContext *context) { return 99999; }
-
-const char *account_name(void *opdata, const char *account,
- const char *protocol) {
- return "ACCOUNT";
-}
-
-void account_name_free(void *opdata, const char *account_name) {}
-
-const char *error_message(void *opdata, ConnContext *context,
- OtrlErrorCode err_code) {
- return "ERR";
-}
-
-void error_message_free(void *opdata, const char *msg) {}
-
-void resent_msg_prefix_free(void *opdata, const char *prefix) {}
-
-void handle_smp_event(void *opdata, OtrlSMPEvent smp_event,
- ConnContext *context, unsigned short progress_event,
- char *question) {}
-
-void handle_msg_event(void *opdata, OtrlMessageEvent msg_event,
- ConnContext *context, const char *message,
- gcry_error_t err) {
- fprintf(stderr, "msg event: %d %s\n", msg_event, message);
-}
-
-OtrlMessageAppOps uiops = {
- policy,
- NULL,
- is_logged_in,
- inject_message,
- update_context_list,
- new_fingerprint,
- write_fingerprints,
- gone_secure,
- gone_insecure,
- still_secure,
- max_message_size,
- account_name,
- account_name_free,
- NULL, /* received_symkey */
- error_message,
- error_message_free,
- NULL, /* resent_msg_prefix */
- resent_msg_prefix_free,
- handle_smp_event,
- handle_msg_event,
- NULL /* create_instag */,
- NULL /* convert_msg */,
- NULL /* convert_free */,
- NULL /* timer_control */,
-};
-
-static const char kPrivateKeyData[] =
- "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa "
- "(p "
- "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F"
- "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E"
- "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB"
- "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q "
- "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g "
- "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F"
- "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F"
- "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57"
- "597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y "
- "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF"
- "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93"
- "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A"
- "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x "
- "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n";
-
-int main() {
- OTRL_INIT;
-
- // We have to write the private key information to a file because the libotr
- // API demands a filename to read from.
- const char *tmpdir = "/tmp";
- if (getenv("TMP")) {
- tmpdir = getenv("TMP");
- }
-
- char private_key_file[256];
- snprintf(private_key_file, sizeof(private_key_file),
- "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir);
- int fd = mkstemp(private_key_file);
- if (fd == -1) {
- perror("creating temp file");
- }
- write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1);
- close(fd);
-
- OtrlUserState userstate = otrl_userstate_create();
- otrl_privkey_read(userstate, private_key_file);
- unlink(private_key_file);
-
- fprintf(stderr, "libotr helper started\n");
-
- char buf[4096];
-
- for (;;) {
- char *message = fgets(buf, sizeof(buf), stdin);
- if (strlen(message) == 0) {
- break;
- }
- message[strlen(message) - 1] = 0;
- fprintf(stderr, "libotr helper got: %s\n", message);
-
- char *newmessage = NULL;
- OtrlTLV *tlvs;
- int ignore_message = otrl_message_receiving(
- userstate, &uiops, NULL, "account", "proto", "peer", message,
- &newmessage, &tlvs, NULL, NULL, NULL);
- if (tlvs) {
- otrl_tlv_free(tlvs);
- }
-
- if (newmessage != NULL) {
- fprintf(stderr, "libotr got: %s\n", newmessage);
- otrl_message_free(newmessage);
-
- gcry_error_t err;
- char *newmessage = NULL;
-
- err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto",
- "peer", 0, "test message", NULL, &newmessage,
- OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL);
- if (newmessage == NULL) {
- fprintf(stderr, "libotr didn't encrypt message\n");
- return 1;
- }
- write(1, newmessage, strlen(newmessage));
- write(1, "\n", 1);
- fprintf(stderr, "libotr sent: %s\n", newmessage);
- otrl_message_free(newmessage);
-
- g_session_established = 0;
- write(1, "?OTRv2?\n", 8);
- fprintf(stderr, "libotr sent: ?OTRv2\n");
- }
- }
-
- return 0;
-}
diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go
deleted file mode 100644
index 173b753db..000000000
--- a/vendor/golang.org/x/crypto/otr/otr.go
+++ /dev/null
@@ -1,1415 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package otr implements the Off The Record protocol as specified in
-// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html
-package otr // import "golang.org/x/crypto/otr"
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/hmac"
- "crypto/rand"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/subtle"
- "encoding/base64"
- "encoding/hex"
- "errors"
- "hash"
- "io"
- "math/big"
- "strconv"
-)
-
-// SecurityChange describes a change in the security state of a Conversation.
-type SecurityChange int
-
-const (
- NoChange SecurityChange = iota
- // NewKeys indicates that a key exchange has completed. This occurs
- // when a conversation first becomes encrypted, and when the keys are
- // renegotiated within an encrypted conversation.
- NewKeys
- // SMPSecretNeeded indicates that the peer has started an
- // authentication and that we need to supply a secret. Call SMPQuestion
- // to get the optional, human readable challenge and then Authenticate
- // to supply the matching secret.
- SMPSecretNeeded
- // SMPComplete indicates that an authentication completed. The identity
- // of the peer has now been confirmed.
- SMPComplete
- // SMPFailed indicates that an authentication failed.
- SMPFailed
- // ConversationEnded indicates that the peer ended the secure
- // conversation.
- ConversationEnded
-)
-
-// QueryMessage can be sent to a peer to start an OTR conversation.
-var QueryMessage = "?OTRv2?"
-
-// ErrorPrefix can be used to make an OTR error by appending an error message
-// to it.
-var ErrorPrefix = "?OTR Error:"
-
-var (
- fragmentPartSeparator = []byte(",")
- fragmentPrefix = []byte("?OTR,")
- msgPrefix = []byte("?OTR:")
- queryMarker = []byte("?OTR")
-)
-
-// isQuery attempts to parse an OTR query from msg and returns the greatest
-// common version, or 0 if msg is not an OTR query.
-func isQuery(msg []byte) (greatestCommonVersion int) {
- pos := bytes.Index(msg, queryMarker)
- if pos == -1 {
- return 0
- }
- for i, c := range msg[pos+len(queryMarker):] {
- if i == 0 {
- if c == '?' {
- // Indicates support for version 1, but we don't
- // implement that.
- continue
- }
-
- if c != 'v' {
- // Invalid message
- return 0
- }
-
- continue
- }
-
- if c == '?' {
- // End of message
- return
- }
-
- if c == ' ' || c == '\t' {
- // Probably an invalid message
- return 0
- }
-
- if c == '2' {
- greatestCommonVersion = 2
- }
- }
-
- return 0
-}
-
-const (
- statePlaintext = iota
- stateEncrypted
- stateFinished
-)
-
-const (
- authStateNone = iota
- authStateAwaitingDHKey
- authStateAwaitingRevealSig
- authStateAwaitingSig
-)
-
-const (
- msgTypeDHCommit = 2
- msgTypeData = 3
- msgTypeDHKey = 10
- msgTypeRevealSig = 17
- msgTypeSig = 18
-)
-
-const (
- // If the requested fragment size is less than this, it will be ignored.
- minFragmentSize = 18
- // Messages are padded to a multiple of this number of bytes.
- paddingGranularity = 256
- // The number of bytes in a Diffie-Hellman private value (320-bits).
- dhPrivateBytes = 40
- // The number of bytes needed to represent an element of the DSA
- // subgroup (160-bits).
- dsaSubgroupBytes = 20
- // The number of bytes of the MAC that are sent on the wire (160-bits).
- macPrefixBytes = 20
-)
-
-// These are the global, common group parameters for OTR.
-var (
- p *big.Int // group prime
- g *big.Int // group generator
- q *big.Int // group order
- pMinus2 *big.Int
-)
-
-func init() {
- p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16)
- q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16)
- g = new(big.Int).SetInt64(2)
- pMinus2 = new(big.Int).Sub(p, g)
-}
-
-// Conversation represents a relation with a peer. The zero value is a valid
-// Conversation, although PrivateKey must be set.
-//
-// When communicating with a peer, all inbound messages should be passed to
-// Conversation.Receive and all outbound messages to Conversation.Send. The
-// Conversation will take care of maintaining the encryption state and
-// negotiating encryption as needed.
-type Conversation struct {
- // PrivateKey contains the private key to use to sign key exchanges.
- PrivateKey *PrivateKey
-
- // Rand can be set to override the entropy source. Otherwise,
- // crypto/rand will be used.
- Rand io.Reader
- // If FragmentSize is set, all messages produced by Receive and Send
- // will be fragmented into messages of, at most, this number of bytes.
- FragmentSize int
-
- // Once Receive has returned NewKeys once, the following fields are
- // valid.
- SSID [8]byte
- TheirPublicKey PublicKey
-
- state, authState int
-
- r [16]byte
- x, y *big.Int
- gx, gy *big.Int
- gxBytes []byte
- digest [sha256.Size]byte
-
- revealKeys, sigKeys akeKeys
-
- myKeyId uint32
- myCurrentDHPub *big.Int
- myCurrentDHPriv *big.Int
- myLastDHPub *big.Int
- myLastDHPriv *big.Int
-
- theirKeyId uint32
- theirCurrentDHPub *big.Int
- theirLastDHPub *big.Int
-
- keySlots [4]keySlot
-
- myCounter [8]byte
- theirLastCtr [8]byte
- oldMACs []byte
-
- k, n int // fragment state
- frag []byte
-
- smp smpState
-}
-
-// A keySlot contains key material for a specific (their keyid, my keyid) pair.
-type keySlot struct {
- // used is true if this slot is valid. If false, it's free for reuse.
- used bool
- theirKeyId uint32
- myKeyId uint32
- sendAESKey, recvAESKey []byte
- sendMACKey, recvMACKey []byte
- theirLastCtr [8]byte
-}
-
-// akeKeys are generated during key exchange. There's one set for the reveal
-// signature message and another for the signature message. In the protocol
-// spec the latter are indicated with a prime mark.
-type akeKeys struct {
- c [16]byte
- m1, m2 [32]byte
-}
-
-func (c *Conversation) rand() io.Reader {
- if c.Rand != nil {
- return c.Rand
- }
- return rand.Reader
-}
-
-func (c *Conversation) randMPI(buf []byte) *big.Int {
- _, err := io.ReadFull(c.rand(), buf)
- if err != nil {
- panic("otr: short read from random source")
- }
-
- return new(big.Int).SetBytes(buf)
-}
-
-// tlv represents the type-length value from the protocol.
-type tlv struct {
- typ, length uint16
- data []byte
-}
-
-const (
- tlvTypePadding = 0
- tlvTypeDisconnected = 1
- tlvTypeSMP1 = 2
- tlvTypeSMP2 = 3
- tlvTypeSMP3 = 4
- tlvTypeSMP4 = 5
- tlvTypeSMPAbort = 6
- tlvTypeSMP1WithQuestion = 7
-)
-
-// Receive handles a message from a peer. It returns a human readable message,
-// an indicator of whether that message was encrypted, a hint about the
-// encryption state and zero or more messages to send back to the peer.
-// These messages do not need to be passed to Send before transmission.
-func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) {
- if bytes.HasPrefix(in, fragmentPrefix) {
- in, err = c.processFragment(in)
- if in == nil || err != nil {
- return
- }
- }
-
- if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' {
- in = in[len(msgPrefix) : len(in)-1]
- } else if version := isQuery(in); version > 0 {
- c.authState = authStateAwaitingDHKey
- c.reset()
- toSend = c.encode(c.generateDHCommit())
- return
- } else {
- // plaintext message
- out = in
- return
- }
-
- msg := make([]byte, base64.StdEncoding.DecodedLen(len(in)))
- msgLen, err := base64.StdEncoding.Decode(msg, in)
- if err != nil {
- err = errors.New("otr: invalid base64 encoding in message")
- return
- }
- msg = msg[:msgLen]
-
- // The first two bytes are the protocol version (2)
- if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 {
- err = errors.New("otr: invalid OTR message")
- return
- }
-
- msgType := int(msg[2])
- msg = msg[3:]
-
- switch msgType {
- case msgTypeDHCommit:
- switch c.authState {
- case authStateNone:
- c.authState = authStateAwaitingRevealSig
- if err = c.processDHCommit(msg); err != nil {
- return
- }
- c.reset()
- toSend = c.encode(c.generateDHKey())
- return
- case authStateAwaitingDHKey:
- // This is a 'SYN-crossing'. The greater digest wins.
- var cmp int
- if cmp, err = c.compareToDHCommit(msg); err != nil {
- return
- }
- if cmp > 0 {
- // We win. Retransmit DH commit.
- toSend = c.encode(c.serializeDHCommit())
- return
- } else {
- // They win. We forget about our DH commit.
- c.authState = authStateAwaitingRevealSig
- if err = c.processDHCommit(msg); err != nil {
- return
- }
- c.reset()
- toSend = c.encode(c.generateDHKey())
- return
- }
- case authStateAwaitingRevealSig:
- if err = c.processDHCommit(msg); err != nil {
- return
- }
- toSend = c.encode(c.serializeDHKey())
- case authStateAwaitingSig:
- if err = c.processDHCommit(msg); err != nil {
- return
- }
- c.reset()
- toSend = c.encode(c.generateDHKey())
- c.authState = authStateAwaitingRevealSig
- default:
- panic("bad state")
- }
- case msgTypeDHKey:
- switch c.authState {
- case authStateAwaitingDHKey:
- var isSame bool
- if isSame, err = c.processDHKey(msg); err != nil {
- return
- }
- if isSame {
- err = errors.New("otr: unexpected duplicate DH key")
- return
- }
- toSend = c.encode(c.generateRevealSig())
- c.authState = authStateAwaitingSig
- case authStateAwaitingSig:
- var isSame bool
- if isSame, err = c.processDHKey(msg); err != nil {
- return
- }
- if isSame {
- toSend = c.encode(c.serializeDHKey())
- }
- }
- case msgTypeRevealSig:
- if c.authState != authStateAwaitingRevealSig {
- return
- }
- if err = c.processRevealSig(msg); err != nil {
- return
- }
- toSend = c.encode(c.generateSig())
- c.authState = authStateNone
- c.state = stateEncrypted
- change = NewKeys
- case msgTypeSig:
- if c.authState != authStateAwaitingSig {
- return
- }
- if err = c.processSig(msg); err != nil {
- return
- }
- c.authState = authStateNone
- c.state = stateEncrypted
- change = NewKeys
- case msgTypeData:
- if c.state != stateEncrypted {
- err = errors.New("otr: encrypted message received without encrypted session established")
- return
- }
- var tlvs []tlv
- out, tlvs, err = c.processData(msg)
- encrypted = true
-
- EachTLV:
- for _, inTLV := range tlvs {
- switch inTLV.typ {
- case tlvTypeDisconnected:
- change = ConversationEnded
- c.state = stateFinished
- break EachTLV
- case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion:
- var reply tlv
- var complete bool
- reply, complete, err = c.processSMP(inTLV)
- if err == smpSecretMissingError {
- err = nil
- change = SMPSecretNeeded
- c.smp.saved = &inTLV
- return
- }
- if err == smpFailureError {
- err = nil
- change = SMPFailed
- } else if complete {
- change = SMPComplete
- }
- if reply.typ != 0 {
- toSend = c.encode(c.generateData(nil, &reply))
- }
- break EachTLV
- default:
- // skip unknown TLVs
- }
- }
- default:
- err = errors.New("otr: unknown message type " + strconv.Itoa(msgType))
- }
-
- return
-}
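
Receive, as its comment says, drives the whole protocol: delivering each side's toSend batch verbatim into the other side's Receive is enough to complete the key exchange. A loopback sketch of that loop (assuming the golang.org/x/crypto/otr import path shown above is still resolvable; error handling reduced to panics):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/otr"
)

func main() {
	var alice, bob otr.Conversation
	alice.PrivateKey, bob.PrivateKey = new(otr.PrivateKey), new(otr.PrivateKey)
	alice.PrivateKey.Generate(rand.Reader)
	bob.PrivateKey.Generate(rand.Reader)

	// Alice opens with the query message; after that, every toSend batch
	// is handed straight to the other side until both queues drain.
	toBob := [][]byte{[]byte(otr.QueryMessage)}
	var toAlice [][]byte
	for len(toBob) > 0 || len(toAlice) > 0 {
		toAlice = nil
		for _, m := range toBob {
			_, _, _, send, err := bob.Receive(m)
			if err != nil {
				panic(err)
			}
			toAlice = append(toAlice, send...)
		}
		toBob = nil
		for _, m := range toAlice {
			_, _, _, send, err := alice.Receive(m)
			if err != nil {
				panic(err)
			}
			toBob = append(toBob, send...)
		}
	}
	fmt.Println("both encrypted:", alice.IsEncrypted() && bob.IsEncrypted())
}
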
-
-// Send takes a human readable message from the local user, possibly encrypts
-// it and returns zero one or more messages to send to the peer.
-func (c *Conversation) Send(msg []byte) ([][]byte, error) {
- switch c.state {
- case statePlaintext:
- return [][]byte{msg}, nil
- case stateEncrypted:
- return c.encode(c.generateData(msg, nil)), nil
- case stateFinished:
- return nil, errors.New("otr: cannot send message because secure conversation has finished")
- }
-
- return nil, errors.New("otr: cannot send message in current state")
-}
-
-// SMPQuestion returns the human readable challenge question from the peer.
-// It's only valid after Receive has returned SMPSecretNeeded.
-func (c *Conversation) SMPQuestion() string {
- return c.smp.question
-}
-
-// Authenticate begins an authentication with the peer. Authentication involves
-// an optional challenge message and a shared secret. The authentication
-// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which
-// indicates that a new authentication is happening and thus this one was
-// aborted) or SMPFailed.
-func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) {
- if c.state != stateEncrypted {
- err = errors.New("otr: can't authenticate a peer without a secure conversation established")
- return
- }
-
- if c.smp.saved != nil {
- c.calcSMPSecret(mutualSecret, false /* they started it */)
-
- var out tlv
- var complete bool
- out, complete, err = c.processSMP(*c.smp.saved)
- if complete {
- panic("SMP completed on the first message")
- }
- c.smp.saved = nil
- if out.typ != 0 {
- toSend = c.encode(c.generateData(nil, &out))
- }
- return
- }
-
- c.calcSMPSecret(mutualSecret, true /* we started it */)
- outs := c.startSMP(question)
- for _, out := range outs {
- toSend = append(toSend, c.encode(c.generateData(nil, &out))...)
- }
- return
-}
-
-// End ends a secure conversation by generating a termination message for
-// the peer and switches to unencrypted communication.
-func (c *Conversation) End() (toSend [][]byte) {
- switch c.state {
- case statePlaintext:
- return nil
- case stateEncrypted:
- c.state = statePlaintext
- return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected}))
- case stateFinished:
- c.state = statePlaintext
- return nil
- }
- panic("unreachable")
-}
-
-// IsEncrypted returns true if a message passed to Send would be encrypted
-// before transmission. This result remains valid until the next call to
-// Receive or End, which may change the state of the Conversation.
-func (c *Conversation) IsEncrypted() bool {
- return c.state == stateEncrypted
-}
-
-var fragmentError = errors.New("otr: invalid OTR fragment")
-
-// processFragment processes a fragmented OTR message and possibly returns a
-// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is
-// the fragment number (starting from 1), n is the number of fragments in this
-// message and msg is a substring of the base64 encoded message.
-func (c *Conversation) processFragment(in []byte) (out []byte, err error) {
- in = in[len(fragmentPrefix):] // remove "?OTR,"
- parts := bytes.Split(in, fragmentPartSeparator)
- if len(parts) != 4 || len(parts[3]) != 0 {
- return nil, fragmentError
- }
-
- k, err := strconv.Atoi(string(parts[0]))
- if err != nil {
- return nil, fragmentError
- }
-
- n, err := strconv.Atoi(string(parts[1]))
- if err != nil {
- return nil, fragmentError
- }
-
- if k < 1 || n < 1 || k > n {
- return nil, fragmentError
- }
-
- if k == 1 {
- c.frag = append(c.frag[:0], parts[2]...)
- c.k, c.n = k, n
- } else if n == c.n && k == c.k+1 {
- c.frag = append(c.frag, parts[2]...)
- c.k++
- } else {
- c.frag = c.frag[:0]
- c.n, c.k = 0, 0
- }
-
- if c.n > 0 && c.k == c.n {
- c.n, c.k = 0, 0
- return c.frag, nil
- }
-
- return nil, nil
-}
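
processFragment expects the "?OTR,k,n,chunk," framing described in its comment. The sketch below builds such fragments for an already-encoded message so the framing is visible end to end; the split here is a plain ceiling split, whereas the deleted package's own encode method (further down) computes fragment sizes differently.

package main

import (
	"fmt"
	"strconv"
)

// fragment splits an already-encoded OTR message into "?OTR,k,n,chunk,"
// pieces, the framing processFragment expects (k and n are 1-based
// decimal counters).
func fragment(msg []byte, chunk int) [][]byte {
	n := (len(msg) + chunk - 1) / chunk
	var out [][]byte
	for k := 1; k <= n; k++ {
		end := k * chunk
		if end > len(msg) {
			end = len(msg)
		}
		piece := "?OTR," + strconv.Itoa(k) + "," + strconv.Itoa(n) + "," +
			string(msg[(k-1)*chunk:end]) + ","
		out = append(out, []byte(piece))
	}
	return out
}

func main() {
	// A made-up, shortened message body just to show the framing.
	for _, f := range fragment([]byte("?OTR:AAIDAAAA...xyz=."), 8) {
		fmt.Printf("%s\n", f)
	}
}
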
-
-func (c *Conversation) generateDHCommit() []byte {
- _, err := io.ReadFull(c.rand(), c.r[:])
- if err != nil {
- panic("otr: short read from random source")
- }
-
- var xBytes [dhPrivateBytes]byte
- c.x = c.randMPI(xBytes[:])
- c.gx = new(big.Int).Exp(g, c.x, p)
- c.gy = nil
- c.gxBytes = appendMPI(nil, c.gx)
-
- h := sha256.New()
- h.Write(c.gxBytes)
- h.Sum(c.digest[:0])
-
- aesCipher, err := aes.NewCipher(c.r[:])
- if err != nil {
- panic(err.Error())
- }
-
- var iv [aes.BlockSize]byte
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(c.gxBytes, c.gxBytes)
-
- return c.serializeDHCommit()
-}
-
-func (c *Conversation) serializeDHCommit() []byte {
- var ret []byte
- ret = appendU16(ret, 2) // protocol version
- ret = append(ret, msgTypeDHCommit)
- ret = appendData(ret, c.gxBytes)
- ret = appendData(ret, c.digest[:])
- return ret
-}
-
-func (c *Conversation) processDHCommit(in []byte) error {
- var ok1, ok2 bool
- c.gxBytes, in, ok1 = getData(in)
- digest, in, ok2 := getData(in)
- if !ok1 || !ok2 || len(in) > 0 {
- return errors.New("otr: corrupt DH commit message")
- }
- copy(c.digest[:], digest)
- return nil
-}
-
-func (c *Conversation) compareToDHCommit(in []byte) (int, error) {
- _, in, ok1 := getData(in)
- digest, in, ok2 := getData(in)
- if !ok1 || !ok2 || len(in) > 0 {
- return 0, errors.New("otr: corrupt DH commit message")
- }
- return bytes.Compare(c.digest[:], digest), nil
-}
-
-func (c *Conversation) generateDHKey() []byte {
- var yBytes [dhPrivateBytes]byte
- c.y = c.randMPI(yBytes[:])
- c.gy = new(big.Int).Exp(g, c.y, p)
- return c.serializeDHKey()
-}
-
-func (c *Conversation) serializeDHKey() []byte {
- var ret []byte
- ret = appendU16(ret, 2) // protocol version
- ret = append(ret, msgTypeDHKey)
- ret = appendMPI(ret, c.gy)
- return ret
-}
-
-func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) {
- gy, in, ok := getMPI(in)
- if !ok {
- err = errors.New("otr: corrupt DH key message")
- return
- }
- if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 {
- err = errors.New("otr: DH value out of range")
- return
- }
- if c.gy != nil {
- isSame = c.gy.Cmp(gy) == 0
- return
- }
- c.gy = gy
- return
-}
-
-func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) {
- var xb []byte
- xb = c.PrivateKey.PublicKey.Serialize(xb)
-
- var verifyData []byte
- if xFirst {
- verifyData = appendMPI(verifyData, c.gx)
- verifyData = appendMPI(verifyData, c.gy)
- } else {
- verifyData = appendMPI(verifyData, c.gy)
- verifyData = appendMPI(verifyData, c.gx)
- }
- verifyData = append(verifyData, xb...)
- verifyData = appendU32(verifyData, c.myKeyId)
-
- mac := hmac.New(sha256.New, keys.m1[:])
- mac.Write(verifyData)
- mb := mac.Sum(nil)
-
- xb = appendU32(xb, c.myKeyId)
- xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...)
-
- aesCipher, err := aes.NewCipher(keys.c[:])
- if err != nil {
- panic(err.Error())
- }
- var iv [aes.BlockSize]byte
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(xb, xb)
-
- mac = hmac.New(sha256.New, keys.m2[:])
- encryptedSig := appendData(nil, xb)
- mac.Write(encryptedSig)
-
- return encryptedSig, mac.Sum(nil)
-}
-
-func (c *Conversation) generateRevealSig() []byte {
- s := new(big.Int).Exp(c.gy, c.x, p)
- c.calcAKEKeys(s)
- c.myKeyId++
-
- encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */)
-
- c.myCurrentDHPub = c.gx
- c.myCurrentDHPriv = c.x
- c.rotateDHKeys()
- incCounter(&c.myCounter)
-
- var ret []byte
- ret = appendU16(ret, 2)
- ret = append(ret, msgTypeRevealSig)
- ret = appendData(ret, c.r[:])
- ret = append(ret, encryptedSig...)
- ret = append(ret, mac[:20]...)
- return ret
-}
-
-func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error {
- mac := hmac.New(sha256.New, keys.m2[:])
- mac.Write(appendData(nil, encryptedSig))
- myMAC := mac.Sum(nil)[:20]
-
- if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
- return errors.New("bad signature MAC in encrypted signature")
- }
-
- aesCipher, err := aes.NewCipher(keys.c[:])
- if err != nil {
- panic(err.Error())
- }
- var iv [aes.BlockSize]byte
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(encryptedSig, encryptedSig)
-
- sig := encryptedSig
- sig, ok1 := c.TheirPublicKey.Parse(sig)
- keyId, sig, ok2 := getU32(sig)
- if !ok1 || !ok2 {
- return errors.New("otr: corrupt encrypted signature")
- }
-
- var verifyData []byte
- if xFirst {
- verifyData = appendMPI(verifyData, c.gx)
- verifyData = appendMPI(verifyData, c.gy)
- } else {
- verifyData = appendMPI(verifyData, c.gy)
- verifyData = appendMPI(verifyData, c.gx)
- }
- verifyData = c.TheirPublicKey.Serialize(verifyData)
- verifyData = appendU32(verifyData, keyId)
-
- mac = hmac.New(sha256.New, keys.m1[:])
- mac.Write(verifyData)
- mb := mac.Sum(nil)
-
- sig, ok1 = c.TheirPublicKey.Verify(mb, sig)
- if !ok1 {
- return errors.New("bad signature in encrypted signature")
- }
- if len(sig) > 0 {
- return errors.New("corrupt encrypted signature")
- }
-
- c.theirKeyId = keyId
- zero(c.theirLastCtr[:])
- return nil
-}
-
-func (c *Conversation) processRevealSig(in []byte) error {
- r, in, ok1 := getData(in)
- encryptedSig, in, ok2 := getData(in)
- theirMAC := in
- if !ok1 || !ok2 || len(theirMAC) != 20 {
- return errors.New("otr: corrupt reveal signature message")
- }
-
- aesCipher, err := aes.NewCipher(r)
- if err != nil {
- return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error())
- }
- var iv [aes.BlockSize]byte
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(c.gxBytes, c.gxBytes)
- h := sha256.New()
- h.Write(c.gxBytes)
- digest := h.Sum(nil)
- if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 {
- return errors.New("otr: bad commit MAC in reveal signature message")
- }
- var rest []byte
- c.gx, rest, ok1 = getMPI(c.gxBytes)
- if !ok1 || len(rest) > 0 {
- return errors.New("otr: gx corrupt after decryption")
- }
- if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 {
- return errors.New("otr: DH value out of range")
- }
- s := new(big.Int).Exp(c.gx, c.y, p)
- c.calcAKEKeys(s)
-
- if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil {
- return errors.New("otr: in reveal signature message: " + err.Error())
- }
-
- c.theirCurrentDHPub = c.gx
- c.theirLastDHPub = nil
-
- return nil
-}
-
-func (c *Conversation) generateSig() []byte {
- c.myKeyId++
-
- encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */)
-
- c.myCurrentDHPub = c.gy
- c.myCurrentDHPriv = c.y
- c.rotateDHKeys()
- incCounter(&c.myCounter)
-
- var ret []byte
- ret = appendU16(ret, 2)
- ret = append(ret, msgTypeSig)
- ret = append(ret, encryptedSig...)
- ret = append(ret, mac[:macPrefixBytes]...)
- return ret
-}
-
-func (c *Conversation) processSig(in []byte) error {
- encryptedSig, in, ok1 := getData(in)
- theirMAC := in
- if !ok1 || len(theirMAC) != macPrefixBytes {
- return errors.New("otr: corrupt signature message")
- }
-
- if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil {
- return errors.New("otr: in signature message: " + err.Error())
- }
-
- c.theirCurrentDHPub = c.gy
- c.theirLastDHPub = nil
-
- return nil
-}
-
-func (c *Conversation) rotateDHKeys() {
- // evict slots using our retired key id
- for i := range c.keySlots {
- slot := &c.keySlots[i]
- if slot.used && slot.myKeyId == c.myKeyId-1 {
- slot.used = false
- c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
- }
- }
-
- c.myLastDHPriv = c.myCurrentDHPriv
- c.myLastDHPub = c.myCurrentDHPub
-
- var xBytes [dhPrivateBytes]byte
- c.myCurrentDHPriv = c.randMPI(xBytes[:])
- c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p)
- c.myKeyId++
-}
-
-func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) {
- origIn := in
- flags, in, ok1 := getU8(in)
- theirKeyId, in, ok2 := getU32(in)
- myKeyId, in, ok3 := getU32(in)
- y, in, ok4 := getMPI(in)
- counter, in, ok5 := getNBytes(in, 8)
- encrypted, in, ok6 := getData(in)
- macedData := origIn[:len(origIn)-len(in)]
- theirMAC, in, ok7 := getNBytes(in, macPrefixBytes)
- _, in, ok8 := getData(in)
- if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 {
- err = errors.New("otr: corrupt data message")
- return
- }
-
- ignoreErrors := flags&1 != 0
-
- slot, err := c.calcDataKeys(myKeyId, theirKeyId)
- if err != nil {
- if ignoreErrors {
- err = nil
- }
- return
- }
-
- mac := hmac.New(sha1.New, slot.recvMACKey)
- mac.Write([]byte{0, 2, 3})
- mac.Write(macedData)
- myMAC := mac.Sum(nil)
- if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
- if !ignoreErrors {
- err = errors.New("otr: bad MAC on data message")
- }
- return
- }
-
- if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 {
- err = errors.New("otr: counter regressed")
- return
- }
- copy(slot.theirLastCtr[:], counter)
-
- var iv [aes.BlockSize]byte
- copy(iv[:], counter)
- aesCipher, err := aes.NewCipher(slot.recvAESKey)
- if err != nil {
- panic(err.Error())
- }
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(encrypted, encrypted)
- decrypted := encrypted
-
- if myKeyId == c.myKeyId {
- c.rotateDHKeys()
- }
- if theirKeyId == c.theirKeyId {
- // evict slots using their retired key id
- for i := range c.keySlots {
- slot := &c.keySlots[i]
- if slot.used && slot.theirKeyId == theirKeyId-1 {
- slot.used = false
- c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
- }
- }
-
- c.theirLastDHPub = c.theirCurrentDHPub
- c.theirKeyId++
- c.theirCurrentDHPub = y
- }
-
- if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 {
- out = decrypted[:nulPos]
- tlvData := decrypted[nulPos+1:]
- for len(tlvData) > 0 {
- var t tlv
- var ok1, ok2, ok3 bool
-
- t.typ, tlvData, ok1 = getU16(tlvData)
- t.length, tlvData, ok2 = getU16(tlvData)
- t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length))
- if !ok1 || !ok2 || !ok3 {
- err = errors.New("otr: corrupt tlv data")
- return
- }
- tlvs = append(tlvs, t)
- }
- } else {
- out = decrypted
- }
-
- return
-}
-
-func (c *Conversation) generateData(msg []byte, extra *tlv) []byte {
- slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId)
- if err != nil {
- panic("otr: failed to generate sending keys: " + err.Error())
- }
-
- var plaintext []byte
- plaintext = append(plaintext, msg...)
- plaintext = append(plaintext, 0)
-
- padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity)
- plaintext = appendU16(plaintext, tlvTypePadding)
- plaintext = appendU16(plaintext, uint16(padding))
- for i := 0; i < padding; i++ {
- plaintext = append(plaintext, 0)
- }
-
- if extra != nil {
- plaintext = appendU16(plaintext, extra.typ)
- plaintext = appendU16(plaintext, uint16(len(extra.data)))
- plaintext = append(plaintext, extra.data...)
- }
-
- encrypted := make([]byte, len(plaintext))
-
- var iv [aes.BlockSize]byte
- copy(iv[:], c.myCounter[:])
- aesCipher, err := aes.NewCipher(slot.sendAESKey)
- if err != nil {
- panic(err.Error())
- }
- ctr := cipher.NewCTR(aesCipher, iv[:])
- ctr.XORKeyStream(encrypted, plaintext)
-
- var ret []byte
- ret = appendU16(ret, 2)
- ret = append(ret, msgTypeData)
- ret = append(ret, 0 /* flags */)
- ret = appendU32(ret, c.myKeyId-1)
- ret = appendU32(ret, c.theirKeyId)
- ret = appendMPI(ret, c.myCurrentDHPub)
- ret = append(ret, c.myCounter[:]...)
- ret = appendData(ret, encrypted)
-
- mac := hmac.New(sha1.New, slot.sendMACKey)
- mac.Write(ret)
- ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...)
- ret = appendData(ret, c.oldMACs)
- c.oldMACs = nil
- incCounter(&c.myCounter)
-
- return ret
-}
-
-func incCounter(counter *[8]byte) {
- for i := 7; i >= 0; i-- {
- counter[i]++
- if counter[i] > 0 {
- break
- }
- }
-}
-
-// calcDataKeys computes the keys used to encrypt a data message given the key
-// IDs.
-func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) {
- // Check for a cache hit.
- for i := range c.keySlots {
- slot = &c.keySlots[i]
- if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId {
- return
- }
- }
-
- // Find an empty slot to write into.
- slot = nil
- for i := range c.keySlots {
- if !c.keySlots[i].used {
- slot = &c.keySlots[i]
- break
- }
- }
- if slot == nil {
- return nil, errors.New("otr: internal error: no more key slots")
- }
-
- var myPriv, myPub, theirPub *big.Int
-
- if myKeyId == c.myKeyId {
- myPriv = c.myCurrentDHPriv
- myPub = c.myCurrentDHPub
- } else if myKeyId == c.myKeyId-1 {
- myPriv = c.myLastDHPriv
- myPub = c.myLastDHPub
- } else {
- err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10))
- return
- }
-
- if theirKeyId == c.theirKeyId {
- theirPub = c.theirCurrentDHPub
- } else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil {
- theirPub = c.theirLastDHPub
- } else {
- err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10))
- return
- }
-
- var sendPrefixByte, recvPrefixByte [1]byte
-
- if myPub.Cmp(theirPub) > 0 {
- // we're the high end
- sendPrefixByte[0], recvPrefixByte[0] = 1, 2
- } else {
- // we're the low end
- sendPrefixByte[0], recvPrefixByte[0] = 2, 1
- }
-
- s := new(big.Int).Exp(theirPub, myPriv, p)
- sBytes := appendMPI(nil, s)
-
- h := sha1.New()
- h.Write(sendPrefixByte[:])
- h.Write(sBytes)
- slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16]
-
- h.Reset()
- h.Write(slot.sendAESKey)
- slot.sendMACKey = h.Sum(slot.sendMACKey[:0])
-
- h.Reset()
- h.Write(recvPrefixByte[:])
- h.Write(sBytes)
- slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16]
-
- h.Reset()
- h.Write(slot.recvAESKey)
- slot.recvMACKey = h.Sum(slot.recvMACKey[:0])
-
- slot.theirKeyId = theirKeyId
- slot.myKeyId = myKeyId
- slot.used = true
-
- zero(slot.theirLastCtr[:])
- return
-}
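
calcDataKeys derives the four per-direction keys from the shared DH secret s: each AES key is the first 16 bytes of SHA-1(prefix byte || MPI(s)), each MAC key is the SHA-1 of the matching AES key, and the prefix byte depends on which side holds the numerically larger DH public value. A condensed standalone sketch of just that hashing, without the slot management:

package main

import (
	"crypto/sha1"
	"fmt"
	"math/big"
)

// appendMPI mirrors the 4-byte length-prefixed MPI encoding used above.
func appendMPI(out []byte, v *big.Int) []byte {
	b := v.Bytes()
	l := uint32(len(b))
	out = append(out, byte(l>>24), byte(l>>16), byte(l>>8), byte(l))
	return append(out, b...)
}

// deriveDataKeys condenses the hashing in calcDataKeys. high is true when
// our DH public value is numerically larger than the peer's.
func deriveDataKeys(s *big.Int, high bool) (sendAES, sendMAC, recvAES, recvMAC []byte) {
	sendPrefix, recvPrefix := byte(2), byte(1)
	if high {
		sendPrefix, recvPrefix = 1, 2
	}
	sBytes := appendMPI(nil, s)
	hash := func(prefix byte) []byte {
		d := sha1.Sum(append([]byte{prefix}, sBytes...))
		return d[:]
	}
	sendAES = hash(sendPrefix)[:16]
	ds := sha1.Sum(sendAES)
	sendMAC = ds[:]
	recvAES = hash(recvPrefix)[:16]
	dr := sha1.Sum(recvAES)
	recvMAC = dr[:]
	return
}

func main() {
	sendAES, _, recvAES, _ := deriveDataKeys(big.NewInt(0xdeadbeef), true)
	fmt.Printf("send AES key %x\nrecv AES key %x\n", sendAES, recvAES)
}
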
-
-func (c *Conversation) calcAKEKeys(s *big.Int) {
- mpi := appendMPI(nil, s)
- h := sha256.New()
-
- var cBytes [32]byte
- hashWithPrefix(c.SSID[:], 0, mpi, h)
-
- hashWithPrefix(cBytes[:], 1, mpi, h)
- copy(c.revealKeys.c[:], cBytes[:16])
- copy(c.sigKeys.c[:], cBytes[16:])
-
- hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h)
- hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h)
- hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h)
- hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h)
-}
-
-func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) {
- h.Reset()
- var p [1]byte
- p[0] = prefix
- h.Write(p[:])
- h.Write(in)
- if len(out) == h.Size() {
- h.Sum(out[:0])
- } else {
- digest := h.Sum(nil)
- copy(out, digest)
- }
-}
-
-func (c *Conversation) encode(msg []byte) [][]byte {
- b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1)
- base64.StdEncoding.Encode(b64[len(msgPrefix):], msg)
- copy(b64, msgPrefix)
- b64[len(b64)-1] = '.'
-
- if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize {
- // We can encode this in a single fragment.
- return [][]byte{b64}
- }
-
- // We have to fragment this message.
- var ret [][]byte
- bytesPerFragment := c.FragmentSize - minFragmentSize
- numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment
-
- for i := 0; i < numFragments; i++ {
- frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",")
- todo := bytesPerFragment
- if todo > len(b64) {
- todo = len(b64)
- }
- frag = append(frag, b64[:todo]...)
- b64 = b64[todo:]
- frag = append(frag, ',')
- ret = append(ret, frag)
- }
-
- return ret
-}
-
-func (c *Conversation) reset() {
- c.myKeyId = 0
-
- for i := range c.keySlots {
- c.keySlots[i].used = false
- }
-}
-
-type PublicKey struct {
- dsa.PublicKey
-}
-
-func (pk *PublicKey) Parse(in []byte) ([]byte, bool) {
- var ok bool
- var pubKeyType uint16
-
- if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 {
- return nil, false
- }
- if pk.P, in, ok = getMPI(in); !ok {
- return nil, false
- }
- if pk.Q, in, ok = getMPI(in); !ok {
- return nil, false
- }
- if pk.G, in, ok = getMPI(in); !ok {
- return nil, false
- }
- if pk.Y, in, ok = getMPI(in); !ok {
- return nil, false
- }
-
- return in, true
-}
-
-func (pk *PublicKey) Serialize(in []byte) []byte {
- in = appendU16(in, 0)
- in = appendMPI(in, pk.P)
- in = appendMPI(in, pk.Q)
- in = appendMPI(in, pk.G)
- in = appendMPI(in, pk.Y)
- return in
-}
-
-// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey.
-func (pk *PublicKey) Fingerprint() []byte {
- b := pk.Serialize(nil)
- h := sha1.New()
- h.Write(b[2:])
- return h.Sum(nil)
-}
-
-func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) {
- if len(sig) != 2*dsaSubgroupBytes {
- return nil, false
- }
- r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes])
- s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:])
- ok := dsa.Verify(&pk.PublicKey, hashed, r, s)
- return sig[dsaSubgroupBytes*2:], ok
-}
-
-type PrivateKey struct {
- PublicKey
- dsa.PrivateKey
-}
-
-func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte {
- r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed)
- if err != nil {
- panic(err.Error())
- }
- rBytes := r.Bytes()
- sBytes := s.Bytes()
- if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes {
- panic("DSA signature too large")
- }
-
- out := make([]byte, 2*dsaSubgroupBytes)
- copy(out[dsaSubgroupBytes-len(rBytes):], rBytes)
- copy(out[len(out)-len(sBytes):], sBytes)
- return out
-}
-
-func (priv *PrivateKey) Serialize(in []byte) []byte {
- in = priv.PublicKey.Serialize(in)
- in = appendMPI(in, priv.PrivateKey.X)
- return in
-}
-
-func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) {
- in, ok := priv.PublicKey.Parse(in)
- if !ok {
- return in, ok
- }
- priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey
- priv.PrivateKey.X, in, ok = getMPI(in)
- return in, ok
-}
-
-func (priv *PrivateKey) Generate(rand io.Reader) {
- if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil {
- panic(err.Error())
- }
- if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil {
- panic(err.Error())
- }
- priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
-}
-
-func notHex(r rune) bool {
- if r >= '0' && r <= '9' ||
- r >= 'a' && r <= 'f' ||
- r >= 'A' && r <= 'F' {
- return false
- }
-
- return true
-}
-
-// Import parses the contents of a libotr private key file.
-func (priv *PrivateKey) Import(in []byte) bool {
- mpiStart := []byte(" #")
-
- mpis := make([]*big.Int, 5)
-
- for i := 0; i < len(mpis); i++ {
- start := bytes.Index(in, mpiStart)
- if start == -1 {
- return false
- }
- in = in[start+len(mpiStart):]
- end := bytes.IndexFunc(in, notHex)
- if end == -1 {
- return false
- }
- hexBytes := in[:end]
- in = in[end:]
-
- if len(hexBytes)&1 != 0 {
- return false
- }
-
- mpiBytes := make([]byte, len(hexBytes)/2)
- if _, err := hex.Decode(mpiBytes, hexBytes); err != nil {
- return false
- }
-
- mpis[i] = new(big.Int).SetBytes(mpiBytes)
- }
-
- for _, mpi := range mpis {
- if mpi.Sign() <= 0 {
- return false
- }
- }
-
- priv.PrivateKey.P = mpis[0]
- priv.PrivateKey.Q = mpis[1]
- priv.PrivateKey.G = mpis[2]
- priv.PrivateKey.Y = mpis[3]
- priv.PrivateKey.X = mpis[4]
- priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
-
- a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P)
- return a.Cmp(priv.PrivateKey.Y) == 0
-}
-
-func getU8(in []byte) (uint8, []byte, bool) {
- if len(in) < 1 {
- return 0, in, false
- }
- return in[0], in[1:], true
-}
-
-func getU16(in []byte) (uint16, []byte, bool) {
- if len(in) < 2 {
- return 0, in, false
- }
- r := uint16(in[0])<<8 | uint16(in[1])
- return r, in[2:], true
-}
-
-func getU32(in []byte) (uint32, []byte, bool) {
- if len(in) < 4 {
- return 0, in, false
- }
- r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
- return r, in[4:], true
-}
-
-func getMPI(in []byte) (*big.Int, []byte, bool) {
- l, in, ok := getU32(in)
- if !ok || uint32(len(in)) < l {
- return nil, in, false
- }
- r := new(big.Int).SetBytes(in[:l])
- return r, in[l:], true
-}
-
-func getData(in []byte) ([]byte, []byte, bool) {
- l, in, ok := getU32(in)
- if !ok || uint32(len(in)) < l {
- return nil, in, false
- }
- return in[:l], in[l:], true
-}
-
-func getNBytes(in []byte, n int) ([]byte, []byte, bool) {
- if len(in) < n {
- return nil, in, false
- }
- return in[:n], in[n:], true
-}
-
-func appendU16(out []byte, v uint16) []byte {
- out = append(out, byte(v>>8), byte(v))
- return out
-}
-
-func appendU32(out []byte, v uint32) []byte {
- out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
- return out
-}
-
-func appendData(out, v []byte) []byte {
- out = appendU32(out, uint32(len(v)))
- out = append(out, v...)
- return out
-}
-
-func appendMPI(out []byte, v *big.Int) []byte {
- vBytes := v.Bytes()
- out = appendU32(out, uint32(len(vBytes)))
- out = append(out, vBytes...)
- return out
-}
-
-func appendMPIs(out []byte, mpis ...*big.Int) []byte {
- for _, mpi := range mpis {
- out = appendMPI(out, mpi)
- }
- return out
-}
-
-func zero(b []byte) {
- for i := range b {
- b[i] = 0
- }
-}
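
These helpers define the whole wire encoding used by the OTR code above: fixed-width big-endian integers (appendU16/appendU32 with getU8/getU16/getU32 as the inverse) and byte strings and MPIs prefixed with a 32-bit length (appendData/appendMPI, getData/getMPI). A standalone round-trip sketch of the MPI form; the functions below are re-implementations for illustration, not exports of this package:

package main

import (
	"fmt"
	"math/big"
)

// appendMPI encodes v as a 4-byte big-endian length followed by the
// magnitude bytes of the integer (no sign, no leading zeros).
func appendMPI(out []byte, v *big.Int) []byte {
	b := v.Bytes()
	l := uint32(len(b))
	out = append(out, byte(l>>24), byte(l>>16), byte(l>>8), byte(l))
	return append(out, b...)
}

// getMPI is the inverse: read the length, then that many bytes.
func getMPI(in []byte) (*big.Int, []byte, bool) {
	if len(in) < 4 {
		return nil, in, false
	}
	l := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
	in = in[4:]
	if uint32(len(in)) < l {
		return nil, in, false
	}
	return new(big.Int).SetBytes(in[:l]), in[l:], true
}

func main() {
	x := new(big.Int).SetInt64(0xdeadbeef)
	buf := appendMPI(nil, x)
	y, rest, ok := getMPI(buf)
	fmt.Println(ok, len(rest) == 0, x.Cmp(y) == 0) // true true true
}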
diff --git a/vendor/golang.org/x/crypto/otr/smp.go b/vendor/golang.org/x/crypto/otr/smp.go
deleted file mode 100644
index dc6de4ee0..000000000
--- a/vendor/golang.org/x/crypto/otr/smp.go
+++ /dev/null
@@ -1,572 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the Socialist Millionaires Protocol as described in
-// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol
-// specification is required in order to understand this code and, where
-// possible, the variable names in the code match up with the spec.
-
-package otr
-
-import (
- "bytes"
- "crypto/sha256"
- "errors"
- "hash"
- "math/big"
-)
-
-type smpFailure string
-
-func (s smpFailure) Error() string {
- return string(s)
-}
-
-var smpFailureError = smpFailure("otr: SMP protocol failed")
-var smpSecretMissingError = smpFailure("otr: mutual secret needed")
-
-const smpVersion = 1
-
-const (
- smpState1 = iota
- smpState2
- smpState3
- smpState4
-)
-
-type smpState struct {
- state int
- a2, a3, b2, b3, pb, qb *big.Int
- g2a, g3a *big.Int
- g2, g3 *big.Int
- g3b, papb, qaqb, ra *big.Int
- saved *tlv
- secret *big.Int
- question string
-}
-
-func (c *Conversation) startSMP(question string) (tlvs []tlv) {
- if c.smp.state != smpState1 {
- tlvs = append(tlvs, c.generateSMPAbort())
- }
- tlvs = append(tlvs, c.generateSMP1(question))
- c.smp.question = ""
- c.smp.state = smpState2
- return
-}
-
-func (c *Conversation) resetSMP() {
- c.smp.state = smpState1
- c.smp.secret = nil
- c.smp.question = ""
-}
-
-func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) {
- data := in.data
-
- switch in.typ {
- case tlvTypeSMPAbort:
- if c.smp.state != smpState1 {
- err = smpFailureError
- }
- c.resetSMP()
- return
- case tlvTypeSMP1WithQuestion:
- // We preprocess this into a SMP1 message.
- nulPos := bytes.IndexByte(data, 0)
- if nulPos == -1 {
- err = errors.New("otr: SMP message with question didn't contain a NUL byte")
- return
- }
- c.smp.question = string(data[:nulPos])
- data = data[nulPos+1:]
- }
-
- numMPIs, data, ok := getU32(data)
- if !ok || numMPIs > 20 {
- err = errors.New("otr: corrupt SMP message")
- return
- }
-
- mpis := make([]*big.Int, numMPIs)
- for i := range mpis {
- var ok bool
- mpis[i], data, ok = getMPI(data)
- if !ok {
- err = errors.New("otr: corrupt SMP message")
- return
- }
- }
-
- switch in.typ {
- case tlvTypeSMP1, tlvTypeSMP1WithQuestion:
- if c.smp.state != smpState1 {
- c.resetSMP()
- out = c.generateSMPAbort()
- return
- }
- if c.smp.secret == nil {
- err = smpSecretMissingError
- return
- }
- if err = c.processSMP1(mpis); err != nil {
- return
- }
- c.smp.state = smpState3
- out = c.generateSMP2()
- case tlvTypeSMP2:
- if c.smp.state != smpState2 {
- c.resetSMP()
- out = c.generateSMPAbort()
- return
- }
- if out, err = c.processSMP2(mpis); err != nil {
- out = c.generateSMPAbort()
- return
- }
- c.smp.state = smpState4
- case tlvTypeSMP3:
- if c.smp.state != smpState3 {
- c.resetSMP()
- out = c.generateSMPAbort()
- return
- }
- if out, err = c.processSMP3(mpis); err != nil {
- return
- }
- c.smp.state = smpState1
- c.smp.secret = nil
- complete = true
- case tlvTypeSMP4:
- if c.smp.state != smpState4 {
- c.resetSMP()
- out = c.generateSMPAbort()
- return
- }
- if err = c.processSMP4(mpis); err != nil {
- out = c.generateSMPAbort()
- return
- }
- c.smp.state = smpState1
- c.smp.secret = nil
- complete = true
- default:
- panic("unknown SMP message")
- }
-
- return
-}
-
-func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) {
- h := sha256.New()
- h.Write([]byte{smpVersion})
- if weStarted {
- h.Write(c.PrivateKey.PublicKey.Fingerprint())
- h.Write(c.TheirPublicKey.Fingerprint())
- } else {
- h.Write(c.TheirPublicKey.Fingerprint())
- h.Write(c.PrivateKey.PublicKey.Fingerprint())
- }
- h.Write(c.SSID[:])
- h.Write(mutualSecret)
- c.smp.secret = new(big.Int).SetBytes(h.Sum(nil))
-}
-
-func (c *Conversation) generateSMP1(question string) tlv {
- var randBuf [16]byte
- c.smp.a2 = c.randMPI(randBuf[:])
- c.smp.a3 = c.randMPI(randBuf[:])
- g2a := new(big.Int).Exp(g, c.smp.a2, p)
- g3a := new(big.Int).Exp(g, c.smp.a3, p)
- h := sha256.New()
-
- r2 := c.randMPI(randBuf[:])
- r := new(big.Int).Exp(g, r2, p)
- c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r))
- d2 := new(big.Int).Mul(c.smp.a2, c2)
- d2.Sub(r2, d2)
- d2.Mod(d2, q)
- if d2.Sign() < 0 {
- d2.Add(d2, q)
- }
-
- r3 := c.randMPI(randBuf[:])
- r.Exp(g, r3, p)
- c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r))
- d3 := new(big.Int).Mul(c.smp.a3, c3)
- d3.Sub(r3, d3)
- d3.Mod(d3, q)
- if d3.Sign() < 0 {
- d3.Add(d3, q)
- }
-
- var ret tlv
- if len(question) > 0 {
- ret.typ = tlvTypeSMP1WithQuestion
- ret.data = append(ret.data, question...)
- ret.data = append(ret.data, 0)
- } else {
- ret.typ = tlvTypeSMP1
- }
- ret.data = appendU32(ret.data, 6)
- ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3)
- return ret
-}
-
-func (c *Conversation) processSMP1(mpis []*big.Int) error {
- if len(mpis) != 6 {
- return errors.New("otr: incorrect number of arguments in SMP1 message")
- }
- g2a := mpis[0]
- c2 := mpis[1]
- d2 := mpis[2]
- g3a := mpis[3]
- c3 := mpis[4]
- d3 := mpis[5]
- h := sha256.New()
-
- r := new(big.Int).Exp(g, d2, p)
- s := new(big.Int).Exp(g2a, c2, p)
- r.Mul(r, s)
- r.Mod(r, p)
- t := new(big.Int).SetBytes(hashMPIs(h, 1, r))
- if c2.Cmp(t) != 0 {
- return errors.New("otr: ZKP c2 incorrect in SMP1 message")
- }
- r.Exp(g, d3, p)
- s.Exp(g3a, c3, p)
- r.Mul(r, s)
- r.Mod(r, p)
- t.SetBytes(hashMPIs(h, 2, r))
- if c3.Cmp(t) != 0 {
- return errors.New("otr: ZKP c3 incorrect in SMP1 message")
- }
-
- c.smp.g2a = g2a
- c.smp.g3a = g3a
- return nil
-}
-
-func (c *Conversation) generateSMP2() tlv {
- var randBuf [16]byte
- b2 := c.randMPI(randBuf[:])
- c.smp.b3 = c.randMPI(randBuf[:])
- r2 := c.randMPI(randBuf[:])
- r3 := c.randMPI(randBuf[:])
- r4 := c.randMPI(randBuf[:])
- r5 := c.randMPI(randBuf[:])
- r6 := c.randMPI(randBuf[:])
-
- g2b := new(big.Int).Exp(g, b2, p)
- g3b := new(big.Int).Exp(g, c.smp.b3, p)
-
- r := new(big.Int).Exp(g, r2, p)
- h := sha256.New()
- c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r))
- d2 := new(big.Int).Mul(b2, c2)
- d2.Sub(r2, d2)
- d2.Mod(d2, q)
- if d2.Sign() < 0 {
- d2.Add(d2, q)
- }
-
- r.Exp(g, r3, p)
- c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r))
- d3 := new(big.Int).Mul(c.smp.b3, c3)
- d3.Sub(r3, d3)
- d3.Mod(d3, q)
- if d3.Sign() < 0 {
- d3.Add(d3, q)
- }
-
- c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p)
- c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p)
- c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p)
- c.smp.qb = new(big.Int).Exp(g, r4, p)
- r.Exp(c.smp.g2, c.smp.secret, p)
- c.smp.qb.Mul(c.smp.qb, r)
- c.smp.qb.Mod(c.smp.qb, p)
-
- s := new(big.Int)
- s.Exp(c.smp.g2, r6, p)
- r.Exp(g, r5, p)
- s.Mul(r, s)
- s.Mod(s, p)
- r.Exp(c.smp.g3, r5, p)
- cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s))
-
- // D5 = r5 - r4 cP mod q and D6 = r6 - y cP mod q
-
- s.Mul(r4, cp)
- r.Sub(r5, s)
- d5 := new(big.Int).Mod(r, q)
- if d5.Sign() < 0 {
- d5.Add(d5, q)
- }
-
- s.Mul(c.smp.secret, cp)
- r.Sub(r6, s)
- d6 := new(big.Int).Mod(r, q)
- if d6.Sign() < 0 {
- d6.Add(d6, q)
- }
-
- var ret tlv
- ret.typ = tlvTypeSMP2
- ret.data = appendU32(ret.data, 11)
- ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6)
- return ret
-}
-
-func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) {
- if len(mpis) != 11 {
- err = errors.New("otr: incorrect number of arguments in SMP2 message")
- return
- }
- g2b := mpis[0]
- c2 := mpis[1]
- d2 := mpis[2]
- g3b := mpis[3]
- c3 := mpis[4]
- d3 := mpis[5]
- pb := mpis[6]
- qb := mpis[7]
- cp := mpis[8]
- d5 := mpis[9]
- d6 := mpis[10]
- h := sha256.New()
-
- r := new(big.Int).Exp(g, d2, p)
- s := new(big.Int).Exp(g2b, c2, p)
- r.Mul(r, s)
- r.Mod(r, p)
- s.SetBytes(hashMPIs(h, 3, r))
- if c2.Cmp(s) != 0 {
- err = errors.New("otr: ZKP c2 failed in SMP2 message")
- return
- }
-
- r.Exp(g, d3, p)
- s.Exp(g3b, c3, p)
- r.Mul(r, s)
- r.Mod(r, p)
- s.SetBytes(hashMPIs(h, 4, r))
- if c3.Cmp(s) != 0 {
- err = errors.New("otr: ZKP c3 failed in SMP2 message")
- return
- }
-
- c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p)
- c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p)
-
- r.Exp(g, d5, p)
- s.Exp(c.smp.g2, d6, p)
- r.Mul(r, s)
- s.Exp(qb, cp, p)
- r.Mul(r, s)
- r.Mod(r, p)
-
- s.Exp(c.smp.g3, d5, p)
- t := new(big.Int).Exp(pb, cp, p)
- s.Mul(s, t)
- s.Mod(s, p)
- t.SetBytes(hashMPIs(h, 5, s, r))
- if cp.Cmp(t) != 0 {
- err = errors.New("otr: ZKP cP failed in SMP2 message")
- return
- }
-
- var randBuf [16]byte
- r4 := c.randMPI(randBuf[:])
- r5 := c.randMPI(randBuf[:])
- r6 := c.randMPI(randBuf[:])
- r7 := c.randMPI(randBuf[:])
-
- pa := new(big.Int).Exp(c.smp.g3, r4, p)
- r.Exp(c.smp.g2, c.smp.secret, p)
- qa := new(big.Int).Exp(g, r4, p)
- qa.Mul(qa, r)
- qa.Mod(qa, p)
-
- r.Exp(g, r5, p)
- s.Exp(c.smp.g2, r6, p)
- r.Mul(r, s)
- r.Mod(r, p)
-
- s.Exp(c.smp.g3, r5, p)
- cp.SetBytes(hashMPIs(h, 6, s, r))
-
- r.Mul(r4, cp)
- d5 = new(big.Int).Sub(r5, r)
- d5.Mod(d5, q)
- if d5.Sign() < 0 {
- d5.Add(d5, q)
- }
-
- r.Mul(c.smp.secret, cp)
- d6 = new(big.Int).Sub(r6, r)
- d6.Mod(d6, q)
- if d6.Sign() < 0 {
- d6.Add(d6, q)
- }
-
- r.ModInverse(qb, p)
- qaqb := new(big.Int).Mul(qa, r)
- qaqb.Mod(qaqb, p)
-
- ra := new(big.Int).Exp(qaqb, c.smp.a3, p)
- r.Exp(qaqb, r7, p)
- s.Exp(g, r7, p)
- cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r))
-
- r.Mul(c.smp.a3, cr)
- d7 := new(big.Int).Sub(r7, r)
- d7.Mod(d7, q)
- if d7.Sign() < 0 {
- d7.Add(d7, q)
- }
-
- c.smp.g3b = g3b
- c.smp.qaqb = qaqb
-
- r.ModInverse(pb, p)
- c.smp.papb = new(big.Int).Mul(pa, r)
- c.smp.papb.Mod(c.smp.papb, p)
- c.smp.ra = ra
-
- out.typ = tlvTypeSMP3
- out.data = appendU32(out.data, 8)
- out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7)
- return
-}
-
-func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) {
- if len(mpis) != 8 {
- err = errors.New("otr: incorrect number of arguments in SMP3 message")
- return
- }
- pa := mpis[0]
- qa := mpis[1]
- cp := mpis[2]
- d5 := mpis[3]
- d6 := mpis[4]
- ra := mpis[5]
- cr := mpis[6]
- d7 := mpis[7]
- h := sha256.New()
-
- r := new(big.Int).Exp(g, d5, p)
- s := new(big.Int).Exp(c.smp.g2, d6, p)
- r.Mul(r, s)
- s.Exp(qa, cp, p)
- r.Mul(r, s)
- r.Mod(r, p)
-
- s.Exp(c.smp.g3, d5, p)
- t := new(big.Int).Exp(pa, cp, p)
- s.Mul(s, t)
- s.Mod(s, p)
- t.SetBytes(hashMPIs(h, 6, s, r))
- if t.Cmp(cp) != 0 {
- err = errors.New("otr: ZKP cP failed in SMP3 message")
- return
- }
-
- r.ModInverse(c.smp.qb, p)
- qaqb := new(big.Int).Mul(qa, r)
- qaqb.Mod(qaqb, p)
-
- r.Exp(qaqb, d7, p)
- s.Exp(ra, cr, p)
- r.Mul(r, s)
- r.Mod(r, p)
-
- s.Exp(g, d7, p)
- t.Exp(c.smp.g3a, cr, p)
- s.Mul(s, t)
- s.Mod(s, p)
- t.SetBytes(hashMPIs(h, 7, s, r))
- if t.Cmp(cr) != 0 {
- err = errors.New("otr: ZKP cR failed in SMP3 message")
- return
- }
-
- var randBuf [16]byte
- r7 := c.randMPI(randBuf[:])
- rb := new(big.Int).Exp(qaqb, c.smp.b3, p)
-
- r.Exp(qaqb, r7, p)
- s.Exp(g, r7, p)
- cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r))
-
- r.Mul(c.smp.b3, cr)
- d7 = new(big.Int).Sub(r7, r)
- d7.Mod(d7, q)
- if d7.Sign() < 0 {
- d7.Add(d7, q)
- }
-
- out.typ = tlvTypeSMP4
- out.data = appendU32(out.data, 3)
- out.data = appendMPIs(out.data, rb, cr, d7)
-
- r.ModInverse(c.smp.pb, p)
- r.Mul(pa, r)
- r.Mod(r, p)
- s.Exp(ra, c.smp.b3, p)
- if r.Cmp(s) != 0 {
- err = smpFailureError
- }
-
- return
-}
-
-func (c *Conversation) processSMP4(mpis []*big.Int) error {
- if len(mpis) != 3 {
- return errors.New("otr: incorrect number of arguments in SMP4 message")
- }
- rb := mpis[0]
- cr := mpis[1]
- d7 := mpis[2]
- h := sha256.New()
-
- r := new(big.Int).Exp(c.smp.qaqb, d7, p)
- s := new(big.Int).Exp(rb, cr, p)
- r.Mul(r, s)
- r.Mod(r, p)
-
- s.Exp(g, d7, p)
- t := new(big.Int).Exp(c.smp.g3b, cr, p)
- s.Mul(s, t)
- s.Mod(s, p)
- t.SetBytes(hashMPIs(h, 8, s, r))
- if t.Cmp(cr) != 0 {
- return errors.New("otr: ZKP cR failed in SMP4 message")
- }
-
- r.Exp(rb, c.smp.a3, p)
- if r.Cmp(c.smp.papb) != 0 {
- return smpFailureError
- }
-
- return nil
-}
-
-func (c *Conversation) generateSMPAbort() tlv {
- return tlv{typ: tlvTypeSMPAbort}
-}
-
-func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte {
- if h != nil {
- h.Reset()
- } else {
- h = sha256.New()
- }
-
- h.Write([]byte{magic})
- for _, mpi := range mpis {
- h.Write(appendMPI(nil, mpi))
- }
- return h.Sum(nil)
-}
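
Every SMP message carries the same Schnorr-style proof of knowledge that generateSMP1/processSMP1 show in full: the prover publishes c = H(magic, g^r mod p) and d = (r - x*c) mod q, and the verifier recomputes g^d * (g^x)^c mod p, which equals g^r exactly when d was formed from the secret exponent x. A compact sketch of that shared pattern, assuming this package's group parameters g, p, q and the hashMPIs helper above; the function names are illustrative only:

// zkProve commits to knowledge of x using the ephemeral exponent r.
func zkProve(x, r *big.Int, magic byte) (c, d *big.Int) {
	gr := new(big.Int).Exp(g, r, p) // commitment g^r mod p
	c = new(big.Int).SetBytes(hashMPIs(nil, magic, gr))
	d = new(big.Int).Mul(x, c) // d = (r - x*c) mod q
	d.Sub(r, d)
	d.Mod(d, q)
	return c, d
}

// zkVerify checks the proof against the public value gx = g^x mod p.
func zkVerify(gx, c, d *big.Int, magic byte) bool {
	t := new(big.Int).Exp(g, d, p)
	t.Mul(t, new(big.Int).Exp(gx, c, p))
	t.Mod(t, p) // equals g^r when the proof is valid
	return c.Cmp(new(big.Int).SetBytes(hashMPIs(nil, magic, t))) == 0
}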
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index 9a887598f..2f04ee5b5 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -7,6 +7,7 @@ package terminal
import (
"bytes"
"io"
+ "strconv"
"sync"
"unicode/utf8"
)
@@ -159,6 +160,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
return keyClearScreen, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
+ case 14: // ^N
+ return keyDown, b[1:]
+ case 16: // ^P
+ return keyUp, b[1:]
}
}
@@ -267,34 +272,44 @@ func (t *Terminal) moveCursorToPos(pos int) {
}
func (t *Terminal) move(up, down, left, right int) {
- movement := make([]rune, 3*(up+down+left+right))
- m := movement
- for i := 0; i < up; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'A'
- m = m[3:]
- }
- for i := 0; i < down; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'B'
- m = m[3:]
- }
- for i := 0; i < left; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'D'
- m = m[3:]
- }
- for i := 0; i < right; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'C'
- m = m[3:]
- }
-
- t.queue(movement)
+ m := []rune{}
+
+ // 1 unit up can be expressed as ^[[A or ^[A
+ // 5 units up can be expressed as ^[[5A
+
+ if up == 1 {
+ m = append(m, keyEscape, '[', 'A')
+ } else if up > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(up))...)
+ m = append(m, 'A')
+ }
+
+ if down == 1 {
+ m = append(m, keyEscape, '[', 'B')
+ } else if down > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(down))...)
+ m = append(m, 'B')
+ }
+
+ if right == 1 {
+ m = append(m, keyEscape, '[', 'C')
+ } else if right > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(right))...)
+ m = append(m, 'C')
+ }
+
+ if left == 1 {
+ m = append(m, keyEscape, '[', 'D')
+ } else if left > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(left))...)
+ m = append(m, 'D')
+ }
+
+ t.queue(m)
}
func (t *Terminal) clearLineToRight() {
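
The rewritten move collapses each direction into a single CSI sequence, ESC [ <count> <letter>, omitting the count when it is 1: one cell up is emitted as \x1b[A and five cells up as \x1b[5A. A standalone sketch of that encoding; cursorMove is an illustrative helper, not part of the package:

package main

import (
	"fmt"
	"strconv"
)

// cursorMove builds the escape sequence for moving n cells in the direction
// given by the trailing letter: 'A' up, 'B' down, 'C' right, 'D' left.
func cursorMove(n int, dir rune) string {
	switch {
	case n <= 0:
		return ""
	case n == 1:
		return "\x1b[" + string(dir)
	default:
		return "\x1b[" + strconv.Itoa(n) + string(dir)
	}
}

func main() {
	fmt.Printf("%q\n", cursorMove(1, 'A')) // "\x1b[A"
	fmt.Printf("%q\n", cursorMove(5, 'A')) // "\x1b[5A"
}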
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 6cb8a9503..5cfdf8f3f 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -64,13 +64,15 @@ func Restore(fd int, state *State) error {
return windows.SetConsoleMode(windows.Handle(fd), state.mode)
}
-// GetSize returns the dimensions of the given terminal.
+// GetSize returns the visible dimensions of the given terminal.
+//
+// These dimensions don't include any scrollback buffer height.
func GetSize(fd int) (width, height int, err error) {
var info windows.ConsoleScreenBufferInfo
if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
return 0, 0, err
}
- return int(info.Size.X), int(info.Size.Y), nil
+ return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil
}
// ReadPassword reads a line of input from a terminal without local echo. This
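
The new implementation reports the visible window (the bounds of info.Window) rather than the full screen buffer, which on Windows can be thousands of rows of scrollback. Call sites stay the same; a minimal usage sketch:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	w, h, err := terminal.GetSize(int(os.Stdout.Fd()))
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("terminal is %d columns x %d rows\n", w, h)
}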
diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go
deleted file mode 100644
index 198f0ca1e..000000000
--- a/vendor/golang.org/x/crypto/ssh/test/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package test contains integration tests for the
-// golang.org/x/crypto/ssh package.
-package test // import "golang.org/x/crypto/ssh/test"
diff --git a/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c b/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c
deleted file mode 100644
index 2794a563a..000000000
--- a/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// sshd_test_pw.c
-// Wrapper to inject test password data for sshd PAM authentication
-//
-// This wrapper implements custom versions of getpwnam, getpwnam_r,
-// getspnam and getspnam_r. These functions first call their real
-// libc versions, then check whether the requested user matches the test user
-// specified in the TEST_USER env variable and, if so, replace the password
-// with the crypt()ed value of the TEST_PASSWD env variable.
-//
-// Compile:
-// gcc -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c
-//
-// Compile with debug:
-// gcc -DVERBOSE -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c
-//
-// Run sshd:
-// LD_PRELOAD="sshd_test_pw.so" TEST_USER="..." TEST_PASSWD="..." sshd ...
-
-// +build ignore
-
-#define _GNU_SOURCE
-#include <string.h>
-#include <pwd.h>
-#include <shadow.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdio.h>
-
-#ifdef VERBOSE
-#define DEBUG(X...) fprintf(stderr, X)
-#else
-#define DEBUG(X...) while (0) { }
-#endif
-
-/* crypt() password */
-static char *
-pwhash(char *passwd) {
- return strdup(crypt(passwd, "$6$"));
-}
-
-/* Pointers to real functions in libc */
-static struct passwd * (*real_getpwnam)(const char *) = NULL;
-static int (*real_getpwnam_r)(const char *, struct passwd *, char *, size_t, struct passwd **) = NULL;
-static struct spwd * (*real_getspnam)(const char *) = NULL;
-static int (*real_getspnam_r)(const char *, struct spwd *, char *, size_t, struct spwd **) = NULL;
-
-/* Cached test user and test password */
-static char *test_user = NULL;
-static char *test_passwd_hash = NULL;
-
-static void
-init(void) {
- /* Fetch real libc function pointers */
- real_getpwnam = dlsym(RTLD_NEXT, "getpwnam");
- real_getpwnam_r = dlsym(RTLD_NEXT, "getpwnam_r");
- real_getspnam = dlsym(RTLD_NEXT, "getspnam");
- real_getspnam_r = dlsym(RTLD_NEXT, "getspnam_r");
-
- /* abort if env variables are not defined */
- if (getenv("TEST_USER") == NULL || getenv("TEST_PASSWD") == NULL) {
- fprintf(stderr, "env variables TEST_USER and TEST_PASSWD are missing\n");
- abort();
- }
-
- /* Fetch test user and test password from env */
- test_user = strdup(getenv("TEST_USER"));
- test_passwd_hash = pwhash(getenv("TEST_PASSWD"));
-
- DEBUG("sshd_test_pw init():\n");
- DEBUG("\treal_getpwnam: %p\n", real_getpwnam);
- DEBUG("\treal_getpwnam_r: %p\n", real_getpwnam_r);
- DEBUG("\treal_getspnam: %p\n", real_getspnam);
- DEBUG("\treal_getspnam_r: %p\n", real_getspnam_r);
- DEBUG("\tTEST_USER: '%s'\n", test_user);
- DEBUG("\tTEST_PASSWD: '%s'\n", getenv("TEST_PASSWD"));
- DEBUG("\tTEST_PASSWD_HASH: '%s'\n", test_passwd_hash);
-}
-
-static int
-is_test_user(const char *name) {
- if (test_user != NULL && strcmp(test_user, name) == 0)
- return 1;
- return 0;
-}
-
-/* getpwnam */
-
-struct passwd *
-getpwnam(const char *name) {
- struct passwd *pw;
-
- DEBUG("sshd_test_pw getpwnam(%s)\n", name);
-
- if (real_getpwnam == NULL)
- init();
- if ((pw = real_getpwnam(name)) == NULL)
- return NULL;
-
- if (is_test_user(name))
- pw->pw_passwd = strdup(test_passwd_hash);
-
- return pw;
-}
-
-/* getpwnam_r */
-
-int
-getpwnam_r(const char *name,
- struct passwd *pwd,
- char *buf,
- size_t buflen,
- struct passwd **result) {
- int r;
-
- DEBUG("sshd_test_pw getpwnam_r(%s)\n", name);
-
- if (real_getpwnam_r == NULL)
- init();
- if ((r = real_getpwnam_r(name, pwd, buf, buflen, result)) != 0 || *result == NULL)
- return r;
-
- if (is_test_user(name))
- pwd->pw_passwd = strdup(test_passwd_hash);
-
- return 0;
-}
-
-/* getspnam */
-
-struct spwd *
-getspnam(const char *name) {
- struct spwd *sp;
-
- DEBUG("sshd_test_pw getspnam(%s)\n", name);
-
- if (real_getspnam == NULL)
- init();
- if ((sp = real_getspnam(name)) == NULL)
- return NULL;
-
- if (is_test_user(name))
- sp->sp_pwdp = strdup(test_passwd_hash);
-
- return sp;
-}
-
-/* getspnam_r */
-
-int
-getspnam_r(const char *name,
- struct spwd *spbuf,
- char *buf,
- size_t buflen,
- struct spwd **spbufp) {
- int r;
-
- DEBUG("sshd_test_pw getspnam_r(%s)\n", name);
-
- if (real_getspnam_r == NULL)
- init();
- if ((r = real_getspnam_r(name, spbuf, buf, buflen, spbufp)) != 0)
- return r;
-
- if (is_test_user(name))
- spbuf->sp_pwdp = strdup(test_passwd_hash);
-
- return r;
-}
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/README.md b/vendor/golang.org/x/net/README.md
deleted file mode 100644
index 00a9b6eb2..000000000
--- a/vendor/golang.org/x/net/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Go Networking
-
-This repository holds supplementary Go networking libraries.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/net`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/net`.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit
-changes to this repository, see https://golang.org/doc/contribute.html.
-The main issue tracker for the net repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/net:" in the
-subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 000000000..37dc0cfdb
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ default:
+ }
+ }
+ return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
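
As the doc comments above say, a nil client falls back to http.DefaultClient, and a canceled or expired context surfaces as ctx.Err(). A minimal usage sketch; the URL is a placeholder:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/") // nil client => http.DefaultClient
	if err != nil {
		fmt.Println("request failed:", err) // ctx.Err() if the timeout fired
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}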
diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
new file mode 100644
index 000000000..5d052781b
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/gen.go
@@ -0,0 +1,712 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+//go:generate go run gen.go -test
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "sort"
+ "strings"
+)
+
+// identifier converts s to a Go exported identifier.
+// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
+func identifier(s string) string {
+ b := make([]byte, 0, len(s))
+ cap := true
+ for _, c := range s {
+ if c == '-' {
+ cap = true
+ continue
+ }
+ if cap && 'a' <= c && c <= 'z' {
+ c -= 'a' - 'A'
+ }
+ cap = false
+ b = append(b, byte(c))
+ }
+ return string(b)
+}
+
+var test = flag.Bool("test", false, "generate table_test.go")
+
+func genFile(name string, buf *bytes.Buffer) {
+ b, err := format.Source(buf.Bytes())
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ if err := ioutil.WriteFile(name, b, 0644); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func main() {
+ flag.Parse()
+
+ var all []string
+ all = append(all, elements...)
+ all = append(all, attributes...)
+ all = append(all, eventHandlers...)
+ all = append(all, extra...)
+ sort.Strings(all)
+
+ // uniq - lists have dups
+ w := 0
+ for _, s := range all {
+ if w == 0 || all[w-1] != s {
+ all[w] = s
+ w++
+ }
+ }
+ all = all[:w]
+
+ if *test {
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
+ fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
+ fmt.Fprintln(&buf, "package atom\n")
+ fmt.Fprintln(&buf, "var testAtomList = []string{")
+ for _, s := range all {
+ fmt.Fprintf(&buf, "\t%q,\n", s)
+ }
+ fmt.Fprintln(&buf, "}")
+
+ genFile("table_test.go", &buf)
+ return
+ }
+
+ // Find hash that minimizes table size.
+ var best *table
+ for i := 0; i < 1000000; i++ {
+ if best != nil && 1<<(best.k-1) < len(all) {
+ break
+ }
+ h := rand.Uint32()
+ for k := uint(0); k <= 16; k++ {
+ if best != nil && k >= best.k {
+ break
+ }
+ var t table
+ if t.init(h, k, all) {
+ best = &t
+ break
+ }
+ }
+ }
+ if best == nil {
+ fmt.Fprintf(os.Stderr, "failed to construct string table\n")
+ os.Exit(1)
+ }
+
+ // Lay out strings, using overlaps when possible.
+ layout := append([]string{}, all...)
+
+ // Remove strings that are substrings of other strings
+ for changed := true; changed; {
+ changed = false
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i != j && t != "" && strings.Contains(s, t) {
+ changed = true
+ layout[j] = ""
+ }
+ }
+ }
+ }
+
+ // Join strings where one suffix matches another prefix.
+ for {
+ // Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
+ // maximizing overlap length k.
+ besti := -1
+ bestj := -1
+ bestk := 0
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i == j {
+ continue
+ }
+ for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
+ if s[len(s)-k:] == t[:k] {
+ besti = i
+ bestj = j
+ bestk = k
+ }
+ }
+ }
+ }
+ if bestk > 0 {
+ layout[besti] += layout[bestj][bestk:]
+ layout[bestj] = ""
+ continue
+ }
+ break
+ }
+
+ text := strings.Join(layout, "")
+
+ atom := map[string]uint32{}
+ for _, s := range all {
+ off := strings.Index(text, s)
+ if off < 0 {
+ panic("lost string " + s)
+ }
+ atom[s] = uint32(off<<8 | len(s))
+ }
+
+ var buf bytes.Buffer
+ // Generate the Go code.
+ fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
+ fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
+ fmt.Fprintln(&buf, "package atom\n\nconst (")
+
+ // compute max len
+ maxLen := 0
+ for _, s := range all {
+ if maxLen < len(s) {
+ maxLen = len(s)
+ }
+ fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
+ }
+ fmt.Fprintln(&buf, ")\n")
+
+ fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
+ fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
+
+ fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
+ for i, s := range best.tab {
+ if s == "" {
+ continue
+ }
+ fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
+ }
+ fmt.Fprintf(&buf, "}\n")
+ datasize := (1 << best.k) * 4
+
+ fmt.Fprintln(&buf, "const atomText =")
+ textsize := len(text)
+ for len(text) > 60 {
+ fmt.Fprintf(&buf, "\t%q +\n", text[:60])
+ text = text[60:]
+ }
+ fmt.Fprintf(&buf, "\t%q\n\n", text)
+
+ genFile("table.go", &buf)
+
+ fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+ h0 uint32
+ k uint
+ mask uint32
+ tab []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+ h := fnv(t.h0, s)
+ h1 = h & t.mask
+ h2 = (h >> 16) & t.mask
+ return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+ t.h0 = h0
+ t.k = k
+ t.tab = make([]string, 1<<k)
+ t.mask = 1<<k - 1
+ for _, s := range x {
+ if !t.insert(s) {
+ return false
+ }
+ }
+ return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+ h1, h2 := t.hash(s)
+ if t.tab[h1] == "" {
+ t.tab[h1] = s
+ return true
+ }
+ if t.tab[h2] == "" {
+ t.tab[h2] = s
+ return true
+ }
+ if t.push(h1, 0) {
+ t.tab[h1] = s
+ return true
+ }
+ if t.push(h2, 0) {
+ t.tab[h2] = s
+ return true
+ }
+ return false
+}
+
+// push attempts to push aside the entry in slot i.
+func (t *table) push(i uint32, depth int) bool {
+ if depth > len(t.tab) {
+ return false
+ }
+ s := t.tab[i]
+ h1, h2 := t.hash(s)
+ j := h1 + h2 - i
+ if t.tab[j] != "" && !t.push(j, depth+1) {
+ return false
+ }
+ t.tab[j] = s
+ return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
+
+// "command", "keygen" and "menuitem" have been removed from the spec,
+// but are kept here for backwards compatibility.
+var elements = []string{
+ "a",
+ "abbr",
+ "address",
+ "area",
+ "article",
+ "aside",
+ "audio",
+ "b",
+ "base",
+ "bdi",
+ "bdo",
+ "blockquote",
+ "body",
+ "br",
+ "button",
+ "canvas",
+ "caption",
+ "cite",
+ "code",
+ "col",
+ "colgroup",
+ "command",
+ "data",
+ "datalist",
+ "dd",
+ "del",
+ "details",
+ "dfn",
+ "dialog",
+ "div",
+ "dl",
+ "dt",
+ "em",
+ "embed",
+ "fieldset",
+ "figcaption",
+ "figure",
+ "footer",
+ "form",
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6",
+ "head",
+ "header",
+ "hgroup",
+ "hr",
+ "html",
+ "i",
+ "iframe",
+ "img",
+ "input",
+ "ins",
+ "kbd",
+ "keygen",
+ "label",
+ "legend",
+ "li",
+ "link",
+ "main",
+ "map",
+ "mark",
+ "menu",
+ "menuitem",
+ "meta",
+ "meter",
+ "nav",
+ "noscript",
+ "object",
+ "ol",
+ "optgroup",
+ "option",
+ "output",
+ "p",
+ "param",
+ "picture",
+ "pre",
+ "progress",
+ "q",
+ "rp",
+ "rt",
+ "ruby",
+ "s",
+ "samp",
+ "script",
+ "section",
+ "select",
+ "slot",
+ "small",
+ "source",
+ "span",
+ "strong",
+ "style",
+ "sub",
+ "summary",
+ "sup",
+ "table",
+ "tbody",
+ "td",
+ "template",
+ "textarea",
+ "tfoot",
+ "th",
+ "thead",
+ "time",
+ "title",
+ "tr",
+ "track",
+ "u",
+ "ul",
+ "var",
+ "video",
+ "wbr",
+}
+
+// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
+//
+// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
+// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
+// but are kept here for backwards compatibility.
+var attributes = []string{
+ "abbr",
+ "accept",
+ "accept-charset",
+ "accesskey",
+ "action",
+ "allowfullscreen",
+ "allowpaymentrequest",
+ "allowusermedia",
+ "alt",
+ "as",
+ "async",
+ "autocomplete",
+ "autofocus",
+ "autoplay",
+ "challenge",
+ "charset",
+ "checked",
+ "cite",
+ "class",
+ "color",
+ "cols",
+ "colspan",
+ "command",
+ "content",
+ "contenteditable",
+ "contextmenu",
+ "controls",
+ "coords",
+ "crossorigin",
+ "data",
+ "datetime",
+ "default",
+ "defer",
+ "dir",
+ "dirname",
+ "disabled",
+ "download",
+ "draggable",
+ "dropzone",
+ "enctype",
+ "for",
+ "form",
+ "formaction",
+ "formenctype",
+ "formmethod",
+ "formnovalidate",
+ "formtarget",
+ "headers",
+ "height",
+ "hidden",
+ "high",
+ "href",
+ "hreflang",
+ "http-equiv",
+ "icon",
+ "id",
+ "inputmode",
+ "integrity",
+ "is",
+ "ismap",
+ "itemid",
+ "itemprop",
+ "itemref",
+ "itemscope",
+ "itemtype",
+ "keytype",
+ "kind",
+ "label",
+ "lang",
+ "list",
+ "loop",
+ "low",
+ "manifest",
+ "max",
+ "maxlength",
+ "media",
+ "mediagroup",
+ "method",
+ "min",
+ "minlength",
+ "multiple",
+ "muted",
+ "name",
+ "nomodule",
+ "nonce",
+ "novalidate",
+ "open",
+ "optimum",
+ "pattern",
+ "ping",
+ "placeholder",
+ "playsinline",
+ "poster",
+ "preload",
+ "radiogroup",
+ "readonly",
+ "referrerpolicy",
+ "rel",
+ "required",
+ "reversed",
+ "rows",
+ "rowspan",
+ "sandbox",
+ "spellcheck",
+ "scope",
+ "scoped",
+ "seamless",
+ "selected",
+ "shape",
+ "size",
+ "sizes",
+ "sortable",
+ "sorted",
+ "slot",
+ "span",
+ "spellcheck",
+ "src",
+ "srcdoc",
+ "srclang",
+ "srcset",
+ "start",
+ "step",
+ "style",
+ "tabindex",
+ "target",
+ "title",
+ "translate",
+ "type",
+ "typemustmatch",
+ "updateviacache",
+ "usemap",
+ "value",
+ "width",
+ "workertype",
+ "wrap",
+}
+
+// "onautocomplete", "onautocompleteerror", "onmousewheel",
+// "onshow" and "onsort" have been removed from the spec,
+// but are kept here for backwards compatibility.
+var eventHandlers = []string{
+ "onabort",
+ "onautocomplete",
+ "onautocompleteerror",
+ "onauxclick",
+ "onafterprint",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontextmenu",
+ "oncopy",
+ "oncuechange",
+ "oncut",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragexit",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onhashchange",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onlanguagechange",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadend",
+ "onloadstart",
+ "onmessage",
+ "onmessageerror",
+ "onmousedown",
+ "onmouseenter",
+ "onmouseleave",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onwheel",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpaste",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onpopstate",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onrejectionhandled",
+ "onscroll",
+ "onsecuritypolicyviolation",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onshow",
+ "onsort",
+ "onstalled",
+ "onstorage",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onunhandledrejection",
+ "onunload",
+ "onvolumechange",
+ "onwaiting",
+}
+
+// extra are ad-hoc values not covered by any of the lists above.
+var extra = []string{
+ "acronym",
+ "align",
+ "annotation",
+ "annotation-xml",
+ "applet",
+ "basefont",
+ "bgsound",
+ "big",
+ "blink",
+ "center",
+ "color",
+ "desc",
+ "face",
+ "font",
+ "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
+ "foreignobject",
+ "frame",
+ "frameset",
+ "image",
+ "isindex",
+ "listing",
+ "malignmark",
+ "marquee",
+ "math",
+ "mglyph",
+ "mi",
+ "mn",
+ "mo",
+ "ms",
+ "mtext",
+ "nobr",
+ "noembed",
+ "noframes",
+ "plaintext",
+ "prompt",
+ "public",
+ "rb",
+ "rtc",
+ "spacer",
+ "strike",
+ "svg",
+ "system",
+ "tt",
+ "xmp",
+}
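
The generator ends by packing each atom as uint32(off<<8 | len(s)) against the concatenated atomText, so a packed value decodes with a shift, a mask, and a substring. An illustrative standalone sketch; atomString and the toy layout are for demonstration only, not the generated package's actual code:

package main

import "fmt"

// atomString decodes a value packed as uint32(off<<8 | len(s)): the high bits
// index into the concatenated text, the low byte is the length.
func atomString(a uint32, atomText string) string {
	off, n := a>>8, a&0xff
	if off+n > uint32(len(atomText)) {
		return ""
	}
	return atomText[off : off+n]
}

func main() {
	// Toy layout: "dress" is a substring of "address", so the layout pass
	// above would store only the longer string and let both share it.
	const text = "address"
	fmt.Println(atomString(0<<8|7, text)) // "address"
	fmt.Println(atomString(2<<8|5, text)) // "dress"
}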
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
index 2c1cade60..633ee15dc 100644
--- a/vendor/golang.org/x/net/html/node.go
+++ b/vendor/golang.org/x/net/html/node.go
@@ -177,7 +177,7 @@ func (s *nodeStack) index(n *Node) int {
// contains returns whether a is within s.
func (s *nodeStack) contains(a atom.Atom) bool {
for _, n := range *s {
- if n.DataAtom == a {
+ if n.DataAtom == a && n.Namespace == "" {
return true
}
}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 64a579372..992cff2a3 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -439,9 +439,6 @@ func (p *parser) resetInsertionMode() {
case a.Select:
if !last {
for ancestor, first := n, p.oe[0]; ancestor != first; {
- if ancestor == first {
- break
- }
ancestor = p.oe[p.oe.index(ancestor)-1]
switch ancestor.DataAtom {
case a.Template:
@@ -633,7 +630,16 @@ func inHeadIM(p *parser) bool {
p.oe.pop()
p.acknowledgeSelfClosingTag()
return true
- case a.Script, a.Title, a.Noscript, a.Noframes, a.Style:
+ case a.Noscript:
+ p.addElement()
+ if p.scripting {
+ p.setOriginalIM()
+ p.im = textIM
+ } else {
+ p.im = inHeadNoscriptIM
+ }
+ return true
+ case a.Script, a.Title, a.Noframes, a.Style:
p.addElement()
p.setOriginalIM()
p.im = textIM
@@ -695,6 +701,49 @@ func inHeadIM(p *parser) bool {
return false
}
+// 12.2.6.4.5.
+func inHeadNoscriptIM(p *parser) bool {
+ switch p.tok.Type {
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style:
+ return inHeadIM(p)
+ case a.Head, a.Noscript:
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Noscript, a.Br:
+ default:
+ // Ignore the token.
+ return true
+ }
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) == 0 {
+ // It was all whitespace.
+ return inHeadIM(p)
+ }
+ case CommentToken:
+ return inHeadIM(p)
+ }
+ p.oe.pop()
+ if p.top().DataAtom != a.Head {
+ panic("html: the new current node will be a head element.")
+ }
+ p.im = inHeadIM
+ if p.tok.DataAtom == a.Noscript {
+ return true
+ }
+ return false
+}
+
// Section 12.2.6.4.6.
func afterHeadIM(p *parser) bool {
switch p.tok.Type {
@@ -904,7 +953,7 @@ func inBodyIM(p *parser) bool {
case a.A:
for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
- p.inBodyEndTagFormatting(a.A)
+ p.inBodyEndTagFormatting(a.A, "a")
p.oe.remove(n)
p.afe.remove(n)
break
@@ -918,7 +967,7 @@ func inBodyIM(p *parser) bool {
case a.Nobr:
p.reconstructActiveFormattingElements()
if p.elementInScope(defaultScope, a.Nobr) {
- p.inBodyEndTagFormatting(a.Nobr)
+ p.inBodyEndTagFormatting(a.Nobr, "nobr")
p.reconstructActiveFormattingElements()
}
p.addFormattingElement()
@@ -1126,7 +1175,7 @@ func inBodyIM(p *parser) bool {
case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
- p.inBodyEndTagFormatting(p.tok.DataAtom)
+ p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
case a.Applet, a.Marquee, a.Object:
if p.popUntil(defaultScope, p.tok.DataAtom) {
p.clearActiveFormattingElements()
@@ -1137,7 +1186,7 @@ func inBodyIM(p *parser) bool {
case a.Template:
return inHeadIM(p)
default:
- p.inBodyEndTagOther(p.tok.DataAtom)
+ p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
}
case CommentToken:
p.addChild(&Node{
@@ -1164,7 +1213,7 @@ func inBodyIM(p *parser) bool {
return true
}
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
// This is the "adoption agency" algorithm, described at
// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
@@ -1186,7 +1235,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
}
}
if formattingElement == nil {
- p.inBodyEndTagOther(tagAtom)
+ p.inBodyEndTagOther(tagAtom, tagName)
return
}
feIndex := p.oe.index(formattingElement)
@@ -1291,9 +1340,17 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
for i := len(p.oe) - 1; i >= 0; i-- {
- if p.oe[i].DataAtom == tagAtom {
+ // Two element nodes have the same tag if they have the same Data (a
+ // string-typed field). As an optimization, for common HTML tags, each
+ // Data string is assigned a unique, non-zero DataAtom (a uint32-typed
+ // field), since integer comparison is faster than string comparison.
+ // Uncommon (custom) tags get a zero DataAtom.
+ //
+ // The if condition here is equivalent to (p.oe[i].Data == tagName).
+ if (p.oe[i].DataAtom == tagAtom) &&
+ ((tagAtom != 0) || (p.oe[i].Data == tagName)) {
p.oe = p.oe[:i]
break
}
@@ -1687,8 +1744,9 @@ func inCellIM(p *parser) bool {
return true
}
// Close the cell and reprocess.
- p.popUntil(tableScope, a.Td, a.Th)
- p.clearActiveFormattingElements()
+ if p.popUntil(tableScope, a.Td, a.Th) {
+ p.clearActiveFormattingElements()
+ }
p.im = inRowIM
return false
}
@@ -1719,8 +1777,12 @@ func inSelectIM(p *parser) bool {
}
p.addElement()
case a.Select:
- p.tok.Type = EndTagToken
- return false
+ if p.popUntil(selectScope, a.Select) {
+ p.resetInsertionMode()
+ } else {
+ // Ignore the token.
+ return true
+ }
case a.Input, a.Keygen, a.Textarea:
if p.elementInScope(selectScope, a.Select) {
p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
@@ -1750,6 +1812,9 @@ func inSelectIM(p *parser) bool {
case a.Select:
if p.popUntil(selectScope, a.Select) {
p.resetInsertionMode()
+ } else {
+ // Ignore the token.
+ return true
}
case a.Template:
return inHeadIM(p)
@@ -1775,13 +1840,22 @@ func inSelectInTableIM(p *parser) bool {
case StartTagToken, EndTagToken:
switch p.tok.DataAtom {
case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
- if p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {
- p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
- return false
- } else {
+ if p.tok.Type == EndTagToken && !p.elementInScope(tableScope, p.tok.DataAtom) {
// Ignore the token.
return true
}
+ // This is like p.popUntil(selectScope, a.Select), but it also
+ // matches <math select>, not just <select>. Matching the MathML
+ // tag is arguably incorrect (conceptually), but it mimics what
+ // Chromium does.
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ if n := p.oe[i]; n.DataAtom == a.Select {
+ p.oe = p.oe[:i]
+ break
+ }
+ }
+ p.resetInsertionMode()
+ return false
}
}
return inSelectIM(p)
@@ -2226,6 +2300,33 @@ func (p *parser) parse() error {
//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
+ return ParseWithOptions(r)
+}
+
+// ParseFragment parses a fragment of HTML and returns the nodes that were
+// found. If the fragment is the InnerHTML for an existing element, pass that
+// element in context.
+//
+// It has the same intricacies as Parse.
+func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
+ return ParseFragmentWithOptions(r, context)
+}
+
+// ParseOption configures a parser.
+type ParseOption func(p *parser)
+
+// ParseOptionEnableScripting configures the scripting flag.
+// https://html.spec.whatwg.org/multipage/webappapis.html#enabling-and-disabling-scripting
+//
+// By default, scripting is enabled.
+func ParseOptionEnableScripting(enable bool) ParseOption {
+ return func(p *parser) {
+ p.scripting = enable
+ }
+}
+
+// ParseWithOptions is like Parse, with options.
+func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) {
p := &parser{
tokenizer: NewTokenizer(r),
doc: &Node{
@@ -2235,6 +2336,11 @@ func Parse(r io.Reader) (*Node, error) {
framesetOK: true,
im: initialIM,
}
+
+ for _, f := range opts {
+ f(p)
+ }
+
err := p.parse()
if err != nil {
return nil, err
@@ -2242,12 +2348,8 @@ func Parse(r io.Reader) (*Node, error) {
return p.doc, nil
}
-// ParseFragment parses a fragment of HTML and returns the nodes that were
-// found. If the fragment is the InnerHTML for an existing element, pass that
-// element in context.
-//
-// It has the same intricacies as Parse.
-func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
+// ParseFragmentWithOptions is like ParseFragment, with options.
+func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) {
contextTag := ""
if context != nil {
if context.Type != ElementNode {
@@ -2271,6 +2373,10 @@ func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
context: context,
}
+ for _, f := range opts {
+ f(p)
+ }
+
root := &Node{
Type: ElementNode,
DataAtom: a.Html,
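
ParseWithOptions and ParseFragmentWithOptions keep the existing entry points intact while letting callers flip the scripting flag, which decides whether <noscript> content goes through textIM (raw text) or the new inHeadNoscriptIM (parsed as markup). A usage sketch:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<head><noscript><p>no JS</p></noscript></head>`

	// With scripting disabled, the parser takes the inHeadNoscriptIM path
	// added above and treats the <noscript> contents as markup.
	doc, err := html.ParseWithOptions(strings.NewReader(page),
		html.ParseOptionEnableScripting(false))
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.FirstChild.Data) // "html", the synthesized root element
}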
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore
new file mode 100644
index 000000000..190f12234
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 000000000..53fc52579
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 000000000..55fd826f7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index b46791d1d..514c126c5 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -643,7 +643,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
return f.WriteDataPadded(streamID, endStream, data, nil)
}
-// WriteData writes a DATA frame with optional padding.
+// WriteDataPadded writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b57b6e2d0..57334dc79 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -273,7 +273,20 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if testHookOnConn != nil {
testHookOnConn()
}
+ // The TLSNextProto interface predates contexts, so
+ // the net/http package passes down its per-connection
+ // base context via an exported but unadvertised
+ // method on the Handler. This is for internal
+ // net/http<=>http2 use only.
+ var ctx context.Context
+ type baseContexter interface {
+ BaseContext() context.Context
+ }
+ if bc, ok := h.(baseContexter); ok {
+ ctx = bc.BaseContext()
+ }
conf.ServeConn(c, &ServeConnOpts{
+ Context: ctx,
Handler: h,
BaseConfig: hs,
})
@@ -284,6 +297,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
+ // Context is the base context to use.
+ // If nil, context.Background is used.
+ Context context.Context
+
// BaseConfig optionally sets the base configuration
// for values. If nil, defaults are used.
BaseConfig *http.Server
@@ -294,6 +311,13 @@ type ServeConnOpts struct {
Handler http.Handler
}
+func (o *ServeConnOpts) context() context.Context {
+ if o.Context != nil {
+ return o.Context
+ }
+ return context.Background()
+}
+
func (o *ServeConnOpts) baseConfig() *http.Server {
if o != nil && o.BaseConfig != nil {
return o.BaseConfig
@@ -439,7 +463,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
- ctx, cancel = context.WithCancel(context.Background())
+ ctx, cancel = context.WithCancel(opts.context())
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
if hs := opts.baseConfig(); hs != nil {
ctx = context.WithValue(ctx, http.ServerContextKey, hs)
@@ -1594,12 +1618,6 @@ func (sc *serverConn) processData(f *DataFrame) error {
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
- // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in
- // "open" or "half-closed (local)" state, the recipient MUST respond with a
- // stream error (Section 5.4.2) of type STREAM_CLOSED.
- if state == stateClosed {
- return streamError(id, ErrCodeStreamClosed)
- }
if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
@@ -2313,7 +2331,16 @@ type chunkWriter struct{ rws *responseWriterState }
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
-func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
+
+func (rws *responseWriterState) hasNonemptyTrailers() bool {
+ for _, trailer := range rws.trailers {
+ if _, ok := rws.handlerHeader[trailer]; ok {
+ return true
+ }
+ }
+ return false
+}
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
@@ -2413,7 +2440,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
rws.promoteUndeclaredTrailers()
}
- endStream := rws.handlerDone && !rws.hasTrailers()
+ // only send trailers if they have actually been defined by the
+ // server handler.
+ hasNonemptyTrailers := rws.hasNonemptyTrailers()
+ endStream := rws.handlerDone && !hasNonemptyTrailers
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
@@ -2422,7 +2452,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
}
}
- if rws.handlerDone && rws.hasTrailers() {
+ if rws.handlerDone && hasNonemptyTrailers {
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
h: rws.handlerHeader,
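
ServeConnOpts gains a Context field that becomes the base of every per-stream request context (serverConnBaseContext now derives from opts.context() instead of context.Background()). A sketch of supplying one when driving ServeConn directly; the h2c wiring and the listen address are illustrative only:

package main

import (
	"context"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveH2C serves a single plaintext HTTP/2 (h2c) connection with a
// caller-supplied base context.
func serveH2C(ctx context.Context, conn net.Conn, handler http.Handler) {
	s := &http2.Server{}
	s.ServeConn(conn, &http2.ServeConnOpts{
		Context: ctx, // per-stream request contexts now derive from ctx
		Handler: handler,
	})
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	serveH2C(context.Background(), conn, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over h2c\n"))
	}))
}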
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f272e8f9f..c0c80d893 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -28,6 +28,7 @@ import (
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
"golang.org/x/net/http/httpguts"
@@ -199,6 +200,7 @@ type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields:
@@ -440,7 +442,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
- traceGotConn(req, cc)
+ reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ traceGotConn(req, cc, reused)
res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
if err != nil && retry <= 6 {
if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
@@ -1411,7 +1414,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// followed by the query production (see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
- f(":method", req.Method)
+ m := req.Method
+ if m == "" {
+ m = http.MethodGet
+ }
+ f(":method", m)
if req.Method != "CONNECT" {
f(":path", path)
f(":scheme", req.URL.Scheme)
@@ -2555,15 +2562,15 @@ func traceGetConn(req *http.Request, hostPort string) {
trace.GetConn(hostPort)
}
-func traceGotConn(req *http.Request, cc *ClientConn) {
+func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
return
}
ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
cc.mu.Lock()
- ci.Reused = cc.nextStreamID > 1
- ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = time.Now().Sub(cc.lastActive)
}
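Reused is now derived from an atomic flag flipped by the first RoundTrip on a connection rather than from nextStreamID, so a GotConn hook sees Reused == false exactly once per ClientConn. A minimal sketch of observing this through net/http/httptrace; the URL is a placeholder.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			// The first request on a connection reports Reused == false; later
			// requests on the same connection report true, and WasIdle/IdleTime
			// are derived from that flag plus lastActive.
			fmt.Printf("reused=%v wasIdle=%v idleTime=%v\n",
				info.Reused, info.WasIdle, info.IdleTime)
		},
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
}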
diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna10.0.0.go
index 346fe4423..a98a31f40 100644
--- a/vendor/golang.org/x/net/idna/idna.go
+++ b/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -4,14 +4,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build go1.10
+
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
-// UTS #46 is defined in http://www.unicode.org/reports/tr46.
-// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
@@ -297,7 +299,7 @@ func (e runeError) Error() string {
}
// process implements the algorithm described in section 4 of UTS #46,
-// see http://www.unicode.org/reports/tr46.
+// see https://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
var isBidi bool
diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go
new file mode 100644
index 000000000..8842146b5
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -0,0 +1,682 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/secure/bidirule"
+ "golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+ return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+ return Punycode.process(s, false)
+}
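As the NOTE above explains, these wrappers use the permissive Punycode profile and return a (possibly partially) processed string even when the error is non-nil. A short usage sketch of the exported API:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // xn--bcher-kva.example.com <nil>

	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // bücher.example.com <nil>

	// Stricter profiles report errors but still return a processed string:
	// the Lookup profile applies STD3 rules, so '_' is a disallowed rune.
	s, err := idna.Lookup.ToASCII("exa_mple.com")
	fmt.Println(s, err)
}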
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by most browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+ return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+ return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by UTS #46 and is adopted by some
+// browsers.
+func RemoveLeadingDots(remove bool) Option {
+ return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+func ValidateLabels(enable bool) Option {
+ return func(o *options) {
+ // Don't override existing mappings, but set one that at least checks
+ // normalization if it is not set.
+ if o.mapping == nil && enable {
+ o.mapping = normalize
+ }
+ o.trie = trie
+ o.validateLabels = enable
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details. This option
+// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+func StrictDomainName(use bool) Option {
+ return func(o *options) {
+ o.trie = trie
+ o.useSTD3Rules = use
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+func BidiRule() Option {
+ return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+ return func(o *options) {
+ o.mapping = validateRegistration
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ VerifyDNSLength(true)(o)
+ BidiRule()(o)
+ }
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+ return func(o *options) {
+ o.mapping = validateAndMap
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ RemoveLeadingDots(true)(o)
+ }
+}
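MapForLookup deliberately leaves out the Bidi Rule, so a caller wanting the full RFC 5891 lookup behaviour composes it with BidiRule, much as the Lookup profile further down is assembled. A sketch under that reading; the input string is illustrative.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Later options override earlier ones, so fine-grained options can be
	// layered on top of the MapForLookup bundle.
	p := idna.New(idna.MapForLookup(), idna.BidiRule(), idna.VerifyDNSLength(true))
	out, err := p.ToASCII("Bücher.Example.COM")
	fmt.Println(out, err) // mapping lowercases, then punycodes: xn--bcher-kva.example.com
}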
+
+type options struct {
+ transitional bool
+ useSTD3Rules bool
+ validateLabels bool
+ verifyDNSLength bool
+ removeLeadingDots bool
+
+ trie *idnaTrie
+
+ // fromPuny calls validation rules when converting A-labels to U-labels.
+ fromPuny func(p *Profile, s string) error
+
+ // mapping implements a validation and mapping step as defined in RFC 5895
+ // or UTS 46, tailored to, for example, domain registration or lookup.
+ mapping func(p *Profile, s string) (string, error)
+
+ // bidirule, if specified, checks whether s conforms to the Bidi Rule
+ // defined in RFC 5893.
+ bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+ options
+}
+
+func apply(o *options, opts []Option) {
+ for _, f := range opts {
+ f(o)
+ }
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+ p := &Profile{}
+ apply(&p.options, o)
+ return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+ return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+ pp := *p
+ pp.transitional = false
+ return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+ s := ""
+ if p.transitional {
+ s = "Transitional"
+ } else {
+ s = "NonTransitional"
+ }
+ if p.useSTD3Rules {
+ s += ":UseSTD3Rules"
+ }
+ if p.validateLabels {
+ s += ":ValidateLabels"
+ }
+ if p.verifyDNSLength {
+ s += ":VerifyDNSLength"
+ }
+ return s
+}
+
+var (
+ // Punycode is a Profile that does raw punycode processing with a minimum
+ // of validation.
+ Punycode *Profile = punycode
+
+ // Lookup is the recommended profile for looking up domain names, according
+ // to Section 5 of RFC 5891. The exact configuration of this profile may
+ // change over time.
+ Lookup *Profile = lookup
+
+ // Display is the recommended profile for displaying domain names.
+ // The configuration of this profile may change over time.
+ Display *Profile = display
+
+ // Registration is the recommended profile for checking whether a given
+ // IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration
+
+ punycode = &Profile{}
+ lookup = &Profile{options{
+ transitional: true,
+ useSTD3Rules: true,
+ validateLabels: true,
+ removeLeadingDots: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ display = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ removeLeadingDots: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ registration = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ verifyDNSLength: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateRegistration,
+ bidirule: bidirule.ValidString,
+ }}
+
+ // TODO: profiles
+ // Register: recommended for approving domain names: don't do any mappings
+ // but rather reject on invalid input. Bundle or block deviation characters.
+)
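The exported profiles cover the common cases, so most callers never assemble their own Profile. A brief sketch contrasting them; the input names are illustrative.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup maps and validates for DNS lookup per RFC 5891 Section 5; it is
	// transitional here, so "ß" maps to "ss".
	fmt.Println(idna.Lookup.ToASCII("straße.example"))

	// Registration rejects rather than maps: uppercase input yields an error
	// alongside the partially processed string.
	fmt.Println(idna.Registration.ToASCII("Straße.example"))

	// Display keeps the non-transitional mapping for showing names to users.
	fmt.Println(idna.Display.ToUnicode("xn--strae-oqa.example"))
}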
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+ return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+ return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see https://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+ var err error
+ if p.mapping != nil {
+ s, err = p.mapping(p, s)
+ }
+ // Remove leading empty labels.
+ if p.removeLeadingDots {
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+ }
+ }
+ // It seems like we should only create this error on ToASCII, but the
+ // UTS 46 conformance tests suggest we should always check this.
+ if err == nil && p.verifyDNSLength && s == "" {
+ err = &labelError{s, "A4"}
+ }
+ labels := labelIter{orig: s}
+ for ; !labels.done(); labels.next() {
+ label := labels.label()
+ if label == "" {
+ // Empty labels are not okay. The label iterator skips the last
+ // label if it is empty.
+ if err == nil && p.verifyDNSLength {
+ err = &labelError{s, "A4"}
+ }
+ continue
+ }
+ if strings.HasPrefix(label, acePrefix) {
+ u, err2 := decode(label[len(acePrefix):])
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ // Spec says keep the old label.
+ continue
+ }
+ labels.set(u)
+ if err == nil && p.validateLabels {
+ err = p.fromPuny(p, u)
+ }
+ if err == nil {
+ // This should be called on NonTransitional, according to the
+ // spec, but that currently does not have any effect. Use the
+ // original profile to preserve options.
+ err = p.validateLabel(u)
+ }
+ } else if err == nil {
+ err = p.validateLabel(label)
+ }
+ }
+ if toASCII {
+ for labels.reset(); !labels.done(); labels.next() {
+ label := labels.label()
+ if !ascii(label) {
+ a, err2 := encode(acePrefix, label)
+ if err == nil {
+ err = err2
+ }
+ label = a
+ labels.set(a)
+ }
+ n := len(label)
+ if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+ err = &labelError{label, "A4"}
+ }
+ }
+ }
+ s = labels.result()
+ if toASCII && p.verifyDNSLength && err == nil {
+ // Compute the length of the domain name minus the root label and its dot.
+ n := len(s)
+ if n > 0 && s[n-1] == '.' {
+ n--
+ }
+ if len(s) < 1 || n > 253 {
+ err = &labelError{s, "A4"}
+ }
+ }
+ return s, err
+}
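The length checks at the end of process enforce the 63-byte label and 253-byte name limits only when verifyDNSLength is set. A sketch of tripping the label limit through the exported API; the overlong label is synthetic.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/idna"
)

func main() {
	p := idna.New(idna.MapForLookup(), idna.VerifyDNSLength(true))

	// 64 'a's exceed the 63-byte label limit, so process reports an A4
	// labelError while still returning the processed name.
	long := strings.Repeat("a", 64) + ".example.com"
	s, err := p.ToASCII(long)
	fmt.Println(len(s), err) // err is non-nil: idna: invalid label ...
}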
+
+func normalize(p *Profile, s string) (string, error) {
+ return norm.NFC.String(s), nil
+}
+
+func validateRegistration(p *Profile, s string) (string, error) {
+ if !norm.NFC.IsNormalString(s) {
+ return s, &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ // TODO: handle the NV8 defined in the Unicode idna data set to allow
+ // for strict conformance to IDNA2008.
+ case valid, deviation:
+ case disallowed, mapped, unknown, ignored:
+ r, _ := utf8.DecodeRuneInString(s[i:])
+ return s, runeError(r)
+ }
+ i += sz
+ }
+ return s, nil
+}
+
+func validateAndMap(p *Profile, s string) (string, error) {
+ var (
+ err error
+ b []byte
+ k int
+ )
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ start := i
+ i += sz
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ case valid:
+ continue
+ case disallowed:
+ if err == nil {
+ r, _ := utf8.DecodeRuneInString(s[start:])
+ err = runeError(r)
+ }
+ continue
+ case mapped, deviation:
+ b = append(b, s[k:start]...)
+ b = info(v).appendMapping(b, s[start:i])
+ case ignored:
+ b = append(b, s[k:start]...)
+ // drop the rune
+ case unknown:
+ b = append(b, s[k:start]...)
+ b = append(b, "\ufffd"...)
+ }
+ k = i
+ }
+ if k == 0 {
+ // No changes so far.
+ s = norm.NFC.String(s)
+ } else {
+ b = append(b, s[k:]...)
+ if norm.NFC.QuickSpan(b) != len(b) {
+ b = norm.NFC.Bytes(b)
+ }
+ // TODO: the punycode converters require strings as input.
+ s = string(b)
+ }
+ return s, err
+}
+
+// A labelIter allows iterating over domain name labels.
+type labelIter struct {
+ orig string
+ slice []string
+ curStart int
+ curEnd int
+ i int
+}
+
+func (l *labelIter) reset() {
+ l.curStart = 0
+ l.curEnd = 0
+ l.i = 0
+}
+
+func (l *labelIter) done() bool {
+ return l.curStart >= len(l.orig)
+}
+
+func (l *labelIter) result() string {
+ if l.slice != nil {
+ return strings.Join(l.slice, ".")
+ }
+ return l.orig
+}
+
+func (l *labelIter) label() string {
+ if l.slice != nil {
+ return l.slice[l.i]
+ }
+ p := strings.IndexByte(l.orig[l.curStart:], '.')
+ l.curEnd = l.curStart + p
+ if p == -1 {
+ l.curEnd = len(l.orig)
+ }
+ return l.orig[l.curStart:l.curEnd]
+}
+
+// next sets the value to the next label. It skips the last label if it is empty.
+func (l *labelIter) next() {
+ l.i++
+ if l.slice != nil {
+ if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
+ l.curStart = len(l.orig)
+ }
+ } else {
+ l.curStart = l.curEnd + 1
+ if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
+ l.curStart = len(l.orig)
+ }
+ }
+}
+
+func (l *labelIter) set(s string) {
+ if l.slice == nil {
+ l.slice = strings.Split(l.orig, ".")
+ }
+ l.slice[l.i] = s
+}
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+ switch cat {
+ case disallowedSTD3Mapped:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = mapped
+ }
+ case disallowedSTD3Valid:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = valid
+ }
+ case deviation:
+ if !p.transitional {
+ cat = valid
+ }
+ case validNV8, validXV8:
+ // TODO: handle V2008
+ cat = valid
+ }
+ return cat
+}
+
+func validateFromPunycode(p *Profile, s string) error {
+ if !norm.NFC.IsNormalString(s) {
+ return &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if c := p.simplify(info(v).category()); c != valid && c != deviation {
+ return &labelError{s, "V6"}
+ }
+ i += sz
+ }
+ return nil
+}
+
+const (
+ zwnj = "\u200c"
+ zwj = "\u200d"
+)
+
+type joinState int8
+
+const (
+ stateStart joinState = iota
+ stateVirama
+ stateBefore
+ stateBeforeVirama
+ stateAfter
+ stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+ stateStart: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateVirama,
+ },
+ stateVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ },
+ stateBefore: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ joinZWNJ: stateAfter,
+ joinZWJ: stateFAIL,
+ joinVirama: stateBeforeVirama,
+ },
+ stateBeforeVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ },
+ stateAfter: {
+ joiningL: stateFAIL,
+ joiningD: stateBefore,
+ joiningT: stateAfter,
+ joiningR: stateStart,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateAfter, // no-op as we can't accept joiners here
+ },
+ stateFAIL: {
+ 0: stateFAIL,
+ joiningL: stateFAIL,
+ joiningD: stateFAIL,
+ joiningT: stateFAIL,
+ joiningR: stateFAIL,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateFAIL,
+ },
+}
+
+// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
+// already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) error {
+ if s == "" {
+ if p.verifyDNSLength {
+ return &labelError{s, "A4"}
+ }
+ return nil
+ }
+ if p.bidirule != nil && !p.bidirule(s) {
+ return &labelError{s, "B"}
+ }
+ if !p.validateLabels {
+ return nil
+ }
+ trie := p.trie // p.validateLabels is only set if trie is set.
+ if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+ return &labelError{s, "V2"}
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return &labelError{s, "V3"}
+ }
+ // TODO: merge the use of this in the trie.
+ v, sz := trie.lookupString(s)
+ x := info(v)
+ if x.isModifier() {
+ return &labelError{s, "V5"}
+ }
+ // Quickly return in the absence of zero-width (non) joiners.
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
+ return nil
+ }
+ st := stateStart
+ for i := 0; ; {
+ jt := x.joinType()
+ if s[i:i+sz] == zwj {
+ jt = joinZWJ
+ } else if s[i:i+sz] == zwnj {
+ jt = joinZWNJ
+ }
+ st = joinStates[st][jt]
+ if x.isViramaModifier() {
+ st = joinStates[st][joinVirama]
+ }
+ if i += sz; i == len(s) {
+ break
+ }
+ v, sz = trie.lookupString(s[i:])
+ x = info(v)
+ }
+ if st == stateFAIL || st == stateAfter {
+ return &labelError{s, "C"}
+ }
+ return nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables10.0.0.go
index f910b2691..54fddb4b1 100644
--- a/vendor/golang.org/x/net/idna/tables.go
+++ b/vendor/golang.org/x/net/idna/tables10.0.0.go
@@ -1,11 +1,13 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+// +build go1.10,!go1.13
+
package idna
// UnicodeVersion is the Unicode version from which the tables in this package are derived.
const UnicodeVersion = "10.0.0"
-var mappings string = "" + // Size: 8176 bytes
+var mappings string = "" + // Size: 8175 bytes
"\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
"\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
"\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
@@ -4554,4 +4556,4 @@ var idnaSparseValues = [1915]valueRange{
{value: 0x0040, lo: 0xb0, hi: 0xbf},
}
-// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E
+// Total table size 42114 bytes (41KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go
new file mode 100644
index 000000000..c515d7ad2
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -0,0 +1,4653 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.13
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "11.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07</\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c<?\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,
+ 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,
+ 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,
+ 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,
+ 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,
+ 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,
+ 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,
+ 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,
+ 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d,
+ 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,
+ 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,
+ 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,
+ 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,
+ 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,
+ 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,
+ 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,
+ 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,
+ 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,
+ 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,
+ 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,
+ 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,
+ 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,
+ 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,
+ 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,
+ 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,
+ 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,
+ 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,
+ 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,
+ 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,
+ 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,
+ 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,
+ 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,
+ 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,
+ 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,
+ 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,
+ 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,
+ 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,
+ 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,
+ 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,
+ 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,
+ 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,
+ 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,
+ 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,
+ 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,
+ 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,
+ 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,
+ 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,
+ 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,
+ 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,
+ 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,
+ 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,
+ 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,
+ 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,
+ 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,
+ 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,
+ 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,
+ 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,
+ 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,
+ 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,
+ 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,
+ 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,
+ 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,
+ 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081,
+ 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,
+ 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,
+ 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,
+ 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,
+ 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,
+ 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,
+ 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,
+ 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,
+ 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,
+ 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,
+ 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,
+ 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,
+ 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,
+ 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,
+ 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,
+ 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,
+ 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,
+ 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,
+ 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,
+ 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,
+ 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,
+ 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,
+ 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,
+ 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,
+ 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,
+ 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,
+ 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,
+ 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,
+ 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,
+ 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,
+ 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,
+ 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,
+ 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,
+ 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,
+ 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,
+ 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,
+ 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,
+ 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,
+ 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,
+ 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,
+ 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,
+ 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,
+ 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679,
+ 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,
+ 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,
+ 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,
+ 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,
+ 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,
+ 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,
+ 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,
+ 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,
+ 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,
+ 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,
+ 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,
+ 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,
+ 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,
+ 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,
+ 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,
+ 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,
+ 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,
+ 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,
+ 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,
+ 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,
+ 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,
+ 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,
+ 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,
+ 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,
+ 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,
+ 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,
+ 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,
+ 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,
+ 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,
+ 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,
+ 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,
+ 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,
+ 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,
+ 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,
+ 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,
+ 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,
+ 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,
+ 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,
+ 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,
+ 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,
+ 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,
+ 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,
+ 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,
+ 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,
+ 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,
+ 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,
+ 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,
+ 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,
+ 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,
+ 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,
+ 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,
+ 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168,
+ 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179,
+ 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d,
+ 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184,
+ 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
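The value words above are grouped into 64-entry blocks (each "// Block 0x.., offset 0x.." comment records n<<6 for block n), and idnaIndex maps trie positions derived from the bytes of a rune's UTF-8 encoding to block numbers. What follows is a minimal, hypothetical sketch of that dense two-level lookup; toyIndex, toyValues and denseLookup are illustrative stand-ins, not the generated trie's own tables or methods, and the package's generated lookup code (not shown in this hunk) handles the full multi-byte UTF-8 walk.

package main

import "fmt"

// Toy stand-ins for idnaIndex / idnaValues: an index over two 64-entry value
// blocks. Block 0 is the zero block; block 1 carries one non-zero word.
var toyIndex = [4]uint16{0: 0, 1: 1, 2: 0, 3: 0}
var toyValues = [2 * 64]uint16{1<<6 | 0x05: 0xbeef}

// denseLookup resolves an index slot plus the low six bits of a byte to a
// property word: the index picks a 64-entry block n, and the word sits at
// n<<6 | low6 -- the same n<<6 that the block comments above record as the
// block offset (e.g. block 0x6d starts at 0x6d<<6 = 0x1b40).
func denseLookup(idx int, low6 byte) uint16 {
	n := uint32(toyIndex[idx])
	return toyValues[n<<6|uint32(low6&0x3f)]
}

func main() {
	fmt.Printf("%#x\n", denseLookup(1, 0x05)) // 0xbeef, from block 1
	fmt.Printf("%#x\n", denseLookup(2, 0x05)) // 0x0, block 0 is the zero block
}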
+// idnaSparseOffset: 276 entries, 552 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca}
+
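Block numbers beyond the dense range are resolved through the sparse tables that follow: idnaSparseOffset points block n at a run of valueRange entries in idnaSparseValues, whose first entry is a header (its lo field counts the ranges in the run, e.g. {value: 0x0000, lo: 0x07} for Block 0x0 below, whose run ends at the next offset 0x8), and the remaining entries are sorted, non-overlapping byte ranges. Below is a minimal sketch of such a lookup, under the assumption that the header's value field acts as a per-byte stride inside a matching range; sparseRange, sparseLookup and the toy data are illustrative, not the generated code's own types or methods.

package main

import "fmt"

// sparseRange mirrors the shape of the valueRange entries below: a value plus
// an inclusive lo..hi byte range (field meanings assumed from the data).
type sparseRange struct {
	value  uint16
	lo, hi byte
}

// sparseLookup finds the value for byte b in sparse block n: offsets[n] points
// at a header whose lo field counts the ranges that follow, and those ranges
// are binary-searched. The header's value field is assumed here to act as a
// per-byte stride within a matching range.
func sparseLookup(offsets []uint16, values []sparseRange, n int, b byte) uint16 {
	offset := offsets[n]
	header := values[offset]
	lo := int(offset) + 1
	hi := lo + int(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := values[m]
		switch {
		case b < r.lo:
			hi = m
		case b > r.hi:
			lo = m + 1
		default:
			return r.value + uint16(b-r.lo)*header.value
		}
	}
	return 0 // no range covers b
}

func main() {
	// Toy block shaped like the real data: a header announcing two ranges.
	offsets := []uint16{0}
	values := []sparseRange{
		{value: 0x0000, lo: 0x02}, // header: two ranges, zero stride
		{value: 0x0008, lo: 0x80, hi: 0x9f},
		{value: 0x0040, lo: 0xa0, hi: 0xbf},
	}
	fmt.Printf("%#x\n", sparseLookup(offsets, values, 0, 0x85)) // 0x8
	fmt.Printf("%#x\n", sparseLookup(offsets, values, 0, 0xb0)) // 0x40
}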
+// idnaSparseValues: 1997 entries, 7988 bytes
+var idnaSparseValues = [1997]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x45, offset 0x269
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x46, offset 0x27a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x27e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x289
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x28d
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x296
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x29e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2a9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ac
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2ba
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2be
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x54, offset 0x2c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2cd
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2dc
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x58, offset 0x2e7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5a, offset 0x2f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0xbf},
+ // Block 0x5b, offset 0x2f8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5c, offset 0x2fe
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x5e, offset 0x304
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x309
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x61, offset 0x30c
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x62, offset 0x316
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x63, offset 0x319
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x64, offset 0x328
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x65, offset 0x32c
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x66, offset 0x331
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x334
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x68, offset 0x338
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x69, offset 0x33d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6a, offset 0x342
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x348
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x34e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6f, offset 0x367
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x70, offset 0x376
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x71, offset 0x37b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x72, offset 0x383
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x38d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3a0
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3b1
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3ba
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x78, offset 0x3ca
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3d7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3e1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3e6
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7c, offset 0x3f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7e, offset 0x3fc
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x3fe
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x80, offset 0x402
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x404
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x82, offset 0x408
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x83, offset 0x411
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x84, offset 0x417
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x85, offset 0x41b
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x86, offset 0x42b
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x87, offset 0x435
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x88, offset 0x43a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x89, offset 0x43d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8a, offset 0x443
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8b, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8c, offset 0x44f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8d, offset 0x453
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8e, offset 0x459
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8f, offset 0x45e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x90, offset 0x467
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x91, offset 0x46c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x92, offset 0x472
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x93, offset 0x479
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x94, offset 0x480
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x95, offset 0x487
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x48b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x97, offset 0x490
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x493
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x99, offset 0x498
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9a, offset 0x4a4
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9b, offset 0x4aa
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x4af
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9d, offset 0x4b6
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4be
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9f, offset 0x4c3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa0, offset 0x4c7
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa1, offset 0x4d7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa2, offset 0x4de
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa3, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa4, offset 0x4e6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa5, offset 0x4ed
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa6, offset 0x4ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa7, offset 0x4f2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa8, offset 0x4f5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x4f9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xaa, offset 0x502
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xab, offset 0x506
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xac, offset 0x50e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xad, offset 0x516
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x51c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x525
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x531
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x538
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x541
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x54b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x552
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x560
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x56d
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x57a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x583
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x587
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xba, offset 0x596
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x59e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5a9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5b2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5b8
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5c9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc1, offset 0x5d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5d6
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5e2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5eb
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f3
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc7, offset 0x5fe
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xc8, offset 0x607
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xc9, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xca, offset 0x616
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcb, offset 0x620
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xcc, offset 0x629
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcd, offset 0x635
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xce, offset 0x642
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd0, offset 0x65d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd1, offset 0x664
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd2, offset 0x667
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd3, offset 0x66c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd4, offset 0x66f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xd5, offset 0x672
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd6, offset 0x675
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xd7, offset 0x67c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xd8, offset 0x683
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd9, offset 0x687
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xda, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xdb, offset 0x695
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdc, offset 0x698
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xdd, offset 0x69b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x6a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xe0, offset 0x6aa
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x6ad
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe2, offset 0x6b0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe3, offset 0x6b3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x6b6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe5, offset 0x6b9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe6, offset 0x6be
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe7, offset 0x6c8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe8, offset 0x6cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xe9, offset 0x6cf
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xea, offset 0x6de
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xeb, offset 0x6ea
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xed, offset 0x6f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xee, offset 0x6f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xef, offset 0x6fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf0, offset 0x700
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf1, offset 0x705
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf2, offset 0x70e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf3, offset 0x719
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf4, offset 0x71f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xf5, offset 0x727
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xf6, offset 0x72a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xf7, offset 0x72d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xf8, offset 0x731
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xf9, offset 0x735
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xfa, offset 0x73b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xfb, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xfc, offset 0x746
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xfd, offset 0x749
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0xfe, offset 0x759
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xff, offset 0x760
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x100, offset 0x763
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0xbf},
+ // Block 0x101, offset 0x766
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x102, offset 0x76a
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x103, offset 0x770
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x104, offset 0x775
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x105, offset 0x77a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x106, offset 0x782
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x107, offset 0x787
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x108, offset 0x78b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x109, offset 0x78f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x10a, offset 0x792
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x10b, offset 0x795
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x10c, offset 0x799
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x10d, offset 0x79d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x10e, offset 0x7a0
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0x10f, offset 0x7b0
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x110, offset 0x7c1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x111, offset 0x7c6
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x112, offset 0x7c8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x113, offset 0x7ca
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42466 bytes (41KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go
new file mode 100644
index 000000000..8b65fa167
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables9.0.0.go
@@ -0,0 +1,4486 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build !go1.10
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "9.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07</\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c<?\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
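Note: the generated lookup above consumes at most one UTF-8 encoding per call and reports its width, so callers normally drive it in a loop. Below is a minimal illustrative sketch of such a loop, assumed to sit inside package idna next to these tables; walkTrieValues is a hypothetical name and is not part of the generated file.

// walkTrieValues collects the trie value for each UTF-8 encoding in s.
// Illustrative only; not part of the generated tables file.
func walkTrieValues(t *idnaTrie, s []byte) []uint16 {
	var out []uint16
	for len(s) > 0 {
		v, sz := t.lookup(s)
		if sz == 0 {
			break // s ends in the middle of an encoding
		}
		// sz is 1 for ASCII and for illegal bytes, and up to 4 for longer runes.
		out = append(out, v)
		s = s[sz:]
	}
	return out
}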
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 124:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 124
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
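lookupValue is the final step of every lookup* variant above: block indices below 124 read directly from the dense idnaValues array in 64-entry strides, while higher indices fall through to idnaSparse. A small sketch tying the pieces together for a string input; categoryOf is an illustrative name, assumed to live inside package idna, and is not part of the generated file.

// categoryOf returns the raw trie value for the first rune of s, or 0 if s is
// empty or ends mid-encoding. Illustrative only; not generated.
func categoryOf(s string) uint16 {
	if len(s) == 0 {
		return 0
	}
	var t idnaTrie
	v, sz := t.lookupString(s)
	if sz == 0 {
		return 0
	}
	return v
}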
+
+// idnaValues: 126 blocks, 8064 entries, 16128 bytes
+// The third block is the zero block.
+var idnaValues = [8064]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308,
+ 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008,
+ 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308,
+ 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308,
+ 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1,
+ 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308,
+ 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008,
+ 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008,
+ 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008,
+ 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008,
+ 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008,
+ 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008,
+ 0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040,
+ 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008,
+ 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008,
+ 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008,
+ 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040,
+ 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040,
+ 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008,
+ 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008,
+ 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1,
+ 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308,
+ 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018,
+ 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018,
+ 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008,
+ 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040,
+ 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008,
+ 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008,
+ 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008,
+ 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008,
+ 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040,
+ 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008,
+ // Block 0x19, offset 0x640
+ 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040,
+ 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308,
+ 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308,
+ 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040,
+ 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040,
+ 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040,
+ 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308,
+ 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008,
+ 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008,
+ 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008,
+ 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008,
+ 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008,
+ 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008,
+ 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008,
+ 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308,
+ 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008,
+ 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040,
+ 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040,
+ 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040,
+ 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308,
+ 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040,
+ 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008,
+ 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008,
+ 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008,
+ 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008,
+ 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008,
+ 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008,
+ 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040,
+ 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040,
+ 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008,
+ 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008,
+ 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9,
+ 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308,
+ 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018,
+ 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008,
+ 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040,
+ 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040,
+ 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040,
+ 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040,
+ 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008,
+ 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008,
+ 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040,
+ 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308,
+ 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040,
+ 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040,
+ 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040,
+ 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308,
+ 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040,
+ 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018,
+ 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008,
+ 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008,
+ 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040,
+ 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008,
+ 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008,
+ 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008,
+ 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040,
+ 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308,
+ // Block 0x21, offset 0x840
+ 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040,
+ 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008,
+ 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040,
+ 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040,
+ 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040,
+ 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308,
+ 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040,
+ 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040,
+ 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008,
+ 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018,
+ 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018,
+ 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008,
+ 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040,
+ 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040,
+ 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008,
+ 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008,
+ 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008,
+ 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308,
+ 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308,
+ 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008,
+ 0x90c: 0x0008, 0x90d: 0x0a09, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040,
+ 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59,
+ 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008,
+ // Block 0x25, offset 0x940
+ 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018,
+ 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308,
+ 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308,
+ 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11,
+ 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308,
+ 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308,
+ 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308,
+ 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308,
+ 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018,
+ // Block 0x26, offset 0x980
+ 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008,
+ 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008,
+ 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008,
+ 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008,
+ 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008,
+ 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008,
+ 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008,
+ 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41,
+ 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008,
+ 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1,
+ 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011,
+ 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041,
+ 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9,
+ 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099,
+ 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269,
+ 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1,
+ 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 0x0008,
+ 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008,
+ 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008,
+ 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008,
+ 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008,
+ 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008,
+ 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008,
+ 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169,
+ 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9,
+ 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251,
+ 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9,
+ 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359,
+ 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1,
+ 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008,
+ 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008,
+ 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008,
+ 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008,
+ 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008,
+ 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008,
+ 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008,
+ 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008,
+ 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008,
+ 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008,
+ 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008,
+ 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008,
+ 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008,
+ 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045,
+ 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008,
+ 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045,
+ 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008,
+ 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045,
+ 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045,
+ 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489,
+ 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1,
+ 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1,
+ 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591,
+ 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1,
+ 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1,
+ 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771,
+ 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891,
+ 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831,
+ 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951,
+ 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040,
+ 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459,
+ 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040,
+ 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489,
+ 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008,
+ 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008,
+ 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2,
+ 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61,
+ 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045,
+ 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa,
+ 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9,
+ 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a,
+ 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0,
+ 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d,
+ 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e,
+ 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018,
+ 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018,
+ 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040,
+ 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a,
+ 0xbb0: 0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018,
+ 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018,
+ 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018,
+ 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018,
+ 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340,
+ 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340,
+ 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61,
+ 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd,
+ 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61,
+ 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5,
+ 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09,
+ 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359,
+ 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040,
+ 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018,
+ 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018,
+ 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018,
+ 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018,
+ 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018,
+ 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e,
+ 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249,
+ 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41,
+ 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018,
+ 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269,
+ 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018,
+ 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018,
+ 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09,
+ 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9,
+ 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd,
+ 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9,
+ 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018,
+ 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 0xc90: 0x2121, 0xc91: 0x2151,
+ 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279,
+ 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399,
+ 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439,
+ 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369,
+ 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61,
+ 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451,
+ 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5,
+ 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018,
+ 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040,
+ 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040,
+ 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040,
+ 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040,
+ 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51,
+ 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601,
+ 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691,
+ 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26,
+ 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6,
+ 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a,
+ 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46,
+ 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06,
+ 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6,
+ 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86,
+ 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46,
+ 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199,
+ 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99,
+ 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089,
+ 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9,
+ 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249,
+ 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71,
+ 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9,
+ 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1,
+ 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018,
+ 0xd70: 0x0018, 0xd71: 0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018,
+ 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018,
+ 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008,
+ 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008,
+ 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008,
+ 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008,
+ 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008,
+ 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd,
+ 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d,
+ 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9,
+ 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d,
+ 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008,
+ 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008,
+ 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008,
+ 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008,
+ 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008,
+ 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008,
+ 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008,
+ 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018,
+ 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308,
+ 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040,
+ 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018,
+ 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d,
+ 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d,
+ 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d,
+ 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040,
+ 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040,
+ 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040,
+ 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040,
+ 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040,
+ 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040,
+ 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008,
+ 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018,
+ 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 0xe51: 0x0018,
+ 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018,
+ 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018,
+ 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018,
+ 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018,
+ 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018,
+ 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018,
+ 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018,
+ 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd,
+ 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd,
+ 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d,
+ 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d,
+ 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d,
+ 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd,
+ 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d,
+ 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd,
+ 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d,
+ 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd,
+ 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd,
+ 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d,
+ 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018,
+ 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd,
+ 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d,
+ 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008,
+ 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008,
+ 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008,
+ 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008,
+ 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040,
+ 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd,
+ 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018,
+ 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761,
+ 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1,
+ 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881,
+ 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd,
+ 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d,
+ 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d,
+ 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd,
+ 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d,
+ 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d,
+ 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d,
+ 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd,
+ 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd,
+ 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d,
+ 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d,
+ 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd,
+ 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d,
+ 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999,
+ 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29,
+ 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69,
+ 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69,
+ 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15,
+ 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75,
+ 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded,
+ 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d,
+ 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5,
+ 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d,
+ 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d,
+ 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd,
+ 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9,
+ 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1,
+ 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9,
+ 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549,
+ 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1,
+ 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11,
+ 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91,
+ 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9,
+ 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011,
+ 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209,
+ 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541,
+ 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781,
+ 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 0x1011: 0x5979,
+ 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89,
+ 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1,
+ 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99,
+ 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9,
+ 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9,
+ 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069,
+ 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9,
+ 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271,
+ 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9,
+ 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed,
+ 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371,
+ 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9,
+ 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d,
+ 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211,
+ 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1,
+ 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599,
+ 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9,
+ 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671,
+ 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709,
+ 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781,
+ 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1,
+ 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811,
+ 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901,
+ 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1,
+ 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11,
+ 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31,
+ 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51,
+ 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008,
+ 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008,
+ 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008,
+ 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008,
+ 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008,
+ 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008,
+ 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008,
+ 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308,
+ 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308,
+ 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308,
+ 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11,
+ 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008,
+ 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008,
+ 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008,
+ 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008,
+ 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008,
+ 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018,
+ 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018,
+ 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018,
+ 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008,
+ 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008,
+ 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008,
+ 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008,
+ 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008,
+ 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008,
+ 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008,
+ 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008,
+ 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008,
+ 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008,
+ 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008,
+ 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008,
+ 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d,
+ 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d,
+ 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040,
+ 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008,
+ 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040,
+ 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575,
+ 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635,
+ 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008,
+ 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715,
+ 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5,
+ 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008,
+ 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008,
+ 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935,
+ 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5,
+ 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5,
+ 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35,
+ 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5,
+ 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19,
+ 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91,
+ 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040,
+ 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040,
+ 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040,
+ 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040,
+ 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040,
+ 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040,
+ 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001,
+ 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 0x0040, 0x128a: 0x0040, 0x128b: 0x0040,
+ 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040,
+ 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9,
+ 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1,
+ 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149,
+ 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2,
+ 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1,
+ 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1,
+ 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479,
+ 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040,
+ 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659,
+ 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721,
+ 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751,
+ 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769,
+ 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799,
+ 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1,
+ 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1,
+ 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9,
+ 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829,
+ 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871,
+ 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9,
+ 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9,
+ 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919,
+ 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931,
+ 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961,
+ 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991,
+ 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1,
+ 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818,
+ 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818,
+ 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040,
+ 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040,
+ 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040,
+ 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09,
+ 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 0x135d: 0x0479,
+ 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81,
+ 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1,
+ 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19,
+ 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91,
+ 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1,
+ 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1,
+ 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1,
+ 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1,
+ 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991,
+ 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81,
+ 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a,
+ 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99,
+ 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89,
+ 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79,
+ 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19,
+ 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649,
+ 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9,
+ 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49,
+ 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21,
+ 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9,
+ 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01,
+ 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91,
+ 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9,
+ 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171,
+ 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289,
+ 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1,
+ 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621,
+ 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739,
+ 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1,
+ 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9,
+ 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29,
+ 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079,
+ 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1,
+ 0x1430: 0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171,
+ 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261,
+ 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1,
+ 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1,
+ 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171,
+ 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261,
+ 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351,
+ 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441,
+ 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509,
+ 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1,
+ 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081,
+ 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239,
+ 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040,
+ 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040,
+ 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609,
+ 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721,
+ 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839,
+ 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919,
+ 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9,
+ 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9,
+ 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9,
+ 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1,
+ 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989,
+ 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040,
+ 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040,
+ 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040,
+ 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040,
+ 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040,
+ 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040,
+ 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9,
+ 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12,
+ 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x33c0, 0x1501: 0x33c0, 0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0,
+ 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0,
+ 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55,
+ 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75,
+ 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308,
+ 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308,
+ 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308,
+ 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2,
+ 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35,
+ 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018,
+ 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56,
+ 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95,
+ 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa,
+ 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95,
+ 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99,
+ 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda,
+ 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040,
+ 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040,
+ 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081,
+ 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141,
+ 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171,
+ 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1,
+ 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1,
+ 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201,
+ 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219,
+ 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249,
+ 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291,
+ 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1,
+ 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9,
+ 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321,
+ 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339,
+ 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369,
+ 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381,
+ 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1,
+ 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9,
+ 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9,
+ 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1,
+ 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441,
+ 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9,
+ 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea,
+ 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2,
+ 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9,
+ 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81,
+ 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2,
+ 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159,
+ 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41,
+ 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9,
+ 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9,
+ 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a,
+ 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09,
+ 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51,
+ 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039,
+ 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279,
+ 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a,
+ 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115,
+ 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5,
+ 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295,
+ 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355,
+ 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415,
+ 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515,
+ 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595,
+ 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5,
+ 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655,
+ 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115,
+ 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735,
+ 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 0x16a9: 0x87b5,
+ 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5,
+ 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5,
+ 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5,
+ 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5,
+ 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715,
+ 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040,
+ 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935,
+ 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040,
+ 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6,
+ 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35,
+ 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040,
+ 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040,
+ 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340,
+ 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08,
+ 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808,
+ 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08,
+ 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908,
+ 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08,
+ 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808,
+ 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040,
+ 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18,
+ 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818,
+ 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08,
+ 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08,
+ 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08,
+ 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040,
+ 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040,
+ 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040,
+ 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18,
+ 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818,
+ 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040,
+ 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008,
+ 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008,
+ 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040,
+ 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008,
+ 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008,
+ 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008,
+ 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040,
+ 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008,
+ 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008,
+ 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040,
+ 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008,
+ 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008,
+ 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008,
+ 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308,
+ 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040,
+ 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040,
+ 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040,
+ 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199,
+ 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359,
+ 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269,
+ 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369,
+ 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9,
+ 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259,
+ 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99,
+ 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089,
+ 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9,
+ 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249,
+ 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269,
+ 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369,
+ 0x184c: 0x0289, 0x184d: 0x13d1, 0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9,
+ 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259,
+ 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99,
+ 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089,
+ 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9,
+ 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249,
+ 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71,
+ 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9,
+ 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9,
+ 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259,
+ 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99,
+ 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089,
+ 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040,
+ 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040,
+ 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71,
+ 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9,
+ 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1,
+ 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199,
+ 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99,
+ 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089,
+ 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9,
+ 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249,
+ 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71,
+ 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9,
+ 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1,
+ 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199,
+ 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359,
+ 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269,
+ 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9,
+ 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040,
+ 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71,
+ 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9,
+ 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040,
+ 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199,
+ 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359,
+ 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269,
+ 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369,
+ 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9,
+ 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040,
+ 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9,
+ 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040,
+ 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199,
+ 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359,
+ 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269,
+ 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369,
+ 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9,
+ 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259,
+ 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99,
+ 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1,
+ 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199,
+ 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359,
+ 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269,
+ 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369,
+ 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9,
+ 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259,
+ 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99,
+ 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089,
+ 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9,
+ 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359,
+ 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269,
+ 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369,
+ 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9,
+ 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259,
+ 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99,
+ 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089,
+ 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9,
+ 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 0x19f5: 0x0249,
+ 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71,
+ 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369,
+ 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9,
+ 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259,
+ 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99,
+ 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089,
+ 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9,
+ 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249,
+ 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71,
+ 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9,
+ 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1,
+ 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259,
+ 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99,
+ 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089,
+ 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9,
+ 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249,
+ 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71,
+ 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9,
+ 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1,
+ 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199,
+ 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359,
+ 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089,
+ 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9,
+ 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249,
+ 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71,
+ 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9,
+ 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1,
+ 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099,
+ 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429,
+ 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71,
+ 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9,
+ 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9,
+ 0x1ac6: 0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11,
+ 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109,
+ 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1,
+ 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429,
+ 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099,
+ 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429,
+ 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71,
+ 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9,
+ 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01,
+ 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11,
+ 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109,
+ 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1,
+ 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429,
+ 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099,
+ 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429,
+ 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71,
+ 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9,
+ 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01,
+ 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1,
+ 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109,
+ 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1,
+ 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429,
+ 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099,
+ 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429,
+ 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71,
+ 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9,
+ 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01,
+ 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1,
+ 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41,
+ 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1,
+ 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429,
+ 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099,
+ 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429,
+ 0x1b98: 0x1a31, 0x1b99: 0xbb11, 0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71,
+ 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9,
+ 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01,
+ 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1,
+ 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41,
+ 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1,
+ 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429,
+ 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41,
+ 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079,
+ 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1,
+ 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61,
+ 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9,
+ 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81,
+ 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079,
+ 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1,
+ 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61,
+ 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115,
+ 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135,
+ 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115,
+ 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175,
+ 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115,
+ 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08,
+ 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08,
+ 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08,
+ 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08,
+ 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08,
+ 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411,
+ 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1,
+ 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9,
+ 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231,
+ 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949,
+ 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040,
+ 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429,
+ 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339,
+ 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1,
+ 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351,
+ 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040,
+ 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231,
+ 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949,
+ 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351,
+ 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411,
+ 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231,
+ 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040,
+ 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249,
+ 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02,
+ 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018,
+ 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2,
+ 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72,
+ 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32,
+ 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2,
+ 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2,
+ 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040,
+ 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199,
+ 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359,
+ 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089,
+ 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1,
+ 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018,
+ 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018,
+ 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018,
+ 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018,
+ 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018,
+ 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040,
+ 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018,
+ 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018,
+ 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040,
+ 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040,
+ 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289,
+ 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349,
+ 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409,
+ 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9,
+ 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589,
+ 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649,
+ 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709,
+ 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9,
+ 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79,
+ 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39,
+ 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9,
+ 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39,
+ 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9,
+ 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79,
+ 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39,
+ 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9,
+ 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059,
+ 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9,
+ 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239,
+ 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9,
+ 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399,
+ 0x1e12: 0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459,
+ 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309,
+ 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559,
+ 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9,
+ 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679,
+ 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9,
+ 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d,
+ 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9,
+ 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959,
+ 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d,
+ 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d,
+ 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9,
+ 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99,
+ 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9,
+ 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9,
+ 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99,
+ 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39,
+ 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639,
+ 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9,
+ 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d,
+ 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9,
+ 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d,
+ 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd,
+ 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979,
+ 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19,
+ 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d,
+ 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d,
+ 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99,
+ 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39,
+ 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9,
+ 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39,
+ 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd,
+ 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19,
+ 0x1ee4: 0xee39, 0x1ee5: 0x925d, 0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9,
+ 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59,
+ 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd,
+ 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d,
+ 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d,
+ 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d,
+ 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879,
+ 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919,
+ 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd,
+ 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9,
+ 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99,
+ 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39,
+ 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9,
+ 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d,
+ 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19,
+ 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9,
+ 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59,
+ 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9,
+ 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d,
+ 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040,
+ 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040,
+ 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040,
+ 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040,
+ 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040,
+ 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040,
+}
+
+// idnaIndex: 35 blocks, 2240 entries, 4480 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2240]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20,
+ // Block 0x4, offset 0x100
+ 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15,
+ 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3,
+ 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca,
+ 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1,
+ 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6,
+ 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2,
+ 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9,
+ 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1,
+ 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff,
+ 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b,
+ 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51,
+ 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f,
+ 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d,
+ 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba,
+ 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f,
+ 0x408: 0x130, 0x409: 0xba, 0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138,
+ 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba,
+ 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a,
+ 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65,
+ 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168,
+ 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c,
+ 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173,
+ 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba,
+ // Block 0x1d, offset 0x740
+ 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba,
+ 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba,
+ 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba,
+ 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba,
+ 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a,
+ 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07,
+ 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17,
+ 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07,
+ 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b,
+ 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b,
+ 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b,
+ 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b,
+ 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b,
+ 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b,
+ 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b,
+ 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d,
+ 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba,
+ 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba,
+ 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 0xba,
+ 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba,
+ 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba,
+ 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba,
+ 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b,
+ 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b,
+ 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b,
+ 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b,
+ 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b,
+ 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b,
+ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b,
+ 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+}
+
+// idnaSparseOffset: 258 entries, 516 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a}
+
+// idnaSparseValues: 1869 entries, 7476 bytes
+var idnaSparseValues = [1869]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x6, offset 0x34
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x63
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0xc, offset 0x6b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x77
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0c08, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0x85},
+ {value: 0x0c08, lo: 0x86, hi: 0x87},
+ {value: 0x0a08, lo: 0x88, hi: 0x88},
+ {value: 0x0c08, lo: 0x89, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0x93},
+ {value: 0x0c08, lo: 0x94, hi: 0x94},
+ {value: 0x0a08, lo: 0x95, hi: 0x95},
+ {value: 0x0808, lo: 0x96, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf, offset 0x93
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0x10, offset 0x98
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x11, offset 0xa1
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbf
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xcc
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x15, offset 0xd8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x16, offset 0xe9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x17, offset 0xf3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x18, offset 0xfa
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x19, offset 0x107
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x1a, offset 0x118
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1b, offset 0x11f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1c, offset 0x12a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1e, offset 0x147
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1f, offset 0x151
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x20, offset 0x153
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x21, offset 0x158
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x22, offset 0x15b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x23, offset 0x15e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x24, offset 0x160
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x25, offset 0x16c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x26, offset 0x177
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x17f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x28, offset 0x185
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x29, offset 0x18b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2a, offset 0x190
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2b, offset 0x195
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2c, offset 0x198
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2d, offset 0x19c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2e, offset 0x1a2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2f, offset 0x1a7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x30, offset 0x1b3
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x31, offset 0x1bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x32, offset 0x1c3
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x33, offset 0x1d4
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x34, offset 0x1de
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x35, offset 0x1e1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x36, offset 0x1e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x37, offset 0x1ec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x38, offset 0x1f9
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x39, offset 0x201
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x3a, offset 0x205
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3b, offset 0x20c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3c, offset 0x214
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x224
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3e, offset 0x230
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3f, offset 0x232
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x41, offset 0x248
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x42, offset 0x254
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x43, offset 0x260
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x44, offset 0x268
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x45, offset 0x26d
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x46, offset 0x277
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x47, offset 0x288
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x49, offset 0x297
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x4a, offset 0x29b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4b, offset 0x2a4
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4c, offset 0x2ac
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4d, offset 0x2b2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4e, offset 0x2b7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x4f, offset 0x2ba
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x50, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x51, offset 0x2c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x52, offset 0x2c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x53, offset 0x2cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x54, offset 0x2cf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0018, lo: 0xbd, hi: 0xbf},
+ // Block 0x55, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xab},
+ {value: 0x0018, lo: 0xac, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2dc
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x57, offset 0x2e2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x58, offset 0x2ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x5a, offset 0x2fc
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x306
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5c, offset 0x30a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0xbf},
+ // Block 0x5d, offset 0x30d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5e, offset 0x313
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5f, offset 0x317
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x319
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x61, offset 0x31c
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x62, offset 0x31e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x63, offset 0x321
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x65, offset 0x32e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x66, offset 0x33d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x341
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x68, offset 0x346
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x69, offset 0x349
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x6a, offset 0x34d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6c, offset 0x357
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6f, offset 0x372
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x70, offset 0x378
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x71, offset 0x37c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x38b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x73, offset 0x390
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x75, offset 0x3a2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x76, offset 0x3ad
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x78, offset 0x3c6
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x79, offset 0x3cf
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x7a, offset 0x3df
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3ec
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3f6
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3fb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7e, offset 0x408
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7f, offset 0x40c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x80, offset 0x411
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x413
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x82, offset 0x417
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x83, offset 0x419
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x84, offset 0x41d
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x85, offset 0x426
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x86, offset 0x42c
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x87, offset 0x430
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x88, offset 0x440
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x89, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x8a, offset 0x44f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8b, offset 0x452
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8c, offset 0x458
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8d, offset 0x45f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8e, offset 0x464
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8f, offset 0x468
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x90, offset 0x46e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x91, offset 0x473
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x92, offset 0x47c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x93, offset 0x481
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x94, offset 0x487
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x95, offset 0x48e
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x96, offset 0x495
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x97, offset 0x49c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x98, offset 0x4a0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x4a5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4a8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x9b, offset 0x4ad
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9c, offset 0x4b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9d, offset 0x4bf
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9e, offset 0x4c4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4d3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0xa1, offset 0x4d8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa2, offset 0x4dc
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa3, offset 0x4ec
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa4, offset 0x4f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa5, offset 0x4f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa6, offset 0x4fb
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa7, offset 0x502
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa8, offset 0x504
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa9, offset 0x507
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xab, offset 0x50e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xac, offset 0x512
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xad, offset 0x518
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xae, offset 0x521
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0340, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb0, offset 0x534
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb1, offset 0x53d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb2, offset 0x545
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb3, offset 0x54c
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb5, offset 0x567
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb6, offset 0x574
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb7, offset 0x57d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb8, offset 0x581
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xba, offset 0x597
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbb, offset 0x5a2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbc, offset 0x5ab
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbd, offset 0x5b1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbe, offset 0x5b9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xbf, offset 0x5c2
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc0, offset 0x5cc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc1, offset 0x5cf
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc2, offset 0x5db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc3, offset 0x5de
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc4, offset 0x5e3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc5, offset 0x5e6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xc7, offset 0x5f9
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xc8, offset 0x605
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xc9, offset 0x608
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xca, offset 0x60d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xcb, offset 0x610
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xcc, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xcd, offset 0x616
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xce, offset 0x61d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xcf, offset 0x624
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd0, offset 0x628
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xd1, offset 0x633
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xd2, offset 0x636
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd3, offset 0x63c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xd4, offset 0x641
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0xd5, offset 0x645
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xd6, offset 0x648
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xd7, offset 0x64b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0xbf},
+ // Block 0xd8, offset 0x64e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xd9, offset 0x653
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xda, offset 0x65d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x660
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xdc, offset 0x664
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xdd, offset 0x673
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x67f
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xdf, offset 0x683
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xe0, offset 0x688
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x68d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xe2, offset 0x691
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xe3, offset 0x696
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x69f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xe5, offset 0x6aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xe6, offset 0x6b0
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe7, offset 0x6b8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe8, offset 0x6bc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xe9, offset 0x6c0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xea, offset 0x6c6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xec, offset 0x6d1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xed, offset 0x6d4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xee, offset 0x6e2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xef, offset 0x6e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf0, offset 0x6ec
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0xbf},
+ // Block 0xf1, offset 0x6ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf2, offset 0x6f3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0xf3, offset 0x6f9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0xf4, offset 0x6fe
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xf5, offset 0x708
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xf6, offset 0x70d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xf7, offset 0x710
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0xbf},
+ // Block 0xf8, offset 0x713
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf9, offset 0x716
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfa, offset 0x719
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x71d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xfc, offset 0x720
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0xfd, offset 0x730
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0xfe, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0xff, offset 0x746
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x100, offset 0x748
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x101, offset 0x74a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 41662 bytes (40KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
new file mode 100644
index 000000000..811c2e4e9
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/dial.go
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+// A ContextDialer dials using a context.
+type ContextDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
+//
+// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
+//
+// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
+// can leak a goroutine for as long as it takes the underlying Dialer implementation to time out.
+//
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func Dial(ctx context.Context, network, address string) (net.Conn, error) {
+ d := FromEnvironment()
+ if xd, ok := d.(ContextDialer); ok {
+ return xd.DialContext(ctx, network, address)
+ }
+ return dialContext(ctx, d, network, address)
+}
+
+// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to time out.
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
+ var (
+ conn net.Conn
+ done = make(chan struct{}, 1)
+ err error
+ )
+ go func() {
+ conn, err = d.Dial(network, address)
+ close(done)
+ if conn != nil && ctx.Err() != nil {
+ conn.Close()
+ }
+ }()
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-done:
+ }
+ return conn, err
+}
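
The new Dial helper resolves its dialer from the environment, so a call site only supplies a context and a target. A minimal sketch, assuming ALL_PROXY points at a reachable proxy and example.com:443 stands in for a real target:

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	// The context only bounds how long we wait for the connection to be
	// returned; it does not govern the connection's lifetime.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := proxy.Dial(ctx, "tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via", conn.RemoteAddr())
}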
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
index 4c5ad88b1..3d66bdef9 100644
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -5,14 +5,27 @@
package proxy
import (
+ "context"
"net"
)
type direct struct{}
-// Direct is a direct proxy: one that makes network connections directly.
+// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
var Direct = direct{}
+var (
+ _ Dialer = Direct
+ _ ContextDialer = Direct
+)
+
+// Dial directly invokes net.Dial with the supplied parameters.
func (direct) Dial(network, addr string) (net.Conn, error) {
return net.Dial(network, addr)
}
+
+// DialContext instantiates a net.Dialer and invokes its DialContext method with the supplied parameters.
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ return d.DialContext(ctx, network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index 0689bb6a7..573fe79e8 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -5,6 +5,7 @@
package proxy
import (
+ "context"
"net"
"strings"
)
@@ -41,6 +42,20 @@ func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
return p.dialerForRequest(host).Dial(network, addr)
}
+// DialContext connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ d := p.dialerForRequest(host)
+ if x, ok := d.(ContextDialer); ok {
+ return x.DialContext(ctx, network, addr)
+ }
+ return dialContext(ctx, d, network, addr)
+}
+
func (p *PerHost) dialerForRequest(host string) Dialer {
if ip := net.ParseIP(host); ip != nil {
for _, net := range p.bypassNetworks {
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
index 553ead7cf..9ff4b9a77 100644
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -15,6 +15,7 @@ import (
)
// A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
type Dialer interface {
// Dial connects to the given address via the proxy.
Dial(network, addr string) (c net.Conn, err error)
@@ -25,21 +26,30 @@ type Auth struct {
User, Password string
}
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
func FromEnvironment() Dialer {
+ return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
allProxy := allProxyEnv.Get()
if len(allProxy) == 0 {
- return Direct
+ return forward
}
proxyURL, err := url.Parse(allProxy)
if err != nil {
- return Direct
+ return forward
}
- proxy, err := FromURL(proxyURL, Direct)
+ proxy, err := FromURL(proxyURL, forward)
if err != nil {
- return Direct
+ return forward
}
noProxy := noProxyEnv.Get()
@@ -47,7 +57,7 @@ func FromEnvironment() Dialer {
return proxy
}
- perHost := NewPerHost(proxy, Direct)
+ perHost := NewPerHost(proxy, forward)
perHost.AddFromString(noProxy)
return perHost
}
@@ -79,8 +89,13 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
}
switch u.Scheme {
- case "socks5":
- return SOCKS5("tcp", u.Host, auth, forward)
+ case "socks5", "socks5h":
+ addr := u.Hostname()
+ port := u.Port()
+ if port == "" {
+ port = "1080"
+ }
+ return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
}
// If the scheme doesn't match any of the built-in schemes, see if it
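
FromEnvironmentUsing keeps the ALL_PROXY/NO_PROXY handling but makes the underlying connections through a caller-supplied forwarding dialer. A short sketch, where the timeouts are only illustrative:

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	// A *net.Dialer satisfies both Dialer and ContextDialer, so it works
	// as the forwarding dialer for direct and proxied connections alike.
	forward := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}

	d := proxy.FromEnvironmentUsing(forward)
	conn, err := d.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}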
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
index 56345ec8b..c91651f96 100644
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -17,8 +17,14 @@ import (
func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
d := socks.NewDialer(network, address)
if forward != nil {
- d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) {
- return forward.Dial(network, address)
+ if f, ok := forward.(ContextDialer); ok {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return f.DialContext(ctx, network, address)
+ }
+ } else {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return dialContext(ctx, forward, network, address)
+ }
}
}
if auth != nil {
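
When the proxy is fixed rather than taken from the environment, SOCKS5 can be constructed directly; the address and credentials below are placeholders. The returned dialer is asserted to ContextDialer so the context-aware path added above is used when available:

package main

import (
	"context"
	"log"
	"net"

	"golang.org/x/net/proxy"
)

func main() {
	auth := &proxy.Auth{User: "user", Password: "secret"} // placeholder credentials
	d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// Prefer DialContext when the returned dialer supports it.
	var conn net.Conn
	if cd, ok := d.(proxy.ContextDialer); ok {
		conn, err = cd.DialContext(context.Background(), "tcp", "example.com:443")
	} else {
		conn, err = d.Dial("tcp", "example.com:443")
	}
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}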
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 000000000..fa139db22
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - tip
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+ - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 000000000..dfbed62cf
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
index 0e5fb8728..6a66aea5e 100644
--- a/vendor/github.com/gorilla/context/LICENSE
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -1,16 +1,16 @@
-Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- * Redistributions of source code must retain the above copyright
+ * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
+ * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 000000000..0f443e693
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,35 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+## Policy for new packages
+
+We no longer accept new provider-specific packages in this repo. For
+defining provider endpoints and provider-specific OAuth2 behavior, we
+encourage you to create packages elsewhere. We'll keep the existing
+packages for compatibility.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod
new file mode 100644
index 000000000..b34578155
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.mod
@@ -0,0 +1,10 @@
+module golang.org/x/oauth2
+
+go 1.11
+
+require (
+ cloud.google.com/go v0.34.0
+ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
+ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+ google.golang.org/appengine v1.4.0
+)
diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum
new file mode 100644
index 000000000..6f0079b0d
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.sum
@@ -0,0 +1,12 @@
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
index f55e0c82c..743487188 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build mips64 mips64le
+// +build appengine
-package cpu
+package internal
-const cacheLineSize = 32
+import "google.golang.org/appengine/urlfetch"
-func doinit() {}
+func init() {
+ appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 000000000..03265e888
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 000000000..c0ab196cf
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
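
ParseKey lives in an internal package, so it is not importable from outside x/oauth2, but the decode-then-parse pattern it implements can be reproduced with the standard library alone. A standalone sketch, with a placeholder key that will not actually parse:

package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"log"
)

// parseRSAKey mirrors the logic above: strip an optional PEM wrapper,
// try PKCS#8 first, then fall back to PKCS#1.
func parseRSAKey(key []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(key); block != nil {
		key = block.Bytes
	}
	parsed, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		return x509.ParsePKCS1PrivateKey(key)
	}
	rsaKey, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("not an RSA private key")
	}
	return rsaKey, nil
}

func main() {
	pemBytes := []byte("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----")
	if _, err := parseRSAKey(pemBytes); err != nil {
		log.Println("parse failed (expected with the placeholder key):", err)
	}
}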
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 000000000..355c38696
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,294 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 || string(b) == "null" {
+ return nil
+ }
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
+type AuthStyle int
+
+const (
+ AuthStyleUnknown AuthStyle = 0
+ AuthStyleInParams AuthStyle = 1
+ AuthStyleInHeader AuthStyle = 2
+)
+
+// authStyleCache is the set of tokenURLs we've successfully used via
+// RetrieveToken and which style auth we ended up using.
+// It's called a cache, but it doesn't (yet?) shrink. It's expected that
+// the set of OAuth2 servers a program contacts over time is fixed and
+// small.
+var authStyleCache struct {
+ sync.Mutex
+ m map[string]AuthStyle // keyed by tokenURL
+}
+
+// ResetAuthCache resets the global authentication style cache used
+// for AuthStyleUnknown token requests.
+func ResetAuthCache() {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ authStyleCache.m = nil
+}
+
+// lookupAuthStyle reports which auth style we last used with tokenURL
+// when calling RetrieveToken and whether we have ever done so.
+func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ style, ok = authStyleCache.m[tokenURL]
+ return
+}
+
+// setAuthStyle adds an entry to authStyleCache, documented above.
+func setAuthStyle(tokenURL string, v AuthStyle) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ if authStyleCache.m == nil {
+ authStyleCache.m = make(map[string]AuthStyle)
+ }
+ authStyleCache.m[tokenURL] = v
+}
+
+// newTokenRequest returns a new *http.Request to retrieve a new token
+// from tokenURL using the provided clientID, clientSecret, and POST
+// body parameters.
+//
+// authStyle controls how the clientID & clientSecret are sent:
+// AuthStyleInParams encodes them in the POST body (along with any
+// values in v); AuthStyleInHeader sends them in the Authorization header.
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
+ if authStyle == AuthStyleInParams {
+ v = cloneURLValues(v)
+ if clientID != "" {
+ v.Set("client_id", clientID)
+ }
+ if clientSecret != "" {
+ v.Set("client_secret", clientSecret)
+ }
+ }
+ req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if authStyle == AuthStyleInHeader {
+ req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
+ }
+ return req, nil
+}
+
+func cloneURLValues(v url.Values) url.Values {
+ v2 := make(url.Values, len(v))
+ for k, vv := range v {
+ v2[k] = append([]string(nil), vv...)
+ }
+ return v2
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
+ needsAuthStyleProbe := authStyle == 0
+ if needsAuthStyleProbe {
+ if style, ok := lookupAuthStyle(tokenURL); ok {
+ authStyle = style
+ needsAuthStyleProbe = false
+ } else {
+ authStyle = AuthStyleInHeader // the first way we'll try
+ }
+ }
+ req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ if err != nil {
+ return nil, err
+ }
+ token, err := doTokenRoundTrip(ctx, req)
+ if err != nil && needsAuthStyleProbe {
+ // If we get an error, assume the server wants the
+ // clientID & clientSecret in a different form.
+ // See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+ // In summary:
+ // - Reddit only accepts client secret in the Authorization header
+ // - Dropbox accepts either it in URL param or Auth header, but not both.
+ // - Google only accepts URL param (not spec compliant?), not Auth header
+ // - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+ //
+ // We used to maintain a big table in this code of all the sites and which way
+ // they went, but maintaining it didn't scale & got annoying.
+ // So just try both ways.
+ authStyle = AuthStyleInParams // the second way we'll try
+ req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ token, err = doTokenRoundTrip(ctx, req)
+ }
+ if needsAuthStyleProbe && err == nil {
+ setAuthStyle(tokenURL, authStyle)
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token != nil && token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, err
+}
+
+func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
+ r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+ if err != nil {
+ return nil, err
+ }
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ r.Body.Close()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, &RetrieveError{
+ Response: r,
+ Body: body,
+ }
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ Raw: vals,
+ }
+ e := vals.Get("expires_in")
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ Raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+ }
+ if token.AccessToken == "" {
+ return nil, errors.New("oauth2: server response missing access_token")
+ }
+ return token, nil
+}
+
+type RetrieveError struct {
+ Response *http.Response
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 000000000..572074a63
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "context"
+ "net/http"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It is effectively
+// immutable because every ContextKey value is identical, so reassigning
+// the variable cannot change how it behaves as a context key.
+type ContextKey struct{}
+
+var appengineClientHook func(context.Context) *http.Client
+
+func ContextClient(ctx context.Context) *http.Client {
+ if ctx != nil {
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc
+ }
+ }
+ if appengineClientHook != nil {
+ return appengineClientHook(ctx)
+ }
+ return http.DefaultClient
+}
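
ContextClient is what backs the exported oauth2.HTTPClient context key: an *http.Client stored under that key is used for the token exchange instead of http.DefaultClient. A sketch of supplying a custom client, with the timeout chosen arbitrarily:

package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Any *http.Client stored under oauth2.HTTPClient is picked up when
	// tokens are requested with this context.
	hc := &http.Client{Timeout: 15 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	_ = ctx // pass ctx to Config.Exchange, Config.Client, and friends
}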
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 000000000..291df5c83
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,381 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users back to after they have
+ // gone through the OAuth flow on the resource owner's site.
+ RedirectURL string
+
+ // Scopes specifies optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint represents an OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle AuthStyle
+}
+
+// AuthStyle represents how requests for tokens are authenticated
+// to the server.
+type AuthStyle int
+
+const (
+ // AuthStyleAutoDetect means to auto-detect which authentication
+ // style the provider wants by trying both ways and caching
+ // the successful way for the future.
+ AuthStyleAutoDetect AuthStyle = 0
+
+ // AuthStyleInParams sends the "client_id" and "client_secret"
+ // in the POST body as application/x-www-form-urlencoded parameters.
+ AuthStyleInParams AuthStyle = 1
+
+ // AuthStyleInHeader sends the client_id and client_secret
+ // using HTTP Basic Authorization. This is an optional style
+ // described in the OAuth2 RFC 6749 section 2.3.1.
+ AuthStyleInHeader AuthStyle = 2
+)
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Config.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ if state != "" {
+ // TODO(light): Docs say never to omit state; don't allow empty.
+ v.Set("state", state)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ return retrieveToken(ctx, c, v)
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ return retrieveToken(ctx, c, v)
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (the wrapped TokenSource supplies the HTTP
+// client information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ return internal.ContextClient(ctx)
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextClient(ctx).Transport,
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
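
Putting Config, AuthCodeURL, Exchange, and Client together, the usual three-legged flow looks roughly like the sketch below; the endpoint URLs, credentials, and state handling are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"read"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}

	// 1. Send the user to the consent page. The state value must be
	//    validated on the redirect back.
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
	fmt.Println("Visit:", url)

	// 2. Exchange the authorization code received on the callback.
	var code string
	fmt.Scan(&code)
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}

	// 3. The returned client refreshes the token automatically.
	client := conf.Client(context.Background(), tok)
	_ = client // use client.Get(...) against the provider's API
}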
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 000000000..822720341
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,178 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+
+ vals, ok := t.raw.(url.Values)
+ if !ok {
+ return nil
+ }
+
+ v := vals.Get(key)
+ switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+ case 0: // Contains no "."; try to parse as int
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i
+ }
+ case 1: // Contains a single "."; try to parse as float
+ if f, err := strconv.ParseFloat(s, 64); err == nil {
+ return f
+ }
+ }
+
+ return v
+}
+
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
+ if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*RetrieveError)(rErr)
+ }
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+ Response *http.Response
+ // Body is the body that was consumed by reading Response.Body.
+ // It may be truncated.
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
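
A small sketch of the Token helpers defined above, with fabricated values:

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{
		AccessToken: "placeholder-access-token",
		TokenType:   "bearer",
		Expiry:      time.Now().Add(time.Hour),
	}

	fmt.Println(tok.Type())  // "Bearer" (case-normalized)
	fmt.Println(tok.Valid()) // true while unexpired, minus expiryDelta

	// Attach the token to a hand-built request.
	req, _ := http.NewRequest("GET", "https://api.example.com/v1/me", nil)
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization"))
}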
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 000000000..aa0d34f1e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+
+ // req.Body is assumed to have been closed by the base RoundTripper.
+ reqBodyClosed = true
+
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
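
Transport can also be wired up by hand when Config is not involved, for instance with a pre-issued token that never refreshes; a sketch using StaticTokenSource:

package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// A pre-issued token (for example a personal access token) never
	// refreshes, so a static source is sufficient.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "PLACEHOLDER"})

	client := &http.Client{
		// Base is nil, so http.DefaultTransport is used underneath.
		Transport: &oauth2.Transport{Source: src},
	}

	resp, err := client.Get("https://api.example.com/v1/me")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}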
diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/sync/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/sync/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md
deleted file mode 100644
index 1f8436cc9..000000000
--- a/vendor/golang.org/x/sync/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Go Sync
-
-This repository provides Go concurrency primitives in addition to the
-ones provided by the language and "sync" and "sync/atomic" packages.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/sync`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the sync repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the
-subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
index ac53e733e..7f096fef0 100644
--- a/vendor/golang.org/x/sync/semaphore/semaphore.go
+++ b/vendor/golang.org/x/sync/semaphore/semaphore.go
@@ -95,7 +95,7 @@ func (s *Weighted) Release(n int64) {
s.cur -= n
if s.cur < 0 {
s.mu.Unlock()
- panic("semaphore: bad release")
+ panic("semaphore: released more than held")
}
for {
next := s.waiters.Front()
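
For context, a minimal use of the Weighted semaphore whose Release path the hunk above touches; the limit of 3 is arbitrary:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(3)
	ctx := context.Background()

	for i := 0; i < 5; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			return
		}
		// ... bounded work would go here ...
		sem.Release(1) // releasing more than held panics, per the message above
	}
}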
diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/sys/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md
deleted file mode 100644
index ef6c9e59c..000000000
--- a/vendor/golang.org/x/sys/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# sys
-
-This repository holds supplemental Go packages for low-level interactions with
-the operating system.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/sys`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the sys repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the
-subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
deleted file mode 100644
index 84962cf0f..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cpu implements processor feature detection for
-// various CPU architectures.
-package cpu
-
-// CacheLinePad is used to pad structs to avoid false sharing.
-type CacheLinePad struct{ _ [cacheLineSize]byte }
-
-// X86 contains the supported CPU features of the
-// current X86/AMD64 platform. If the current platform
-// is not X86/AMD64 then all feature flags are false.
-//
-// X86 is padded to avoid false sharing. Further the HasAVX
-// and HasAVX2 are only set if the OS supports XMM and YMM
-// registers in addition to the CPUID feature bit being set.
-var X86 struct {
- _ CacheLinePad
- HasAES bool // AES hardware implementation (AES NI)
- HasADX bool // Multi-precision add-carry instruction extensions
- HasAVX bool // Advanced vector extension
- HasAVX2 bool // Advanced vector extension 2
- HasBMI1 bool // Bit manipulation instruction set 1
- HasBMI2 bool // Bit manipulation instruction set 2
- HasERMS bool // Enhanced REP for MOVSB and STOSB
- HasFMA bool // Fused-multiply-add instructions
- HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
- HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
- HasPOPCNT bool // Hamming weight instruction POPCNT.
- HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
- HasSSE3 bool // Streaming SIMD extension 3
- HasSSSE3 bool // Supplemental streaming SIMD extension 3
- HasSSE41 bool // Streaming SIMD extension 4 and 4.1
- HasSSE42 bool // Streaming SIMD extension 4 and 4.2
- _ CacheLinePad
-}
-
-// ARM64 contains the supported CPU features of the
-// current ARMv8(aarch64) platform. If the current platform
-// is not arm64 then all feature flags are false.
-var ARM64 struct {
- _ CacheLinePad
- HasFP bool // Floating-point instruction set (always available)
- HasASIMD bool // Advanced SIMD (always available)
- HasEVTSTRM bool // Event stream support
- HasAES bool // AES hardware implementation
- HasPMULL bool // Polynomial multiplication instruction set
- HasSHA1 bool // SHA1 hardware implementation
- HasSHA2 bool // SHA2 hardware implementation
- HasCRC32 bool // CRC32 hardware implementation
- HasATOMICS bool // Atomic memory operation instruction set
- HasFPHP bool // Half precision floating-point instruction set
- HasASIMDHP bool // Advanced SIMD half precision instruction set
- HasCPUID bool // CPUID identification scheme registers
- HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
- HasJSCVT bool // Javascript conversion from floating-point to integer
- HasFCMA bool // Floating-point multiplication and addition of complex numbers
- HasLRCPC bool // Release Consistent processor consistent support
- HasDCPOP bool // Persistent memory support
- HasSHA3 bool // SHA3 hardware implementation
- HasSM3 bool // SM3 hardware implementation
- HasSM4 bool // SM4 hardware implementation
- HasASIMDDP bool // Advanced SIMD double precision instruction set
- HasSHA512 bool // SHA512 hardware implementation
- HasSVE bool // Scalable Vector Extensions
- HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
- _ CacheLinePad
-}
-
-// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
-// If the current platform is not ppc64/ppc64le then all feature flags are false.
-//
-// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
-// since there are no optional categories. There are some exceptions that also
-// require kernel support to work (DARN, SCV), so there are feature bits for
-// those as well. The minimum processor requirement is POWER8 (ISA 2.07).
-// The struct is padded to avoid false sharing.
-var PPC64 struct {
- _ CacheLinePad
- HasDARN bool // Hardware random number generator (requires kernel enablement)
- HasSCV bool // Syscall vectored (requires kernel enablement)
- IsPOWER8 bool // ISA v2.07 (POWER8)
- IsPOWER9 bool // ISA v3.00 (POWER9)
- _ CacheLinePad
-}
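The package deleted above exposes its feature flags as plain package-level booleans populated at init time. As a minimal, illustrative sketch (not part of this diff, and assuming the golang.org/x/sys/cpu import path shown in the file header), a caller would typically branch on them like this:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Each struct is all-false on other architectures, so the checks
	// are safe to write unconditionally.
	switch {
	case cpu.X86.HasAES:
		fmt.Println("using the AES-NI code path")
	case cpu.ARM64.HasAES:
		fmt.Println("using the ARMv8 AES instructions")
	default:
		fmt.Println("falling back to a pure-Go implementation")
	}
}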
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go
deleted file mode 100644
index 7f2348b7d..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_arm.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const cacheLineSize = 32
-
-func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
deleted file mode 100644
index 02ed58b30..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const cacheLineSize = 64
-
-// HWCAP/HWCAP2 bits. These are exposed by Linux.
-const (
- hwcap_FP = 1 << 0
- hwcap_ASIMD = 1 << 1
- hwcap_EVTSTRM = 1 << 2
- hwcap_AES = 1 << 3
- hwcap_PMULL = 1 << 4
- hwcap_SHA1 = 1 << 5
- hwcap_SHA2 = 1 << 6
- hwcap_CRC32 = 1 << 7
- hwcap_ATOMICS = 1 << 8
- hwcap_FPHP = 1 << 9
- hwcap_ASIMDHP = 1 << 10
- hwcap_CPUID = 1 << 11
- hwcap_ASIMDRDM = 1 << 12
- hwcap_JSCVT = 1 << 13
- hwcap_FCMA = 1 << 14
- hwcap_LRCPC = 1 << 15
- hwcap_DCPOP = 1 << 16
- hwcap_SHA3 = 1 << 17
- hwcap_SM3 = 1 << 18
- hwcap_SM4 = 1 << 19
- hwcap_ASIMDDP = 1 << 20
- hwcap_SHA512 = 1 << 21
- hwcap_SVE = 1 << 22
- hwcap_ASIMDFHM = 1 << 23
-)
-
-func doinit() {
- // HWCAP feature bits
- ARM64.HasFP = isSet(HWCap, hwcap_FP)
- ARM64.HasASIMD = isSet(HWCap, hwcap_ASIMD)
- ARM64.HasEVTSTRM = isSet(HWCap, hwcap_EVTSTRM)
- ARM64.HasAES = isSet(HWCap, hwcap_AES)
- ARM64.HasPMULL = isSet(HWCap, hwcap_PMULL)
- ARM64.HasSHA1 = isSet(HWCap, hwcap_SHA1)
- ARM64.HasSHA2 = isSet(HWCap, hwcap_SHA2)
- ARM64.HasCRC32 = isSet(HWCap, hwcap_CRC32)
- ARM64.HasATOMICS = isSet(HWCap, hwcap_ATOMICS)
- ARM64.HasFPHP = isSet(HWCap, hwcap_FPHP)
- ARM64.HasASIMDHP = isSet(HWCap, hwcap_ASIMDHP)
- ARM64.HasCPUID = isSet(HWCap, hwcap_CPUID)
- ARM64.HasASIMDRDM = isSet(HWCap, hwcap_ASIMDRDM)
- ARM64.HasJSCVT = isSet(HWCap, hwcap_JSCVT)
- ARM64.HasFCMA = isSet(HWCap, hwcap_FCMA)
- ARM64.HasLRCPC = isSet(HWCap, hwcap_LRCPC)
- ARM64.HasDCPOP = isSet(HWCap, hwcap_DCPOP)
- ARM64.HasSHA3 = isSet(HWCap, hwcap_SHA3)
- ARM64.HasSM3 = isSet(HWCap, hwcap_SM3)
- ARM64.HasSM4 = isSet(HWCap, hwcap_SM4)
- ARM64.HasASIMDDP = isSet(HWCap, hwcap_ASIMDDP)
- ARM64.HasSHA512 = isSet(HWCap, hwcap_SHA512)
- ARM64.HasSVE = isSet(HWCap, hwcap_SVE)
- ARM64.HasASIMDFHM = isSet(HWCap, hwcap_ASIMDFHM)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
deleted file mode 100644
index f7cb46971..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-// +build !gccgo
-
-package cpu
-
-// cpuid is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo.c for gccgo.
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-
-// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo.c for gccgo.
-func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
deleted file mode 100644
index e363c7d13..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-// +build gccgo
-
-#include <cpuid.h>
-#include <stdint.h>
-
-// Need to wrap __get_cpuid_count because it's declared as static.
-int
-gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
-}
-
-// xgetbv reads the contents of an XCR (Extended Control Register)
-// specified in the ECX register into registers EDX:EAX.
-// Currently, the only supported value for XCR is 0.
-//
-// TODO: Replace with a better alternative:
-//
-// #include <xsaveintrin.h>
-//
-// #pragma GCC target("xsave")
-//
-// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
-// unsigned long long x = _xgetbv(0);
-// *eax = x & 0xffffffff;
-// *edx = (x >> 32) & 0xffffffff;
-// }
-//
-// Note that _xgetbv is defined starting with GCC 8.
-void
-gccgoXgetbv(uint32_t *eax, uint32_t *edx)
-{
- __asm(" xorl %%ecx, %%ecx\n"
- " xgetbv"
- : "=a"(*eax), "=d"(*edx));
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
deleted file mode 100644
index ba49b91bd..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-// +build gccgo
-
-package cpu
-
-//extern gccgoGetCpuidCount
-func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
-
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
- var a, b, c, d uint32
- gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
- return a, b, c, d
-}
-
-//extern gccgoXgetbv
-func gccgoXgetbv(eax, edx *uint32)
-
-func xgetbv() (eax, edx uint32) {
- var a, d uint32
- gccgoXgetbv(&a, &d)
- return a, d
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go
deleted file mode 100644
index a8452e094..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_linux.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//+build !amd64,!amd64p32,!386
-
-package cpu
-
-import (
- "encoding/binary"
- "io/ioutil"
- "runtime"
-)
-
-const (
- _AT_HWCAP = 16
- _AT_HWCAP2 = 26
-
- procAuxv = "/proc/self/auxv"
-
- uintSize uint = 32 << (^uint(0) >> 63)
-)
-
-// For those platforms that don't have a 'cpuid' equivalent, we use HWCAP/HWCAP2.
-// These are initialized in cpu_$GOARCH.go
-// and should not be changed after they are initialized.
-var HWCap uint
-var HWCap2 uint
-
-func init() {
- buf, err := ioutil.ReadFile(procAuxv)
- if err != nil {
- panic("read proc auxv failed: " + err.Error())
- }
-
- pb := int(uintSize / 8)
-
- for i := 0; i < len(buf)-pb*2; i += pb * 2 {
- var tag, val uint
- switch uintSize {
- case 32:
- tag = uint(binary.LittleEndian.Uint32(buf[i:]))
- val = uint(binary.LittleEndian.Uint32(buf[i+pb:]))
- case 64:
- if runtime.GOARCH == "ppc64" {
- tag = uint(binary.BigEndian.Uint64(buf[i:]))
- val = uint(binary.BigEndian.Uint64(buf[i+pb:]))
- } else {
- tag = uint(binary.LittleEndian.Uint64(buf[i:]))
- val = uint(binary.LittleEndian.Uint64(buf[i+pb:]))
- }
- }
- switch tag {
- case _AT_HWCAP:
- HWCap = val
- case _AT_HWCAP2:
- HWCap2 = val
- }
- }
- doinit()
-}
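For reference, each auxv record read by the init function above is a (tag, value) pair of native-sized words. The standalone sketch below (illustrative only, not part of this diff) decodes one fabricated 64-bit little-endian entry the same way; tag 16 is _AT_HWCAP, and the value bits follow the hwcap_* constants from cpu_arm64.go:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Fabricate a single 16-byte auxv entry: tag followed by value.
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[0:], 16)        // _AT_HWCAP
	binary.LittleEndian.PutUint64(buf[8:], 1<<3|1<<7) // hwcap_AES | hwcap_CRC32

	tag := uint(binary.LittleEndian.Uint64(buf[0:]))
	val := uint(binary.LittleEndian.Uint64(buf[8:]))
	fmt.Printf("tag=%d val=%#x\n", tag, val) // tag=16 val=0x88
}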
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
deleted file mode 100644
index cda87b1a1..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build mips mipsle
-
-package cpu
-
-const cacheLineSize = 32
-
-func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
deleted file mode 100644
index ed975de62..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ppc64 ppc64le
-
-package cpu
-
-const cacheLineSize = 128
-
-// HWCAP/HWCAP2 bits. These are exposed by the kernel.
-const (
- // ISA Level
- _PPC_FEATURE2_ARCH_2_07 = 0x80000000
- _PPC_FEATURE2_ARCH_3_00 = 0x00800000
-
- // CPU features
- _PPC_FEATURE2_DARN = 0x00200000
- _PPC_FEATURE2_SCV = 0x00100000
-)
-
-func doinit() {
- // HWCAP2 feature bits
- PPC64.IsPOWER8 = isSet(HWCap2, _PPC_FEATURE2_ARCH_2_07)
- PPC64.IsPOWER9 = isSet(HWCap2, _PPC_FEATURE2_ARCH_3_00)
- PPC64.HasDARN = isSet(HWCap2, _PPC_FEATURE2_DARN)
- PPC64.HasSCV = isSet(HWCap2, _PPC_FEATURE2_SCV)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go
deleted file mode 100644
index ce8a2289e..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_s390x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const cacheLineSize = 256
-
-func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
deleted file mode 100644
index 71e288b06..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-
-package cpu
-
-const cacheLineSize = 64
-
-func init() {
- maxID, _, _, _ := cpuid(0, 0)
-
- if maxID < 1 {
- return
- }
-
- _, _, ecx1, edx1 := cpuid(1, 0)
- X86.HasSSE2 = isSet(26, edx1)
-
- X86.HasSSE3 = isSet(0, ecx1)
- X86.HasPCLMULQDQ = isSet(1, ecx1)
- X86.HasSSSE3 = isSet(9, ecx1)
- X86.HasFMA = isSet(12, ecx1)
- X86.HasSSE41 = isSet(19, ecx1)
- X86.HasSSE42 = isSet(20, ecx1)
- X86.HasPOPCNT = isSet(23, ecx1)
- X86.HasAES = isSet(25, ecx1)
- X86.HasOSXSAVE = isSet(27, ecx1)
-
- osSupportsAVX := false
- // For XGETBV, OSXSAVE bit is required and sufficient.
- if X86.HasOSXSAVE {
- eax, _ := xgetbv()
- // Check if XMM and YMM registers have OS support.
- osSupportsAVX = isSet(1, eax) && isSet(2, eax)
- }
-
- X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
-
- if maxID < 7 {
- return
- }
-
- _, ebx7, _, _ := cpuid(7, 0)
- X86.HasBMI1 = isSet(3, ebx7)
- X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
- X86.HasBMI2 = isSet(8, ebx7)
- X86.HasERMS = isSet(9, ebx7)
- X86.HasADX = isSet(19, ebx7)
-}
-
-func isSet(bitpos uint, value uint32) bool {
- return value&(1<<bitpos) != 0
-}
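The osSupportsAVX check above inspects two bits of XCR0 as returned by XGETBV: bit 1 (XMM/SSE state) and bit 2 (YMM/AVX state); both must be set by the OS before the CPUID AVX/AVX2 bits can be trusted. A standalone sketch of the same test (illustrative, not part of this diff):

package main

import "fmt"

// osSupportsAVX mirrors the check in the init function above.
func osSupportsAVX(xcr0 uint32) bool {
	return xcr0&(1<<1) != 0 && xcr0&(1<<2) != 0
}

func main() {
	fmt.Println(osSupportsAVX(0x7)) // true: x87, XMM and YMM state enabled
	fmt.Println(osSupportsAVX(0x1)) // false: only x87 state enabled
}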
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s
deleted file mode 100644
index 47f084128..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-// +build !gccgo
-
-#include "textflag.h"
-
-// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·cpuid(SB), NOSPLIT, $0-24
- MOVL eaxArg+0(FP), AX
- MOVL ecxArg+4(FP), CX
- CPUID
- MOVL AX, eax+8(FP)
- MOVL BX, ebx+12(FP)
- MOVL CX, ecx+16(FP)
- MOVL DX, edx+20(FP)
- RET
-
-// func xgetbv() (eax, edx uint32)
-TEXT ·xgetbv(SB),NOSPLIT,$0-8
- MOVL $0, CX
- XGETBV
- MOVL AX, eax+0(FP)
- MOVL DX, edx+4(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
new file mode 100644
index 000000000..e3e0fc6f8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -0,0 +1,2 @@
+_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
index 2bf415fb1..eb2f78ae2 100644
--- a/vendor/golang.org/x/sys/unix/README.md
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -32,7 +32,7 @@ To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.
-Requirements: bash, perl, go
+Requirements: bash, go
### New Build System (currently for `GOOS == "linux"`)
@@ -52,14 +52,14 @@ system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.
-Requirements: bash, perl, go, docker
+Requirements: bash, go, docker
## Component files
This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
-if you are using the new build system, the scripts cannot be called normally.
+if you are using the new build system, the scripts/programs cannot be called normally.
They must be called from within the docker container.
### asm files
@@ -81,8 +81,8 @@ each GOOS/GOARCH pair.
### mksysnum
-Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
-for the old system). This script takes in a list of header files containing the
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.
@@ -92,14 +92,14 @@ new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
-### mksyscall.pl
+### mksyscall.go
The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.
-The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.
@@ -160,7 +160,7 @@ signal numbers, and constants. Generated by `mkerrors.sh` (see above).
### `zsyscall_${GOOS}_${GOARCH}.go`
A file containing all the generated syscalls for a specific GOOS and GOARCH.
-Generated by `mksyscall.pl` (see above).
+Generated by `mksyscall.go` (see above).
### `zsysnum_${GOOS}_${GOARCH}.go`
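To make the //sys convention described in the README changes above concrete, prototypes in the hand-written syscall files look like the lines below (illustrative examples in the usual syscall_linux.go style, not taken from this diff); mksyscall.go matches the name against a SYS_* constant in zsysnum_${GOOS}_${GOARCH}.go and emits the wrapper into the corresponding zsyscall file:

//sys	Mkdirat(dirfd int, path string, mode uint32) (err error)
//sysnb	Getpid() (pid int)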
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 000000000..6db717de5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64,!gccgo
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV $0, A3
+ MOV $0, A4
+ MOV $0, A5
+ MOV $0, A6
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV ZERO, A3
+ MOV ZERO, A4
+ MOV ZERO, A5
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
new file mode 100644
index 000000000..6f98ba5a3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
new file mode 100644
index 000000000..0cedea3d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for arm64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100644
index 000000000..5a22eca96
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,227 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+mkasm=
+run="sh"
+cmd=""
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ cmd="echo"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
+ # Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
+ exit
+fi
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+solaris_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdout.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "darwin" ]; then
+ # pre-1.12, direct syscalls
+ echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go";
+ # 1.12 and later, syscalls via libSystem
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
new file mode 100644
index 000000000..4548b993d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
+// This program must be run after mksyscall.go.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+)
+
+func main() {
+ in1, err := ioutil.ReadFile("syscall_darwin.go")
+ if err != nil {
+ log.Fatalf("can't open syscall_darwin.go: %s", err)
+ }
+ arch := os.Args[1]
+ in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
+ if err != nil {
+ log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
+ }
+ in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
+ if err != nil {
+ log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
+ }
+ in := string(in1) + string(in2) + string(in3)
+
+ trampolines := map[string]bool{}
+
+ var out bytes.Buffer
+
+ fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
+ fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
+ fmt.Fprintf(&out, "\n")
+ fmt.Fprintf(&out, "// +build go1.12\n")
+ fmt.Fprintf(&out, "\n")
+ fmt.Fprintf(&out, "#include \"textflag.h\"\n")
+ for _, line := range strings.Split(in, "\n") {
+ if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
+ continue
+ }
+ fn := line[5 : len(line)-13]
+ if !trampolines[fn] {
+ trampolines[fn] = true
+ fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
+ fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
+ }
+ }
+ err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
+ if err != nil {
+ log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
+ }
+}
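As a quick illustration of the scan above, the standalone sketch below (not part of this diff; the trampoline name is a hypothetical example) shows how a matching declaration is recognized and what the emitted assembly stanza looks like:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "func libc_read_trampoline()" // hypothetical input line
	if strings.HasPrefix(line, "func ") && strings.HasSuffix(line, "_trampoline()") {
		fn := line[5 : len(line)-13] // "libc_read"
		fmt.Printf("TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n\tJMP\t%s(SB)\n", fn, fn)
	}
}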
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100644
index 000000000..3d85f2795
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,663 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+fi
+
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_AIX='
+#include <net/if.h>
+#include <net/netopt.h>
+#include <netinet/ip_mroute.h>
+#include <sys/protosw.h>
+#include <sys/stropts.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <sys/termio.h>
+#include <termios.h>
+#include <fcntl.h>
+
+#define AF_LOCAL AF_UNIX
+'
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include <stdint.h>
+#include <sys/attr.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/ptrace.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <termios.h>
+'
+
+includes_DragonFly='
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <termios.h>
+#include <netinet/ip.h>
+#include <net/ip_mroute/ip_mroute.h>
+'
+
+includes_FreeBSD='
+#include <sys/capsicum.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <termios.h>
+#include <netinet/ip.h>
+#include <netinet/ip_mroute.h>
+#include <sys/extattr.h>
+
+#if __FreeBSD__ >= 10
+#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
+#undef SIOCAIFADDR
+#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
+#undef SIOCSIFPHYADDR
+#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
+#endif
+'
+
+includes_Linux='
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#ifndef __LP64__
+#define _FILE_OFFSET_BITS 64
+#endif
+#define _GNU_SOURCE
+
+// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
+// these structures. We just include them copied from <bits/termios.h>.
+#if defined(__powerpc__)
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+#endif
+
+#include <bits/sockaddr.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/inotify.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/signalfd.h>
+#include <sys/socket.h>
+#include <sys/xattr.h>
+#include <linux/bpf.h>
+#include <linux/capability.h>
+#include <linux/errqueue.h>
+#include <linux/if.h>
+#include <linux/if_alg.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_ppp.h>
+#include <linux/if_tun.h>
+#include <linux/if_packet.h>
+#include <linux/if_addr.h>
+#include <linux/falloc.h>
+#include <linux/fanotify.h>
+#include <linux/filter.h>
+#include <linux/fs.h>
+#include <linux/kexec.h>
+#include <linux/keyctl.h>
+#include <linux/magic.h>
+#include <linux/memfd.h>
+#include <linux/module.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netlink.h>
+#include <linux/net_namespace.h>
+#include <linux/perf_event.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/rtnetlink.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/seccomp.h>
+#include <linux/sockios.h>
+#include <linux/wait.h>
+#include <linux/icmpv6.h>
+#include <linux/serial.h>
+#include <linux/can.h>
+#include <linux/vm_sockets.h>
+#include <linux/taskstats.h>
+#include <linux/genetlink.h>
+#include <linux/watchdog.h>
+#include <linux/hdreg.h>
+#include <linux/rtc.h>
+#include <linux/if_xdp.h>
+#include <linux/cryptouser.h>
+#include <mtd/ubi-user.h>
+#include <net/route.h>
+
+#if defined(__sparc__)
+// On sparc{,64}, the kernel defines struct termios2 itself which clashes with the
+// definition in glibc. As only the error constants are needed here, include the
+// generic termbits.h (which is included by termbits.h on sparc).
+#include <asm-generic/termbits.h>
+#else
+#include <asm/termbits.h>
+#endif
+
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
+#ifndef PTRACE_GETREGS
+#define PTRACE_GETREGS 0xc
+#endif
+
+#ifndef PTRACE_SETREGS
+#define PTRACE_SETREGS 0xd
+#endif
+
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
+#ifdef SOL_BLUETOOTH
+// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
+// but it is already in bluetooth_linux.go
+#undef SOL_BLUETOOTH
+#endif
+
+// Certain constants are missing from the fs/crypto UAPI
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+#define FS_MAX_KEY_SIZE 64
+'
+
+includes_NetBSD='
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/event.h>
+#include <sys/extattr.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/termios.h>
+#include <sys/ttycom.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip_mroute.h>
+#include <netinet/if_ether.h>
+
+// Needed since <sys/param.h> refers to it...
+#define schedppq 1
+'
+
+includes_OpenBSD='
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/termios.h>
+#include <sys/ttycom.h>
+#include <sys/unistd.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip_mroute.h>
+#include <netinet/if_ether.h>
+#include <net/if_bridge.h>
+
+// We keep some constants that are not supported in OpenBSD 5.5 and beyond,
+// for backward compatibility.
+#define EMUL_ENABLED 0x1
+#define EMUL_NATIVE 0x2
+#define IPV6_FAITH 0x1d
+#define IPV6_OPTIONS 0x1
+#define IPV6_RTHDR_STRICT 0x1
+#define IPV6_SOCKOPT_RESERVED1 0x3
+#define SIOCGIFGENERIC 0xc020693a
+#define SIOCSIFGENERIC 0x80206939
+#define WALTSIG 0x4
+'
+
+includes_SunOS='
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <sys/mkdev.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <termios.h>
+#include <netinet/ip.h>
+#include <netinet/ip_mroute.h>
+'
+
+
+includes='
+#include <sys/types.h>
+#include <sys/file.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <errno.h>
+#include <sys/signal.h>
+#include <signal.h>
+#include <sys/resource.h>
+#include <time.h>
+'
+ccflags="$@"
+
+# Write go tool cgo -godefs input.
+(
+ echo package unix
+ echo
+ echo '/*'
+ indirect="includes_$(uname)"
+ echo "${!indirect} $includes"
+ echo '*/'
+ echo 'import "C"'
+ echo 'import "syscall"'
+ echo
+ echo 'const ('
+
+ # The gcc command line prints all the #defines
+ # it encounters while processing the input
+ echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
+ awk '
+ $1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
+
+ $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
+ $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
+ $2 ~ /^(SCM_SRCRT)$/ {next}
+ $2 ~ /^(MAP_FAILED)$/ {next}
+ $2 ~ /^ELF_.*$/ {next}# <asm/elf.h> contains ELF_ARCH, etc.
+
+ $2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
+ $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
+
+ $2 !~ /^ECCAPBITS/ &&
+ $2 !~ /^ETH_/ &&
+ $2 !~ /^EPROC_/ &&
+ $2 !~ /^EQUIV_/ &&
+ $2 !~ /^EXPR_/ &&
+ $2 ~ /^E[A-Z0-9_]+$/ ||
+ $2 ~ /^B[0-9_]+$/ ||
+ $2 ~ /^(OLD|NEW)DEV$/ ||
+ $2 == "BOTHER" ||
+ $2 ~ /^CI?BAUD(EX)?$/ ||
+ $2 == "IBSHIFT" ||
+ $2 ~ /^V[A-Z0-9]+$/ ||
+ $2 ~ /^CS[A-Z0-9]/ ||
+ $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+ $2 ~ /^IGN/ ||
+ $2 ~ /^IX(ON|ANY|OFF)$/ ||
+ $2 ~ /^IN(LCR|PCK)$/ ||
+ $2 !~ "X86_CR3_PCID_NOFLUSH" &&
+ $2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
+ $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
+ $2 == "BRKINT" ||
+ $2 == "HUPCL" ||
+ $2 == "PENDIN" ||
+ $2 == "TOSTOP" ||
+ $2 == "XCASE" ||
+ $2 == "ALTWERASE" ||
+ $2 == "NOKERNINFO" ||
+ $2 ~ /^PAR/ ||
+ $2 ~ /^SIG[^_]/ ||
+ $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^O?XTABS$/ ||
+ $2 ~ /^TC[IO](ON|OFF)$/ ||
+ $2 ~ /^IN_/ ||
+ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 ~ /^TP_STATUS_/ ||
+ $2 ~ /^FALLOC_/ ||
+ $2 == "ICMPV6_FILTER" ||
+ $2 == "SOMAXCONN" ||
+ $2 == "NAME_MAX" ||
+ $2 == "IFNAMSIZ" ||
+ $2 ~ /^CTL_(HW|KERN|MAXNAME|NET|QUERY)$/ ||
+ $2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ ||
+ $2 ~ /^HW_MACHINE$/ ||
+ $2 ~ /^SYSCTL_VERS/ ||
+ $2 !~ "MNT_BITS" &&
+ $2 ~ /^(MS|MNT|UMOUNT)_/ ||
+ $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
+ $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^KEXEC_/ ||
+ $2 ~ /^LINUX_REBOOT_CMD_/ ||
+ $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 ~ /^MODULE_INIT_/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
+ $2 ~ /^SIOC/ ||
+ $2 ~ /^TIOC/ ||
+ $2 ~ /^TCGET/ ||
+ $2 ~ /^TCSET/ ||
+ $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
+ $2 !~ "RTF_BITS" &&
+ $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
+ $2 ~ /^BIOC/ ||
+ $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
+ $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
+ $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
+ $2 ~ /^CLONE_[A-Z_]+/ ||
+ $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ &&
+ $2 ~ /^(BPF|DLT)_/ ||
+ $2 ~ /^(CLOCK|TIMER)_/ ||
+ $2 ~ /^CAN_/ ||
+ $2 ~ /^CAP_/ ||
+ $2 ~ /^ALG_/ ||
+ $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ ||
+ $2 ~ /^GRND_/ ||
+ $2 ~ /^RND/ ||
+ $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
+ $2 ~ /^KEYCTL_/ ||
+ $2 ~ /^PERF_EVENT_IOC_/ ||
+ $2 ~ /^SECCOMP_MODE_/ ||
+ $2 ~ /^SPLICE_/ ||
+ $2 ~ /^SYNC_FILE_RANGE_/ ||
+ $2 !~ /^AUDIT_RECORD_MAGIC/ &&
+ $2 !~ /IOC_MAGIC/ &&
+ $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ ||
+ $2 ~ /^(VM|VMADDR)_/ ||
+ $2 ~ /^IOCTL_VM_SOCKETS_/ ||
+ $2 ~ /^(TASKSTATS|TS)_/ ||
+ $2 ~ /^CGROUPSTATS_/ ||
+ $2 ~ /^GENL_/ ||
+ $2 ~ /^STATX_/ ||
+ $2 ~ /^RENAME/ ||
+ $2 ~ /^UBI_IOC[A-Z]/ ||
+ $2 ~ /^UTIME_/ ||
+ $2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ ||
+ $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ ||
+ $2 ~ /^FSOPT_/ ||
+ $2 ~ /^WDIOC_/ ||
+ $2 ~ /^NFN/ ||
+ $2 ~ /^XDP_/ ||
+ $2 ~ /^(HDIO|WIN|SMART)_/ ||
+ $2 ~ /^CRYPTO_/ ||
+ $2 !~ "WMESGLEN" &&
+ $2 ~ /^W[A-Z0-9]+$/ ||
+ $2 ~/^PPPIOC/ ||
+ $2 ~ /^FAN_|FANOTIFY_/ ||
+ $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
+ $2 ~ /^__WCOREFLAG$/ {next}
+ $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
+
+ {next}
+ ' | sort
+
+ echo ')'
+) >_const.go
+
+# Pull out the error names for later.
+errors=$(
+ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
+ sort
+)
+
+# Pull out the signal names for later.
+signals=$(
+ echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
+ sort
+)
+
+# Again, writing regexps to a file.
+echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ sort >_error.grep
+echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
+ sort >_signal.grep
+
+echo '// mkerrors.sh' "$@"
+echo '// Code generated by the command above; see README.md. DO NOT EDIT.'
+echo
+echo "// +build ${GOARCH},${GOOS}"
+echo
+go tool cgo -godefs -- "$@" _const.go >_error.out
+cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
+echo
+echo '// Errors'
+echo 'const ('
+cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
+echo ')'
+
+echo
+echo '// Signals'
+echo 'const ('
+cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
+echo ')'
+
+# Run C program to print error and syscall strings.
+(
+ echo -E "
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
+
+struct tuple {
+ int num;
+ const char *name;
+};
+
+struct tuple errors[] = {
+"
+ for i in $errors
+ do
+ echo -E ' {'$i', "'$i'" },'
+ done
+
+ echo -E "
+};
+
+struct tuple signals[] = {
+"
+ for i in $signals
+ do
+ echo -E ' {'$i', "'$i'" },'
+ done
+
+ # Use -E because on some systems bash builtin interprets \n itself.
+ echo -E '
+};
+
+static int
+tuplecmp(const void *a, const void *b)
+{
+ return ((struct tuple *)a)->num - ((struct tuple *)b)->num;
+}
+
+int
+main(void)
+{
+ int i, e;
+ char buf[1024], *p;
+
+ printf("\n\n// Error table\n");
+ printf("var errorList = [...]struct {\n");
+ printf("\tnum syscall.Errno\n");
+ printf("\tname string\n");
+ printf("\tdesc string\n");
+ printf("} {\n");
+ qsort(errors, nelem(errors), sizeof errors[0], tuplecmp);
+ for(i=0; i<nelem(errors); i++) {
+ e = errors[i].num;
+ if(i > 0 && errors[i-1].num == e)
+ continue;
+ strcpy(buf, strerror(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ printf("\t{ %d, \"%s\", \"%s\" },\n", e, errors[i].name, buf);
+ }
+ printf("}\n\n");
+
+ printf("\n\n// Signal table\n");
+ printf("var signalList = [...]struct {\n");
+ printf("\tnum syscall.Signal\n");
+ printf("\tname string\n");
+ printf("\tdesc string\n");
+ printf("} {\n");
+ qsort(signals, nelem(signals), sizeof signals[0], tuplecmp);
+ for(i=0; i<nelem(signals); i++) {
+ e = signals[i].num;
+ if(i > 0 && signals[i-1].num == e)
+ continue;
+ strcpy(buf, strsignal(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ // cut trailing : number.
+ p = strrchr(buf, ":"[0]);
+ if(p)
+ *p = '\0';
+ printf("\t{ %d, \"%s\", \"%s\" },\n", e, signals[i].name, buf);
+ }
+ printf("}\n\n");
+
+ return 0;
+}
+
+'
+) >_errors.c
+
+$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
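For orientation, the script above writes a zerrors_${GOOS}_${GOARCH}.go file; the sketch below shows the general shape of what the awk filter and the embedded C program produce (the names and values are illustrative and not copied from any generated file):

package unix

import "syscall"

// Errors
const (
	EACCES = syscall.Errno(0xd)
	EAGAIN = syscall.Errno(0xb)
)

// Error table
var errorList = [...]struct {
	num  syscall.Errno
	name string
	desc string
}{
	{1, "EPERM", "operation not permitted"},
	{2, "ENOENT", "no such file or directory"},
}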
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
new file mode 100644
index 000000000..eb4332059
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkpost.go
@@ -0,0 +1,122 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// mkpost processes the output of cgo -godefs to
+// modify the generated types. It is used to clean up
+// the sys API in an architecture specific manner.
+//
+// mkpost is run after cgo -godefs; see README.md.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "os"
+ "regexp"
+)
+
+func main() {
+ // Get the OS and architecture (using GOARCH_TARGET if it exists)
+ goos := os.Getenv("GOOS")
+ goarch := os.Getenv("GOARCH_TARGET")
+ if goarch == "" {
+ goarch = os.Getenv("GOARCH")
+ }
+ // Check that we are using the Docker-based build system if we should be.
+ if goos == "linux" {
+ if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
+ os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
+ os.Stderr.WriteString("See README.md\n")
+ os.Exit(1)
+ }
+ }
+
+ b, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if goos == "aix" {
+ // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
+ // to avoid having both StTimespec and Timespec.
+ sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
+ b = sttimespec.ReplaceAll(b, []byte("Timespec"))
+ }
+
+ // Intentionally export __val fields in Fsid and Sigset_t
+ valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
+ b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
+
+ // Intentionally export __fds_bits field in FdSet
+ fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
+ b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
+
+ // If we have empty Ptrace structs, we should delete them. Only s390x emits
+ // nonempty Ptrace structs.
+ ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
+ b = ptraceRexexp.ReplaceAll(b, nil)
+
+ // Replace the control_regs union with a blank identifier for now.
+ controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
+ b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
+
+ // Remove fields that are added by glibc
+ // Note that this is unstable as the identifiers are private.
+ removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
+ b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
+
+ // Convert [65]int8 to [65]byte in Utsname members to simplify
+ // conversion to string; see golang.org/issue/20753
+ convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
+ b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
+
+ // Convert [1024]int8 to [1024]byte in Ptmget members
+ convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
+ b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
+
+ // Remove spare fields (e.g. in Statx_t)
+ spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
+ b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
+
+ // Remove cgo padding fields
+ removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
+ b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
+
+ // Remove padding, hidden, or unused fields
+ removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
+ b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
+
+ // Remove the first line of warning from cgo
+ b = b[bytes.IndexByte(b, '\n')+1:]
+ // Modify the command in the header to include:
+ // mkpost, our own warning, and a build tag.
+ replacement := fmt.Sprintf(`$1 | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s,%s`, goarch, goos)
+ cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
+ b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
+
+ // Rename Stat_t time fields
+ if goos == "freebsd" && goarch == "386" {
+ // Hide Stat_t.[AMCB]tim_ext fields
+ renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
+ b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
+ }
+ renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
+ b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
+
+ // gofmt
+ b, err = format.Source(b)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ os.Stdout.Write(b)
+}
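To see one of the rewrites above in isolation, the small program below (not part of this diff) applies the same Fsid/Sigset_t substitution from mkpost.go to a fabricated cgo -godefs snippet:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Identical to the valRegex substitution in mkpost.go above.
	valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
	in := []byte("type Fsid struct {\n\tX__val [2]int32\n}")
	out := valRegex.ReplaceAll(in, []byte("type $1 struct {${2}Val$4}"))
	fmt.Printf("%s\n", out) // prints the struct with X__val renamed to Val
}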
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go
new file mode 100644
index 000000000..e4af9424e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall.go
@@ -0,0 +1,407 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program reads a file containing function prototypes
+(like syscall_darwin.go) and generates system call bodies.
+The prototypes are marked by lines beginning with "//sys"
+and read like func declarations if //sys is replaced by func, but:
+ * The parameter lists must give a name for each argument.
+ This includes return parameters.
+ * The parameter lists must give a type for each argument:
+ the (x, y, z int) shorthand is not allowed.
+ * If the return parameter is an error number, it must be named errno.
+
+A line beginning with //sysnb is like //sys, except that the
+goroutine will not be suspended during the execution of the system
+call. This must only be used for system calls which can never
+block, as otherwise the system call could cause all goroutines to
+hang.
+*/
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ b32 = flag.Bool("b32", false, "32bit big-endian")
+ l32 = flag.Bool("l32", false, "32bit little-endian")
+ plan9 = flag.Bool("plan9", false, "plan9")
+ openbsd = flag.Bool("openbsd", false, "openbsd")
+ netbsd = flag.Bool("netbsd", false, "netbsd")
+ dragonfly = flag.Bool("dragonfly", false, "dragonfly")
+ arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
+ tags = flag.String("tags", "", "build tags")
+ filename = flag.String("output", "", "output file name (standard output if omitted)")
+)
+
+// cmdLine returns this program's command-line arguments
+func cmdLine() string {
+ return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags
+func buildTags() string {
+ return *tags
+}
+
+// Param is function parameter
+type Param struct {
+ Name string
+ Type string
+}
+
+// usage prints the program usage
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
+ os.Exit(1)
+}
+
+// parseParamList parses parameter list and returns a slice of parameters
+func parseParamList(list string) []string {
+ list = strings.TrimSpace(list)
+ if list == "" {
+ return []string{}
+ }
+ return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
+}
+
+// parseParam splits a parameter into name and type
+func parseParam(p string) Param {
+ ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
+ if ps == nil {
+ fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
+ os.Exit(1)
+ }
+ return Param{ps[1], ps[2]}
+}
+
+func main() {
+ // Get the OS and architecture (using GOARCH_TARGET if it exists)
+ goos := os.Getenv("GOOS")
+ if goos == "" {
+ fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
+ os.Exit(1)
+ }
+ goarch := os.Getenv("GOARCH_TARGET")
+ if goarch == "" {
+ goarch = os.Getenv("GOARCH")
+ }
+
+ // Check that we are using the Docker-based build system if we should
+ if goos == "linux" {
+ if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
+ fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
+ fmt.Fprintf(os.Stderr, "See README.md\n")
+ os.Exit(1)
+ }
+ }
+
+ flag.Usage = usage
+ flag.Parse()
+ if len(flag.Args()) <= 0 {
+ fmt.Fprintf(os.Stderr, "no files to parse provided\n")
+ usage()
+ }
+
+ endianness := ""
+ if *b32 {
+ endianness = "big-endian"
+ } else if *l32 {
+ endianness = "little-endian"
+ }
+
+ libc := false
+ if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") {
+ libc = true
+ }
+ trampolines := map[string]bool{}
+
+ text := ""
+ for _, path := range flag.Args() {
+ file, err := os.Open(path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ t := s.Text()
+ t = strings.TrimSpace(t)
+ t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
+ nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
+ if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
+ continue
+ }
+
+ // Line must be of the form
+ // func Open(path string, mode int, perm int) (fd int, errno error)
+ // Split into name, in params, out params.
+ f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
+ if f == nil {
+ fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
+ os.Exit(1)
+ }
+ funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
+
+ // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
+ if goos == "darwin" && !libc && funct == "ClockGettime" {
+ continue
+ }
+
+ // Split argument lists on comma.
+ in := parseParamList(inps)
+ out := parseParamList(outps)
+
+ // Try in vain to keep people from editing this file.
+ // The theory is that they jump into the middle of the file
+ // without reading the header.
+ text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+
+ // Go function header.
+ outDecl := ""
+ if len(out) > 0 {
+ outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
+ }
+ text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
+
+ // Check if err return available
+ errvar := ""
+ for _, param := range out {
+ p := parseParam(param)
+ if p.Type == "error" {
+ errvar = p.Name
+ break
+ }
+ }
+
+ // Prepare arguments to Syscall.
+ var args []string
+ n := 0
+ for _, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
+ } else if p.Type == "string" && errvar != "" {
+ text += fmt.Sprintf("\tvar _p%d *byte\n", n)
+ text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
+ text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
+ args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ n++
+ } else if p.Type == "string" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
+ text += fmt.Sprintf("\tvar _p%d *byte\n", n)
+ text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
+ args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ n++
+ } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
+ // Convert slice into pointer, length.
+ // Have to be careful not to take address of &a[0] if len == 0:
+ // pass dummy pointer in that case.
+ // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
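+ // For illustration (a rough sketch of the emitted text, assuming a
+ // hypothetical parameter "buf []byte" converted as _p0):
+ //	var _p0 unsafe.Pointer
+ //	if len(buf) > 0 {
+ //		_p0 = unsafe.Pointer(&buf[0])
+ //	} else {
+ //		_p0 = unsafe.Pointer(&_zero)
+ //	}
+ // with uintptr(_p0) and uintptr(len(buf)) appended to the argument list.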
+ text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
+ text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
+ text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
+ args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
+ n++
+ } else if p.Type == "int64" && (*openbsd || *netbsd) {
+ args = append(args, "0")
+ if endianness == "big-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
+ } else if endianness == "little-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
+ }
+ } else if p.Type == "int64" && *dragonfly {
+ if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
+ args = append(args, "0")
+ }
+ if endianness == "big-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
+ } else if endianness == "little-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
+ }
+ } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
+ if len(args)%2 == 1 && *arm {
+ // arm abi specifies 64-bit argument uses
+ // (even, odd) pair
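+ // For example (sketch): a hypothetical //sys pread(fd int, p []byte, offset int64)
+ // already has three queued arguments (fd, buffer pointer, length), so a
+ // padding "0" is appended first and the offset's low/high words land in
+ // an (even, odd) register pair.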
+ args = append(args, "0")
+ }
+ if endianness == "big-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
+ }
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
+ }
+ }
+
+ // Determine which form to use; pad args with zeros.
+ asm := "Syscall"
+ if nonblock != nil {
+ if errvar == "" && goos == "linux" {
+ asm = "RawSyscallNoError"
+ } else {
+ asm = "RawSyscall"
+ }
+ } else {
+ if errvar == "" && goos == "linux" {
+ asm = "SyscallNoError"
+ }
+ }
+ if len(args) <= 3 {
+ for len(args) < 3 {
+ args = append(args, "0")
+ }
+ } else if len(args) <= 6 {
+ asm += "6"
+ for len(args) < 6 {
+ args = append(args, "0")
+ }
+ } else if len(args) <= 9 {
+ asm += "9"
+ for len(args) < 9 {
+ args = append(args, "0")
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
+ }
+
+ // System call number.
+ if sysname == "" {
+ sysname = "SYS_" + funct
+ sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
+ sysname = strings.ToUpper(sysname)
+ }
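+ // For example (sketch): a function named ClockGettime with no explicit
+ // "= SYS_..." override becomes SYS_ClockGettime, then SYS_Clock_Gettime,
+ // and finally SYS_CLOCK_GETTIME.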
+
+ var libcFn string
+ if libc {
+ asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
+ sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
+ sysname = strings.ToLower(sysname) // lowercase
+ if sysname == "getdirentries64" {
+ // Special case - libSystem name and
+ // raw syscall name don't match.
+ sysname = "__getdirentries64"
+ }
+ libcFn = sysname
+ sysname = "funcPC(libc_" + sysname + "_trampoline)"
+ }
+
+ // Actual call.
+ arglist := strings.Join(args, ", ")
+ call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
+
+ // Assign return values.
+ body := ""
+ ret := []string{"_", "_", "_"}
+ doErrno := false
+ for i := 0; i < len(out); i++ {
+ p := parseParam(out[i])
+ reg := ""
+ if p.Name == "err" && !*plan9 {
+ reg = "e1"
+ ret[2] = reg
+ doErrno = true
+ } else if p.Name == "err" && *plan9 {
+ ret[0] = "r0"
+ ret[2] = "e1"
+ break
+ } else {
+ reg = fmt.Sprintf("r%d", i)
+ ret[i] = reg
+ }
+ if p.Type == "bool" {
+ reg = fmt.Sprintf("%s != 0", reg)
+ }
+ if p.Type == "int64" && endianness != "" {
+ // 64-bit number in r1:r0 or r0:r1.
+ if i+2 > len(out) {
+ fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
+ }
+ if endianness == "big-endian" {
+ reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
+ } else {
+ reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
+ }
+ ret[i] = fmt.Sprintf("r%d", i)
+ ret[i+1] = fmt.Sprintf("r%d", i+1)
+ }
+ if reg != "e1" || *plan9 {
+ body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
+ }
+ }
+ if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
+ text += fmt.Sprintf("\t%s\n", call)
+ } else {
+ if errvar == "" && goos == "linux" {
+ // raw syscall without error on Linux, see golang.org/issue/22924
+ text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
+ } else {
+ text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
+ }
+ }
+ text += body
+
+ if *plan9 && ret[2] == "e1" {
+ text += "\tif int32(r0) == -1 {\n"
+ text += "\t\terr = e1\n"
+ text += "\t}\n"
+ } else if doErrno {
+ text += "\tif e1 != 0 {\n"
+ text += "\t\terr = errnoErr(e1)\n"
+ text += "\t}\n"
+ }
+ text += "\treturn\n"
+ text += "}\n\n"
+
+ if libc && !trampolines[libcFn] {
+ // some system calls share a trampoline, like read and readlen.
+ trampolines[libcFn] = true
+ // Declare assembly trampoline.
+ text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
+ // Assembly trampoline calls the libc_* function, which this magic
+ // redirects to use the function from libSystem.
+ text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
+ text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
+ text += "\n"
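+ // For example, for a libcFn of "read" the text emitted above reads roughly:
+ //	func libc_read_trampoline()
+ //	//go:linkname libc_read libc_read
+ //	//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"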
+ }
+ }
+ if err := s.Err(); err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ file.Close()
+ }
+ fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
+}
+
+const srcTemplate = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+%s
+`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
new file mode 100644
index 000000000..3be3cdfc3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
@@ -0,0 +1,415 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program reads a file containing function prototypes
+(like syscall_aix.go) and generates system call bodies.
+The prototypes are marked by lines beginning with "//sys"
+and read like func declarations if //sys is replaced by func, but:
+ * The parameter lists must give a name for each argument.
+ This includes return parameters.
+ * The parameter lists must give a type for each argument:
+ the (x, y, z int) shorthand is not allowed.
+ * If the return parameter is an error number, it must be named err.
+ * If the Go func name needs to differ from its libc name, or the
+   function is not in libc, the name can be specified at the end,
+   after the "=" sign, like
+ //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
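+
+For illustration only (a rough sketch, not byte-exact generator output),
+a prototype such as
+ //sys Getpid() (pid int)
+yields a cgo-backed wrapper along the lines of
+
+func Getpid() (pid int) {
+ r0,_ := C.getpid()
+ pid = int(r0)
+ return
+}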
+*/
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ b32 = flag.Bool("b32", false, "32bit big-endian")
+ l32 = flag.Bool("l32", false, "32bit little-endian")
+ aix = flag.Bool("aix", false, "aix")
+ tags = flag.String("tags", "", "build tags")
+)
+
+// cmdLine returns this program's commandline arguments
+func cmdLine() string {
+ return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags
+func buildTags() string {
+ return *tags
+}
+
+// Param is a function parameter
+type Param struct {
+ Name string
+ Type string
+}
+
+// usage prints the program usage
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
+ os.Exit(1)
+}
+
+// parseParamList parses parameter list and returns a slice of parameters
+func parseParamList(list string) []string {
+ list = strings.TrimSpace(list)
+ if list == "" {
+ return []string{}
+ }
+ return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
+}
+
+// parseParam splits a parameter into name and type
+func parseParam(p string) Param {
+ ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
+ if ps == nil {
+ fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
+ os.Exit(1)
+ }
+ return Param{ps[1], ps[2]}
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if len(flag.Args()) <= 0 {
+ fmt.Fprintf(os.Stderr, "no files to parse provided\n")
+ usage()
+ }
+
+ endianness := ""
+ if *b32 {
+ endianness = "big-endian"
+ } else if *l32 {
+ endianness = "little-endian"
+ }
+
+ pack := ""
+ text := ""
+ cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
+ for _, path := range flag.Args() {
+ file, err := os.Open(path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ t := s.Text()
+ t = strings.TrimSpace(t)
+ t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
+ if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
+ pack = p[1]
+ }
+ nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
+ if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
+ continue
+ }
+
+ // Line must be of the form
+ // func Open(path string, mode int, perm int) (fd int, err error)
+ // Split into name, in params, out params.
+ f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
+ if f == nil {
+ fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
+ os.Exit(1)
+ }
+ funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
+
+ // Split argument lists on comma.
+ in := parseParamList(inps)
+ out := parseParamList(outps)
+
+ inps = strings.Join(in, ", ")
+ outps = strings.Join(out, ", ")
+
+ // Try in vain to keep people from editing this file.
+ // The theory is that they jump into the middle of the file
+ // without reading the header.
+ text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+
+ // Check if value return, err return available
+ errvar := ""
+ retvar := ""
+ rettype := ""
+ for _, param := range out {
+ p := parseParam(param)
+ if p.Type == "error" {
+ errvar = p.Name
+ } else {
+ retvar = p.Name
+ rettype = p.Type
+ }
+ }
+
+ // System call name.
+ if sysname == "" {
+ sysname = funct
+ }
+ sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
+ sysname = strings.ToLower(sysname) // All libc functions are lowercase.
+
+ cRettype := ""
+ if rettype == "unsafe.Pointer" {
+ cRettype = "uintptr_t"
+ } else if rettype == "uintptr" {
+ cRettype = "uintptr_t"
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
+ cRettype = "uintptr_t"
+ } else if rettype == "int" {
+ cRettype = "int"
+ } else if rettype == "int32" {
+ cRettype = "int"
+ } else if rettype == "int64" {
+ cRettype = "long long"
+ } else if rettype == "uint32" {
+ cRettype = "unsigned int"
+ } else if rettype == "uint64" {
+ cRettype = "unsigned long long"
+ } else {
+ cRettype = "int"
+ }
+ if sysname == "exit" {
+ cRettype = "void"
+ }
+
+ // Map the Go parameter types to their C counterparts.
+ var cIn []string
+ for _, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "string" {
+ cIn = append(cIn, "uintptr_t")
+ } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t", "size_t")
+ } else if p.Type == "unsafe.Pointer" {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "uintptr" {
+ cIn = append(cIn, "uintptr_t")
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "int" {
+ cIn = append(cIn, "int")
+ } else if p.Type == "int32" {
+ cIn = append(cIn, "int")
+ } else if p.Type == "int64" {
+ cIn = append(cIn, "long long")
+ } else if p.Type == "uint32" {
+ cIn = append(cIn, "unsigned int")
+ } else if p.Type == "uint64" {
+ cIn = append(cIn, "unsigned long long")
+ } else {
+ cIn = append(cIn, "int")
+ }
+ }
+
+ if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
+ if sysname == "select" {
+ // select is a keyword of Go. Its name is
+ // changed to c_select.
+ cExtern += "#define c_select select\n"
+ }
+ // Imports of system calls from libc
+ cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
+ cIn := strings.Join(cIn, ", ")
+ cExtern += fmt.Sprintf("(%s);\n", cIn)
+ }
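+ // For example (sketch): a hypothetical prototype "Close(fd int) (err error)"
+ // contributes "int close(int);" to the cgo preamble accumulated in cExtern.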
+
+ // Shared object file name.
+ if *aix {
+ if modname == "" {
+ modname = "libc.a/shr_64.o"
+ } else {
+ fmt.Fprintf(os.Stderr, "%s: only syscalls using libc are available\n", funct)
+ os.Exit(1)
+ }
+ }
+
+ strconvfunc := "C.CString"
+
+ // Go function header.
+ if outps != "" {
+ outps = fmt.Sprintf(" (%s)", outps)
+ }
+ if text != "" {
+ text += "\n"
+ }
+
+ text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
+
+ // Prepare arguments to Syscall.
+ var args []string
+ n := 0
+ argN := 0
+ for _, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
+ } else if p.Type == "string" && errvar != "" {
+ text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
+ args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
+ n++
+ } else if p.Type == "string" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
+ text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
+ args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
+ n++
+ } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
+ // Convert slice into pointer, length.
+ // Have to be careful not to take address of &a[0] if len == 0:
+ // pass nil in that case.
+ text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
+ text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
+ args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
+ n++
+ text += fmt.Sprintf("\tvar _p%d int\n", n)
+ text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
+ args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
+ n++
+ } else if p.Type == "int64" && endianness != "" {
+ if endianness == "big-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
+ }
+ n++
+ } else if p.Type == "bool" {
+ text += fmt.Sprintf("\tvar _p%d uint32\n", n)
+ text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
+ args = append(args, fmt.Sprintf("_p%d", n))
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
+ args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
+ } else if p.Type == "unsafe.Pointer" {
+ args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
+ } else if p.Type == "int" {
+ if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
+ args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
+ } else if argN == 0 && funct == "fcntl" {
+ args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
+ args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
+ }
+ } else if p.Type == "int32" {
+ args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
+ } else if p.Type == "int64" {
+ args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
+ } else if p.Type == "uint32" {
+ args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
+ } else if p.Type == "uint64" {
+ args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
+ } else if p.Type == "uintptr" {
+ args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
+ }
+ argN++
+ }
+
+ // Actual call.
+ arglist := strings.Join(args, ", ")
+ call := ""
+ if sysname == "exit" {
+ if errvar != "" {
+ call += "er :="
+ } else {
+ call += ""
+ }
+ } else if errvar != "" {
+ call += "r0,er :="
+ } else if retvar != "" {
+ call += "r0,_ :="
+ } else {
+ call += ""
+ }
+ if sysname == "select" {
+ // select is a keyword of Go. Its name is
+ // changed to c_select.
+ call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
+ } else {
+ call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
+ }
+
+ // Assign return values.
+ body := ""
+ for i := 0; i < len(out); i++ {
+ p := parseParam(out[i])
+ reg := ""
+ if p.Name == "err" {
+ reg = "e1"
+ } else {
+ reg = "r0"
+ }
+ if reg != "e1" {
+ body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
+ }
+ }
+
+ // verify return
+ if sysname != "exit" && errvar != "" {
+ if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
+ body += "\tif (uintptr(r0) == ^uintptr(0) && er != nil) {\n"
+ body += fmt.Sprintf("\t\t%s = er\n", errvar)
+ body += "\t}\n"
+ } else {
+ body += "\tif (r0 == -1 && er != nil) {\n"
+ body += fmt.Sprintf("\t\t%s = er\n", errvar)
+ body += "\t}\n"
+ }
+ } else if errvar != "" {
+ body += "\tif (er != nil) {\n"
+ body += fmt.Sprintf("\t\t%s = er\n", errvar)
+ body += "\t}\n"
+ }
+
+ text += fmt.Sprintf("\t%s\n", call)
+ text += body
+
+ text += "\treturn\n"
+ text += "}\n"
+ }
+ if err := s.Err(); err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ file.Close()
+ }
+ imp := ""
+ if pack != "unix" {
+ imp = "import \"golang.org/x/sys/unix\"\n"
+
+ }
+ fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
+}
+
+const srcTemplate = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+
+package %s
+
+
+%s
+*/
+import "C"
+import (
+ "unsafe"
+)
+
+
+%s
+
+%s
+`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
new file mode 100644
index 000000000..c96009951
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
@@ -0,0 +1,614 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program reads a file containing function prototypes
+(like syscall_aix.go) and generates system call bodies.
+The prototypes are marked by lines beginning with "//sys"
+and read like func declarations if //sys is replaced by func, but:
+ * The parameter lists must give a name for each argument.
+ This includes return parameters.
+ * The parameter lists must give a type for each argument:
+ the (x, y, z int) shorthand is not allowed.
+ * If the return parameter is an error number, it must be named err.
+ * If the Go func name needs to differ from its libc name, or the
+   function is not in libc, the name can be specified at the end,
+   after the "=" sign, like
+ //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
+
+
+This program will generate three files and handle both gc and gccgo implementations:
+ - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
+ - zsyscall_aix_ppc64_gc.go: gc part with //go:cgo_import_dynamic and a call to syscall6
+ - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
+
+ The generated code looks like this
+
+zsyscall_aix_ppc64.go
+func asyscall(...) (n int, err error) {
+ // Pointer Creation
+ r1, e1 := callasyscall(...)
+ // Type Conversion
+ // Error Handler
+ return
+}
+
+zsyscall_aix_ppc64_gc.go
+//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
+//go:linkname libc_asyscall libc_asyscall
+var asyscall syscallFunc
+
+func callasyscall(...) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
+ return
+}
+
+zsyscall_aix_ppc64_gccgo.go
+
+// int asyscall(...)
+
+import "C"
+
+func callasyscall(...) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.asyscall(...))
+ e1 = syscall.GetErrno()
+ return
+}
+*/
+
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ b32 = flag.Bool("b32", false, "32bit big-endian")
+ l32 = flag.Bool("l32", false, "32bit little-endian")
+ aix = flag.Bool("aix", false, "aix")
+ tags = flag.String("tags", "", "build tags")
+)
+
+// cmdLine returns this program's commandline arguments
+func cmdLine() string {
+ return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags
+func buildTags() string {
+ return *tags
+}
+
+// Param is a function parameter
+type Param struct {
+ Name string
+ Type string
+}
+
+// usage prints the program usage
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
+ os.Exit(1)
+}
+
+// parseParamList parses parameter list and returns a slice of parameters
+func parseParamList(list string) []string {
+ list = strings.TrimSpace(list)
+ if list == "" {
+ return []string{}
+ }
+ return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
+}
+
+// parseParam splits a parameter into name and type
+func parseParam(p string) Param {
+ ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
+ if ps == nil {
+ fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
+ os.Exit(1)
+ }
+ return Param{ps[1], ps[2]}
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if len(flag.Args()) <= 0 {
+ fmt.Fprintf(os.Stderr, "no files to parse provided\n")
+ usage()
+ }
+
+ endianness := ""
+ if *b32 {
+ endianness = "big-endian"
+ } else if *l32 {
+ endianness = "little-endian"
+ }
+
+ pack := ""
+ // GCCGO
+ textgccgo := ""
+ cExtern := "/*\n#include <stdint.h>\n"
+ // GC
+ textgc := ""
+ dynimports := ""
+ linknames := ""
+ var vars []string
+ // COMMON
+ textcommon := ""
+ for _, path := range flag.Args() {
+ file, err := os.Open(path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ t := s.Text()
+ t = strings.TrimSpace(t)
+ t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
+ if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
+ pack = p[1]
+ }
+ nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
+ if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
+ continue
+ }
+
+ // Line must be of the form
+ // func Open(path string, mode int, perm int) (fd int, err error)
+ // Split into name, in params, out params.
+ f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
+ if f == nil {
+ fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
+ os.Exit(1)
+ }
+ funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
+
+ // Split argument lists on comma.
+ in := parseParamList(inps)
+ out := parseParamList(outps)
+
+ inps = strings.Join(in, ", ")
+ outps = strings.Join(out, ", ")
+
+ if sysname == "" {
+ sysname = funct
+ }
+
+ onlyCommon := false
+ if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
+ // This function calls another syscall which is already implemented.
+ // Therefore, the gc and gccgo parts must not be generated.
+ onlyCommon = true
+ }
+
+ // Try in vain to keep people from editing this file.
+ // The theory is that they jump into the middle of the file
+ // without reading the header.
+
+ textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+ if !onlyCommon {
+ textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+ textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+ }
+
+ // Check if value return, err return available
+ errvar := ""
+ rettype := ""
+ for _, param := range out {
+ p := parseParam(param)
+ if p.Type == "error" {
+ errvar = p.Name
+ } else {
+ rettype = p.Type
+ }
+ }
+
+ sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
+ sysname = strings.ToLower(sysname) // All libc functions are lowercase.
+
+ // GCCGO Prototype return type
+ cRettype := ""
+ if rettype == "unsafe.Pointer" {
+ cRettype = "uintptr_t"
+ } else if rettype == "uintptr" {
+ cRettype = "uintptr_t"
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
+ cRettype = "uintptr_t"
+ } else if rettype == "int" {
+ cRettype = "int"
+ } else if rettype == "int32" {
+ cRettype = "int"
+ } else if rettype == "int64" {
+ cRettype = "long long"
+ } else if rettype == "uint32" {
+ cRettype = "unsigned int"
+ } else if rettype == "uint64" {
+ cRettype = "unsigned long long"
+ } else {
+ cRettype = "int"
+ }
+ if sysname == "exit" {
+ cRettype = "void"
+ }
+
+ // GCCGO Prototype arguments type
+ var cIn []string
+ for i, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "string" {
+ cIn = append(cIn, "uintptr_t")
+ } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t", "size_t")
+ } else if p.Type == "unsafe.Pointer" {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "uintptr" {
+ cIn = append(cIn, "uintptr_t")
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
+ cIn = append(cIn, "uintptr_t")
+ } else if p.Type == "int" {
+ if (i == 0 || i == 2) && funct == "fcntl" {
+ // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
+ cIn = append(cIn, "uintptr_t")
+ } else {
+ cIn = append(cIn, "int")
+ }
+
+ } else if p.Type == "int32" {
+ cIn = append(cIn, "int")
+ } else if p.Type == "int64" {
+ cIn = append(cIn, "long long")
+ } else if p.Type == "uint32" {
+ cIn = append(cIn, "unsigned int")
+ } else if p.Type == "uint64" {
+ cIn = append(cIn, "unsigned long long")
+ } else {
+ cIn = append(cIn, "int")
+ }
+ }
+
+ if !onlyCommon {
+ // GCCGO Prototype Generation
+ // Imports of system calls from libc
+ if sysname == "select" {
+ // select is a keyword of Go. Its name is
+ // changed to c_select.
+ cExtern += "#define c_select select\n"
+ }
+ cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
+ cIn := strings.Join(cIn, ", ")
+ cExtern += fmt.Sprintf("(%s);\n", cIn)
+ }
+ // GC Library name
+ if modname == "" {
+ modname = "libc.a/shr_64.o"
+ } else {
+ fmt.Fprintf(os.Stderr, "%s: only syscalls using libc are available\n", funct)
+ os.Exit(1)
+ }
+ sysvarname := fmt.Sprintf("libc_%s", sysname)
+
+ if !onlyCommon {
+ // GC Runtime import of function to allow cross-platform builds.
+ dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
+ // GC Link symbol to proc address variable.
+ linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
+ // GC Library proc address variable.
+ vars = append(vars, sysvarname)
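+ // For a hypothetical sysname "getpid" the two directives above read roughly:
+ //	//go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o"
+ //	//go:linkname libc_getpid libc_getpid
+ // and libc_getpid is recorded as a proc address variable.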
+ }
+
+ strconvfunc := "BytePtrFromString"
+ strconvtype := "*byte"
+
+ // Go function header.
+ if outps != "" {
+ outps = fmt.Sprintf(" (%s)", outps)
+ }
+ if textcommon != "" {
+ textcommon += "\n"
+ }
+
+ textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
+
+ // Prepare arguments to call.
+ var argscommon []string // Arguments in the common part
+ var argscall []string // Arguments for call prototype
+ var argsgc []string // Arguments for gc call (with syscall6)
+ var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
+ n := 0
+ argN := 0
+ for _, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
+ argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
+ argsgc = append(argsgc, p.Name)
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else if p.Type == "string" && errvar != "" {
+ textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
+ textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
+ textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
+
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
+ argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
+ n++
+ } else if p.Type == "string" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
+ textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
+ // There is no error variable here, so discard the conversion error.
+ textcommon += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
+
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
+ argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
+ n++
+ } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
+ // Convert slice into pointer, length.
+ // Have to be careful not to take address of &a[0] if len == 0:
+ // pass nil in that case.
+ textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
+ textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
+ argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
+ argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
+ n++
+ } else if p.Type == "int64" && endianness != "" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 in 32-bit mode; case not yet implemented\n")
+ } else if p.Type == "bool" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool; case not yet implemented\n")
+ } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
+ argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
+ argsgc = append(argsgc, p.Name)
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else if p.Type == "int" {
+ if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
+ // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
+ argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
+ argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
+ argsgc = append(argsgc, p.Name)
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+
+ } else {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
+ }
+ } else if p.Type == "int32" {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
+ } else if p.Type == "int64" {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
+ } else if p.Type == "uint32" {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
+ } else if p.Type == "uint64" {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
+ } else if p.Type == "uintptr" {
+ argscommon = append(argscommon, p.Name)
+ argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
+ argsgc = append(argsgc, p.Name)
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
+ } else {
+ argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
+ argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
+ argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
+ argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
+ }
+ argN++
+ }
+ nargs := len(argsgc)
+
+ // COMMON function generation
+ argscommonlist := strings.Join(argscommon, ", ")
+ callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
+ ret := []string{"_", "_"}
+ body := ""
+ doErrno := false
+ for i := 0; i < len(out); i++ {
+ p := parseParam(out[i])
+ reg := ""
+ if p.Name == "err" {
+ reg = "e1"
+ ret[1] = reg
+ doErrno = true
+ } else {
+ reg = "r0"
+ ret[0] = reg
+ }
+ if p.Type == "bool" {
+ reg = fmt.Sprintf("%s != 0", reg)
+ }
+ if reg != "e1" {
+ body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
+ }
+ }
+ if ret[0] == "_" && ret[1] == "_" {
+ textcommon += fmt.Sprintf("\t%s\n", callcommon)
+ } else {
+ textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
+ }
+ textcommon += body
+
+ if doErrno {
+ textcommon += "\tif e1 != 0 {\n"
+ textcommon += "\t\terr = errnoErr(e1)\n"
+ textcommon += "\t}\n"
+ }
+ textcommon += "\treturn\n"
+ textcommon += "}\n"
+
+ if onlyCommon {
+ continue
+ }
+
+ // CALL Prototype
+ callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
+
+ // GC function generation
+ asm := "syscall6"
+ if nonblock != nil {
+ asm = "rawSyscall6"
+ }
+
+ if len(argsgc) <= 6 {
+ for len(argsgc) < 6 {
+ argsgc = append(argsgc, "0")
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", funct)
+ os.Exit(1)
+ }
+ argsgclist := strings.Join(argsgc, ", ")
+ callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
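+ // For a hypothetical zero-argument "getpid" this builds roughly:
+ //	syscall6(uintptr(unsafe.Pointer(&libc_getpid)), 0, 0, 0, 0, 0, 0, 0)
+ // i.e. the proc address variable, the real argument count, then six
+ // (zero-padded) argument slots.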
+
+ textgc += callProto
+ textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
+ textgc += "\treturn\n}\n"
+
+ // GCCGO function generation
+ argsgccgolist := strings.Join(argsgccgo, ", ")
+ var callgccgo string
+ if sysname == "select" {
+ // select is a keyword of Go. Its name is
+ // changed to c_select.
+ callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
+ } else {
+ callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
+ }
+ textgccgo += callProto
+ textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
+ textgccgo += "\te1 = syscall.GetErrno()\n"
+ textgccgo += "\treturn\n}\n"
+ }
+ if err := s.Err(); err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ file.Close()
+ }
+ imp := ""
+ if pack != "unix" {
+ imp = "import \"golang.org/x/sys/unix\"\n"
+
+ }
+
+ // Write zsyscall_aix_ppc64.go
+ err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
+ []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
+ 0644)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ // Write zsyscall_aix_ppc64_gc.go
+ vardecls := "\t" + strings.Join(vars, ",\n\t")
+ vardecls += " syscallFunc"
+ err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
+ []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
+ 0644)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ // Write zsyscall_aix_ppc64_gccgo.go
+ err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
+ []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
+ 0644)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+}
+
+const srcTemplate1 = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+
+package %s
+
+import (
+ "unsafe"
+)
+
+
+%s
+
+%s
+`
+const srcTemplate2 = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+// +build !gccgo
+
+package %s
+
+import (
+ "unsafe"
+)
+%s
+%s
+%s
+type syscallFunc uintptr
+
+var (
+%s
+)
+
+// Implemented in runtime/syscall_aix.go.
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+%s
+`
+const srcTemplate3 = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+// +build gccgo
+
+package %s
+
+%s
+*/
+import "C"
+import (
+ "syscall"
+)
+
+
+%s
+
+%s
+`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
new file mode 100644
index 000000000..3d864738b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
@@ -0,0 +1,335 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+ This program reads a file containing function prototypes
+ (like syscall_solaris.go) and generates system call bodies.
+ The prototypes are marked by lines beginning with "//sys"
+ and read like func declarations if //sys is replaced by func, but:
+ * The parameter lists must give a name for each argument.
+ This includes return parameters.
+ * The parameter lists must give a type for each argument:
+ the (x, y, z int) shorthand is not allowed.
+ * If the return parameter is an error number, it must be named err.
+ * If the Go func name needs to differ from its libc name, or the
+   function is not in libc, the name can be specified at the end,
+   after the "=" sign, like
+ //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
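+
+ For illustration only (a rough sketch, not byte-exact generator output),
+ a prototype such as
+  //sysnb Getpid() (pid int)
+ produces roughly
+
+ //go:cgo_import_dynamic libc_getpid getpid "libc.so"
+ //go:linkname procGetpid libc_getpid
+ func Getpid() (pid int) {
+  r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0)
+  pid = int(r0)
+  return
+ }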
+*/
+
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ b32 = flag.Bool("b32", false, "32bit big-endian")
+ l32 = flag.Bool("l32", false, "32bit little-endian")
+ tags = flag.String("tags", "", "build tags")
+)
+
+// cmdLine returns this program's commandline arguments
+func cmdLine() string {
+ return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags
+func buildTags() string {
+ return *tags
+}
+
+// Param is a function parameter
+type Param struct {
+ Name string
+ Type string
+}
+
+// usage prints the program usage
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
+ os.Exit(1)
+}
+
+// parseParamList parses parameter list and returns a slice of parameters
+func parseParamList(list string) []string {
+ list = strings.TrimSpace(list)
+ if list == "" {
+ return []string{}
+ }
+ return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
+}
+
+// parseParam splits a parameter into name and type
+func parseParam(p string) Param {
+ ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
+ if ps == nil {
+ fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
+ os.Exit(1)
+ }
+ return Param{ps[1], ps[2]}
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if len(flag.Args()) <= 0 {
+ fmt.Fprintf(os.Stderr, "no files to parse provided\n")
+ usage()
+ }
+
+ endianness := ""
+ if *b32 {
+ endianness = "big-endian"
+ } else if *l32 {
+ endianness = "little-endian"
+ }
+
+ pack := ""
+ text := ""
+ dynimports := ""
+ linknames := ""
+ var vars []string
+ for _, path := range flag.Args() {
+ file, err := os.Open(path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ t := s.Text()
+ t = strings.TrimSpace(t)
+ t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
+ if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
+ pack = p[1]
+ }
+ nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
+ if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
+ continue
+ }
+
+ // Line must be of the form
+ // func Open(path string, mode int, perm int) (fd int, err error)
+ // Split into name, in params, out params.
+ f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
+ if f == nil {
+ fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
+ os.Exit(1)
+ }
+ funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
+
+ // Split argument lists on comma.
+ in := parseParamList(inps)
+ out := parseParamList(outps)
+
+ inps = strings.Join(in, ", ")
+ outps = strings.Join(out, ", ")
+
+ // Try in vain to keep people from editing this file.
+ // The theory is that they jump into the middle of the file
+ // without reading the header.
+ text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
+
+ // Shared object (.so) file name.
+ if modname == "" {
+ modname = "libc"
+ }
+
+ // System call name.
+ if sysname == "" {
+ sysname = funct
+ }
+
+ // System call pointer variable name.
+ sysvarname := fmt.Sprintf("proc%s", sysname)
+
+ strconvfunc := "BytePtrFromString"
+ strconvtype := "*byte"
+
+ sysname = strings.ToLower(sysname) // All libc functions are lowercase.
+
+ // Runtime import of function to allow cross-platform builds.
+ dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
+ // Link symbol to proc address variable.
+ linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
+ // Library proc address variable.
+ vars = append(vars, sysvarname)
+
+ // Go function header.
+ outlist := strings.Join(out, ", ")
+ if outlist != "" {
+ outlist = fmt.Sprintf(" (%s)", outlist)
+ }
+ if text != "" {
+ text += "\n"
+ }
+ text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
+
+ // Check if err return available
+ errvar := ""
+ for _, param := range out {
+ p := parseParam(param)
+ if p.Type == "error" {
+ errvar = p.Name
+ continue
+ }
+ }
+
+ // Prepare arguments to Syscall.
+ var args []string
+ n := 0
+ for _, param := range in {
+ p := parseParam(param)
+ if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
+ args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
+ } else if p.Type == "string" && errvar != "" {
+ text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
+ text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
+ text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
+ args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ n++
+ } else if p.Type == "string" {
+ fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
+ text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
+ text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
+ args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
+ n++
+ } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
+ // Convert slice into pointer, length.
+ // Have to be careful not to take address of &a[0] if len == 0:
+ // pass nil in that case.
+ text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
+ text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
+ args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
+ n++
+ } else if p.Type == "int64" && endianness != "" {
+ if endianness == "big-endian" {
+ args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
+ }
+ } else if p.Type == "bool" {
+ text += fmt.Sprintf("\tvar _p%d uint32\n", n)
+ text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
+ args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
+ n++
+ } else {
+ args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
+ }
+ }
+ nargs := len(args)
+
+ // Determine which form to use; pad args with zeros.
+ asm := "sysvicall6"
+ if nonblock != nil {
+ asm = "rawSysvicall6"
+ }
+ if len(args) <= 6 {
+ for len(args) < 6 {
+ args = append(args, "0")
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
+ os.Exit(1)
+ }
+
+ // Actual call.
+ arglist := strings.Join(args, ", ")
+ call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
+
+ // Assign return values.
+ body := ""
+ ret := []string{"_", "_", "_"}
+ doErrno := false
+ for i := 0; i < len(out); i++ {
+ p := parseParam(out[i])
+ reg := ""
+ if p.Name == "err" {
+ reg = "e1"
+ ret[2] = reg
+ doErrno = true
+ } else {
+ reg = fmt.Sprintf("r%d", i)
+ ret[i] = reg
+ }
+ if p.Type == "bool" {
+ reg = fmt.Sprintf("%s != 0", reg)
+ }
+ if p.Type == "int64" && endianness != "" {
+ // 64-bit number in r1:r0 or r0:r1.
+ if i+2 > len(out) {
+ fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
+ os.Exit(1)
+ }
+ if endianness == "big-endian" {
+ reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
+ } else {
+ reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
+ }
+ ret[i] = fmt.Sprintf("r%d", i)
+ ret[i+1] = fmt.Sprintf("r%d", i+1)
+ }
+ if reg != "e1" {
+ body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
+ }
+ }
+ if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
+ text += fmt.Sprintf("\t%s\n", call)
+ } else {
+ text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
+ }
+ text += body
+
+ if doErrno {
+ text += "\tif e1 != 0 {\n"
+ text += "\t\terr = e1\n"
+ text += "\t}\n"
+ }
+ text += "\treturn\n"
+ text += "}\n"
+ }
+ if err := s.Err(); err != nil {
+ fmt.Fprintf(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ file.Close()
+ }
+ imp := ""
+ if pack != "unix" {
+ imp = "import \"golang.org/x/sys/unix\"\n"
+
+ }
+ vardecls := "\t" + strings.Join(vars, ",\n\t")
+ vardecls += " syscallFunc"
+ fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
+}
+
+const srcTemplate = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+
+package %s
+
+import (
+ "syscall"
+ "unsafe"
+)
+%s
+%s
+%s
+var (
+%s
+)
+
+%s
+`
diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
new file mode 100644
index 000000000..b6b409909
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
@@ -0,0 +1,355 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
+//
+// Build a MIB with each entry being an array containing the level, type and
+// a hash that will contain additional entries if the current entry is a node.
+// We then walk this MIB and create a flattened sysctl name to OID hash.
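+//
+// For example (sketch), after the walk an entry for "kern.ostype" ends up
+// in the generated table roughly as:
+//	{ "kern.ostype", []_C_int{1, 1} },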
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ goos, goarch string
+)
+
+// cmdLine returns this program's commandline arguments.
+func cmdLine() string {
+ return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags.
+func buildTags() string {
+ return fmt.Sprintf("%s,%s", goarch, goos)
+}
+
+// reMatch performs a regular expression match and stores the submatch slice in the value pointed to by m.
+func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
+ *m = re.FindStringSubmatch(str)
+ if *m != nil {
+ return true
+ }
+ return false
+}
+
+type nodeElement struct {
+ n int
+ t string
+ pE *map[string]nodeElement
+}
+
+var (
+ debugEnabled bool
+ mib map[string]nodeElement
+ node *map[string]nodeElement
+ nodeMap map[string]string
+ sysCtl []string
+)
+
+var (
+ ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
+ ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
+ ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
+ netInetRE = regexp.MustCompile(`^netinet/`)
+ netInet6RE = regexp.MustCompile(`^netinet6/`)
+ netRE = regexp.MustCompile(`^net/`)
+ bracesRE = regexp.MustCompile(`{.*}`)
+ ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
+ fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
+)
+
+func debug(s string) {
+ if debugEnabled {
+ fmt.Fprintln(os.Stderr, s)
+ }
+}
+
+// Walk the MIB and build a sysctl name to OID mapping.
+func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
+ lNode := pNode // local copy of pointer to node
+ var keys []string
+ for k := range *lNode {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ nodename := name
+ if name != "" {
+ nodename += "."
+ }
+ nodename += key
+
+ nodeoid := append(oid, (*pNode)[key].n)
+
+ if (*pNode)[key].t == `CTLTYPE_NODE` {
+ if _, ok := nodeMap[nodename]; ok {
+ lNode = &mib
+ ctlName := nodeMap[nodename]
+ for _, part := range strings.Split(ctlName, ".") {
+ lNode = ((*lNode)[part]).pE
+ }
+ } else {
+ lNode = (*pNode)[key].pE
+ }
+ buildSysctl(lNode, nodename, nodeoid)
+ } else if (*pNode)[key].t != "" {
+ oidStr := []string{}
+ for j := range nodeoid {
+ oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
+ }
+ text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
+ sysCtl = append(sysCtl, text)
+ }
+ }
+}
+
+func main() {
+ // Get the OS (using GOOS_TARGET if it exists)
+ goos = os.Getenv("GOOS_TARGET")
+ if goos == "" {
+ goos = os.Getenv("GOOS")
+ }
+ // Get the architecture (using GOARCH_TARGET if it exists)
+ goarch = os.Getenv("GOARCH_TARGET")
+ if goarch == "" {
+ goarch = os.Getenv("GOARCH")
+ }
+ // Check if GOOS and GOARCH environment variables are defined
+ if goarch == "" || goos == "" {
+ fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
+ os.Exit(1)
+ }
+
+ mib = make(map[string]nodeElement)
+ headers := [...]string{
+ `sys/sysctl.h`,
+ `sys/socket.h`,
+ `sys/tty.h`,
+ `sys/malloc.h`,
+ `sys/mount.h`,
+ `sys/namei.h`,
+ `sys/sem.h`,
+ `sys/shm.h`,
+ `sys/vmmeter.h`,
+ `uvm/uvmexp.h`,
+ `uvm/uvm_param.h`,
+ `uvm/uvm_swap_encrypt.h`,
+ `ddb/db_var.h`,
+ `net/if.h`,
+ `net/if_pfsync.h`,
+ `net/pipex.h`,
+ `netinet/in.h`,
+ `netinet/icmp_var.h`,
+ `netinet/igmp_var.h`,
+ `netinet/ip_ah.h`,
+ `netinet/ip_carp.h`,
+ `netinet/ip_divert.h`,
+ `netinet/ip_esp.h`,
+ `netinet/ip_ether.h`,
+ `netinet/ip_gre.h`,
+ `netinet/ip_ipcomp.h`,
+ `netinet/ip_ipip.h`,
+ `netinet/pim_var.h`,
+ `netinet/tcp_var.h`,
+ `netinet/udp_var.h`,
+ `netinet6/in6.h`,
+ `netinet6/ip6_divert.h`,
+ `netinet6/pim6_var.h`,
+ `netinet/icmp6.h`,
+ `netmpls/mpls.h`,
+ }
+
+ ctls := [...]string{
+ `kern`,
+ `vm`,
+ `fs`,
+ `net`,
+ //debug /* Special handling required */
+ `hw`,
+ //machdep /* Arch specific */
+ `user`,
+ `ddb`,
+ //vfs /* Special handling required */
+ `fs.posix`,
+ `kern.forkstat`,
+ `kern.intrcnt`,
+ `kern.malloc`,
+ `kern.nchstats`,
+ `kern.seminfo`,
+ `kern.shminfo`,
+ `kern.timecounter`,
+ `kern.tty`,
+ `kern.watchdog`,
+ `net.bpf`,
+ `net.ifq`,
+ `net.inet`,
+ `net.inet.ah`,
+ `net.inet.carp`,
+ `net.inet.divert`,
+ `net.inet.esp`,
+ `net.inet.etherip`,
+ `net.inet.gre`,
+ `net.inet.icmp`,
+ `net.inet.igmp`,
+ `net.inet.ip`,
+ `net.inet.ip.ifq`,
+ `net.inet.ipcomp`,
+ `net.inet.ipip`,
+ `net.inet.mobileip`,
+ `net.inet.pfsync`,
+ `net.inet.pim`,
+ `net.inet.tcp`,
+ `net.inet.udp`,
+ `net.inet6`,
+ `net.inet6.divert`,
+ `net.inet6.ip6`,
+ `net.inet6.icmp6`,
+ `net.inet6.pim6`,
+ `net.inet6.tcp6`,
+ `net.inet6.udp6`,
+ `net.mpls`,
+ `net.mpls.ifq`,
+ `net.key`,
+ `net.pflow`,
+ `net.pfsync`,
+ `net.pipex`,
+ `net.rt`,
+ `vm.swapencrypt`,
+ //vfsgenctl /* Special handling required */
+ }
+
+ // Node name "fixups"
+ ctlMap := map[string]string{
+ "ipproto": "net.inet",
+ "net.inet.ipproto": "net.inet",
+ "net.inet6.ipv6proto": "net.inet6",
+ "net.inet6.ipv6": "net.inet6.ip6",
+ "net.inet.icmpv6": "net.inet6.icmp6",
+ "net.inet6.divert6": "net.inet6.divert",
+ "net.inet6.tcp6": "net.inet.tcp",
+ "net.inet6.udp6": "net.inet.udp",
+ "mpls": "net.mpls",
+ "swpenc": "vm.swapencrypt",
+ }
+
+ // Node mappings
+ nodeMap = map[string]string{
+ "net.inet.ip.ifq": "net.ifq",
+ "net.inet.pfsync": "net.pfsync",
+ "net.mpls.ifq": "net.ifq",
+ }
+
+ mCtls := make(map[string]bool)
+ for _, ctl := range ctls {
+ mCtls[ctl] = true
+ }
+
+ for _, header := range headers {
+ debug("Processing " + header)
+ file, err := os.Open(filepath.Join("/usr/include", header))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.Exit(1)
+ }
+ s := bufio.NewScanner(file)
+ for s.Scan() {
+ var sub []string
+ if reMatch(ctlNames1RE, s.Text(), &sub) ||
+ reMatch(ctlNames2RE, s.Text(), &sub) ||
+ reMatch(ctlNames3RE, s.Text(), &sub) {
+ if sub[1] == `CTL_NAMES` {
+ // Top level.
+ node = &mib
+ } else {
+ // Node.
+ nodename := strings.ToLower(sub[2])
+ ctlName := ""
+ if reMatch(netInetRE, header, &sub) {
+ ctlName = "net.inet." + nodename
+ } else if reMatch(netInet6RE, header, &sub) {
+ ctlName = "net.inet6." + nodename
+ } else if reMatch(netRE, header, &sub) {
+ ctlName = "net." + nodename
+ } else {
+ ctlName = nodename
+ ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
+ }
+
+ if val, ok := ctlMap[ctlName]; ok {
+ ctlName = val
+ }
+ if _, ok := mCtls[ctlName]; !ok {
+ debug("Ignoring " + ctlName + "...")
+ continue
+ }
+
+ // Walk down from the top of the MIB.
+ node = &mib
+ for _, part := range strings.Split(ctlName, ".") {
+ if _, ok := (*node)[part]; !ok {
+ debug("Missing node " + part)
+ (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
+ }
+ node = (*node)[part].pE
+ }
+ }
+
+ // Populate current node with entries.
+ i := -1
+ for !strings.HasPrefix(s.Text(), "}") {
+ s.Scan()
+ if reMatch(bracesRE, s.Text(), &sub) {
+ i++
+ }
+ if !reMatch(ctlTypeRE, s.Text(), &sub) {
+ continue
+ }
+ (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
+ }
+ }
+ }
+ err = s.Err()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.Exit(1)
+ }
+ file.Close()
+ }
+ buildSysctl(&mib, "", []int{})
+
+ sort.Strings(sysCtl)
+ text := strings.Join(sysCtl, "")
+
+ fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
+}
+
+const srcTemplate = `// %s
+// Code generated by the command above; DO NOT EDIT.
+
+// +build %s
+
+package unix
+
+type mibentry struct {
+ ctlname string
+ ctloid []_C_int
+}
+
+var sysctlMib = []mibentry {
+%s
+}
+`
diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go
new file mode 100644
index 000000000..baa6ecd85
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksysnum.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Generate system call table for DragonFly, NetBSD,
+// FreeBSD, OpenBSD or Darwin from master list
+// (for example, /usr/src/sys/kern/syscalls.master or
+// sys/syscall.h).
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ goos, goarch string
+)
+
+// cmdLine returns this program's command-line arguments
+func cmdLine() string {
+ return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
+}
+
+// buildTags returns build tags
+func buildTags() string {
+ return fmt.Sprintf("%s,%s", goarch, goos)
+}
+
+func checkErr(err error) {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.Exit(1)
+ }
+}
+
+// source string and substring slice for regexp
+type re struct {
+ str string // source string
+ sub []string // matched sub-string
+}
+
+// Match performs regular expression match
+func (r *re) Match(exp string) bool {
+ r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
+ if r.sub != nil {
+ return true
+ }
+ return false
+}
+
+// fetchFile fetches a text file from URL
+func fetchFile(URL string) io.Reader {
+ resp, err := http.Get(URL)
+ checkErr(err)
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ checkErr(err)
+ return strings.NewReader(string(body))
+}
+
+// readFile reads a text file from path
+func readFile(path string) io.Reader {
+	file, err := os.Open(path)
+ checkErr(err)
+ return file
+}
+
+func format(name, num, proto string) string {
+ name = strings.ToUpper(name)
+ // There are multiple entries for enosys and nosys, so comment them out.
+ nm := re{str: name}
+ if nm.Match(`^SYS_E?NOSYS$`) {
+ name = fmt.Sprintf("// %s", name)
+ }
+ if name == `SYS_SYS_EXIT` {
+ name = `SYS_EXIT`
+ }
+ return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
+}
+
+func main() {
+	// Get the OS (using GOOS_TARGET if it exists)
+ goos = os.Getenv("GOOS_TARGET")
+ if goos == "" {
+ goos = os.Getenv("GOOS")
+ }
+ // Get the architecture (using GOARCH_TARGET if it exists)
+ goarch = os.Getenv("GOARCH_TARGET")
+ if goarch == "" {
+ goarch = os.Getenv("GOARCH")
+ }
+ // Check if GOOS and GOARCH environment variables are defined
+ if goarch == "" || goos == "" {
+ fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
+ os.Exit(1)
+ }
+
+ file := strings.TrimSpace(os.Args[1])
+ var syscalls io.Reader
+ if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
+ // Download syscalls.master file
+ syscalls = fetchFile(file)
+ } else {
+ syscalls = readFile(file)
+ }
+
+ var text, line string
+ s := bufio.NewScanner(syscalls)
+ for s.Scan() {
+ t := re{str: line}
+ if t.Match(`^(.*)\\$`) {
+ // Handle continuation
+ line = t.sub[1]
+ line += strings.TrimLeft(s.Text(), " \t")
+ } else {
+ // New line
+ line = s.Text()
+ }
+ t = re{str: line}
+ if t.Match(`\\$`) {
+ continue
+ }
+ t = re{str: line}
+
+ switch goos {
+ case "dragonfly":
+ if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
+ num, proto := t.sub[1], t.sub[2]
+ name := fmt.Sprintf("SYS_%s", t.sub[3])
+ text += format(name, num, proto)
+ }
+ case "freebsd":
+ if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
+ num, proto := t.sub[1], t.sub[2]
+ name := fmt.Sprintf("SYS_%s", t.sub[3])
+ text += format(name, num, proto)
+ }
+ case "openbsd":
+ if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
+ num, proto, name := t.sub[1], t.sub[3], t.sub[4]
+ text += format(name, num, proto)
+ }
+ case "netbsd":
+ if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
+ num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
+ name := t.sub[7] + "_" + t.sub[9]
+ if t.sub[11] != "" {
+ name = t.sub[7] + "_" + t.sub[11]
+ }
+ name = strings.ToUpper(name)
+ if compat == "" || compat == "13" || compat == "30" || compat == "50" {
+ text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
+ }
+ }
+ case "darwin":
+ if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
+ name, num := t.sub[1], t.sub[2]
+ name = strings.ToUpper(name)
+ text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
+ }
+ default:
+ fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
+ os.Exit(1)
+
+ }
+ }
+ err := s.Err()
+ checkErr(err)
+
+ fmt.Printf(template, cmdLine(), buildTags(), text)
+}
+
+const template = `// %s
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build %s
+
+package unix
+
+const(
+%s)`
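To illustrate one pass of the OpenBSD branch above, here is a standalone sketch that runs a made-up syscalls.master line through the same regular expression the generator uses and the same upper-casing that format applies; the sample line and number are illustrative only:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical OpenBSD syscalls.master entry.
	line := "20\tSTD\t\t{ pid_t sys_getpid(void); }"
	re := regexp.MustCompile(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`)
	if sub := re.FindStringSubmatch(line); sub != nil {
		num, proto, name := sub[1], sub[3], sub[4]
		// format() upper-cases the name, so "sys_getpid" becomes SYS_GETPID.
		fmt.Printf("\t%s = %s; // %s\n", strings.ToUpper(name), num, proto)
	}
}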
diff --git a/vendor/golang.org/x/sys/unix/openbsd_pledge.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go
index 230a36d24..eb48294b2 100644
--- a/vendor/golang.org/x/sys/unix/openbsd_pledge.go
+++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build openbsd
-// +build 386 amd64 arm
-
package unix
import (
diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go
new file mode 100644
index 000000000..3a90aa6df
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+// ReadDirent reads directory entries from fd and writes them into buf.
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return Getdents(fd, buf)
+}
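A minimal usage sketch for the wrapper above, assuming a Linux host (the same call works on the other listed platforms) and decoding the raw records with ParseDirent from the same package:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a directory and read raw entries; ReadDirent is a thin wrapper
	// around Getdents on these platforms.
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil || n == 0 {
			break
		}
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}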
diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
new file mode 100644
index 000000000..5fdae40b3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package unix
+
+import "unsafe"
+
+// ReadDirent reads directory entries from fd and writes them into buf.
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ // Final argument is (basep *uintptr) and the syscall doesn't take nil.
+ // 64 bits should be enough. (32 bits isn't even on 386). Since the
+ // actual system call is getdirentries64, 64 is a good guess.
+ // TODO(rsc): Can we use a single global basep for all calls?
+ var base = (*uintptr)(unsafe.Pointer(new(uint64)))
+ return Getdirentries(fd, buf, base)
+}
diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
index 5f9ae233a..062bcabab 100644
--- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
+++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
@@ -18,15 +18,18 @@ func cmsgAlignOf(salen int) int {
salign := SizeofPtr
switch runtime.GOOS {
- case "darwin", "dragonfly", "solaris":
- // NOTE: It seems like 64-bit Darwin, DragonFly BSD and
- // Solaris kernels still require 32-bit aligned access to
- // network subsystem.
+ case "aix":
+ // There is no alignment on AIX.
+ salign = 1
+ case "darwin", "dragonfly", "solaris", "illumos":
+ // NOTE: It seems like 64-bit Darwin, DragonFly BSD,
+ // illumos, and Solaris kernels still require 32-bit
+ // aligned access to network subsystem.
if SizeofPtr == 8 {
salign = 4
}
- case "openbsd":
- // OpenBSD armv7 requires 64-bit alignment.
+ case "netbsd", "openbsd":
+ // NetBSD and OpenBSD armv7 require 64-bit alignment.
if runtime.GOARCH == "arm" {
salign = 8
}
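The switch above only selects the alignment; the rounding itself is the usual round-up-to-a-multiple trick. A standalone sketch of that arithmetic, under the assumption that the rounding expression in cmsgAlignOf is the conventional one:

package main

import "fmt"

// align rounds salen up to a multiple of salign, mirroring the rounding
// cmsgAlignOf performs after choosing salign per GOOS/GOARCH.
func align(salen, salign int) int {
	return (salen + salign - 1) & ^(salign - 1)
}

func main() {
	fmt.Println(align(6, 4)) // 8: 64-bit Darwin/DragonFly/illumos/Solaris keep 4-byte alignment
	fmt.Println(align(6, 8)) // 8: NetBSD/OpenBSD on arm use 8
	fmt.Println(align(6, 1)) // 6: AIX applies no alignment
}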
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 0d4b1d7a2..fd4ee8ebe 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -50,5 +50,4 @@ func BytePtrFromString(s string) (*byte, error) {
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
-// See mkunix.pl.
var _zero uintptr
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 1351a228b..a079243dc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -227,7 +227,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
// Some versions of AIX have a bug in getsockname (see IV78655).
// We can't rely on sa.Len being set correctly.
- n := SizeofSockaddrUnix - 3 // substract leading Family, Len, terminating NUL.
+ n := SizeofSockaddrUnix - 3 // subtract leading Family, Len, terminating NUL.
for i := 0; i < n; i++ {
if pp.Path[i] == 0 {
n = i
@@ -281,7 +281,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
//sys getdirent(fd int, buf []byte) (n int, err error)
-func ReadDirent(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf)
}
@@ -444,8 +444,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sysnb Umask(mask int) (oldmask int)
//sysnb Uname(buf *Utsname) (err error)
-//TODO umount
-// //sys Unmount(target string, flags int) (err error) = umount
//sys Unlink(path string) (err error)
//sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
@@ -456,8 +454,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sys Dup2(oldfd int, newfd int) (err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64
//sys Fchown(fd int, uid int, gid int) (err error)
-//sys Fstat(fd int, stat *Stat_t) (err error)
-//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
+//sys fstat(fd int, stat *Stat_t) (err error)
+//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
@@ -466,18 +464,17 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Getuid() (uid int)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Listen(s int, n int) (err error)
-//sys Lstat(path string, stat *Stat_t) (err error)
+//sys lstat(path string, stat *Stat_t) (err error)
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64
-//TODO Select
-// //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
-//sys Stat(path string, stat *Stat_t) (err error)
+//sys stat(path string, statptr *Stat_t) (err error)
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys Truncate(path string, length int64) (err error)
@@ -493,8 +490,10 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
-//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
-//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+// In order to use the msghdr structure with Control and Controllen, nrecvmsg and nsendmsg must be used.
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = nrecvmsg
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg
//sys munmap(addr uintptr, length uintptr) (err error)
@@ -545,3 +544,14 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys gettimeofday(tv *Timeval, tzp *Timezone) (err error)
//sysnb Time(t *Time_t) (tt Time_t, err error)
//sys Utime(path string, buf *Utimbuf) (err error)
+
+//sys Getsystemcfg(label int) (n uint64)
+
+//sys umount(target string) (err error)
+func Unmount(target string, flags int) (err error) {
+ if flags != 0 {
+ // AIX doesn't have any flags for umount.
+ return ENOSYS
+ }
+ return umount(target)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
index c28af1f86..bf05603f1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
@@ -32,3 +32,19 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return fstat(fd, stat)
+}
+
+func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
+ return fstatat(dirfd, path, stat, flags)
+}
+
+func Lstat(path string, stat *Stat_t) error {
+ return lstat(path, stat)
+}
+
+func Stat(path string, statptr *Stat_t) error {
+ return stat(path, statptr)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
index 881cacc6c..13d4321f4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
@@ -32,3 +32,50 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
+
+// In order to use only the Timespec structure, the type of Stat_t's fields
+// Atim, Mtim and Ctim is changed from StTimespec to Timespec during
+// ztypes generation.
+// On ppc64, Timespec.Nsec is an int64 while StTimespec.Nsec is an
+// int32, so the fields' values must be adjusted.
+func fixStatTimFields(stat *Stat_t) {
+ stat.Atim.Nsec >>= 32
+ stat.Mtim.Nsec >>= 32
+ stat.Ctim.Nsec >>= 32
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ err := fstat(fd, stat)
+ if err != nil {
+ return err
+ }
+ fixStatTimFields(stat)
+ return nil
+}
+
+func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
+ err := fstatat(dirfd, path, stat, flags)
+ if err != nil {
+ return err
+ }
+ fixStatTimFields(stat)
+ return nil
+}
+
+func Lstat(path string, stat *Stat_t) error {
+ err := lstat(path, stat)
+ if err != nil {
+ return err
+ }
+ fixStatTimFields(stat)
+ return nil
+}
+
+func Stat(path string, statptr *Stat_t) error {
+ err := stat(path, statptr)
+ if err != nil {
+ return err
+ }
+ fixStatTimFields(statptr)
+ return nil
+}
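A tiny worked example of the shift performed by fixStatTimFields: a 32-bit nanosecond count that the kernel leaves in the upper half of the widened 64-bit field is recovered by shifting right 32 bits (the value here is arbitrary):

package main

import "fmt"

func main() {
	var nsec int64 = 123456789 // hypothetical 32-bit nanosecond value
	raw := nsec << 32          // as it appears in the widened int64 field
	fmt.Println(raw >> 32)     // prints 123456789 again
}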
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 33c8b5f0d..97a8eef6f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -63,15 +63,6 @@ func Setgroups(gids []int) (err error) {
return setgroups(len(a), &a[0])
}
-func ReadDirent(fd int, buf []byte) (n int, err error) {
- // Final argument is (basep *uintptr) and the syscall doesn't take nil.
- // 64 bits should be enough. (32 bits isn't even on 386). Since the
- // actual system call is getdirentries64, 64 is a good guess.
- // TODO(rsc): Can we use a single global basep for all calls?
- var base = (*uintptr)(unsafe.Pointer(new(uint64)))
- return Getdirentries(fd, buf, base)
-}
-
// Wait status is 7 bits at bottom, either 0 (exited),
// 0x7F (stopped), or a signal number that caused an exit.
// The 0x80 bit is whether there was a core dump.
@@ -86,6 +77,7 @@ const (
shift = 8
exited = 0
+ killed = 9
stopped = 0x7F
)
@@ -112,6 +104,8 @@ func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
+func (w WaitStatus) Killed() bool { return w&mask == killed && syscall.Signal(w>>shift) != SIGKILL }
+
func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
func (w WaitStatus) StopSignal() syscall.Signal {
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 04042e44b..212009189 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -144,6 +144,23 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (
//sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error)
+func SysctlClockinfo(name string) (*Clockinfo, error) {
+ mib, err := sysctlmib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ n := uintptr(SizeofClockinfo)
+ var ci Clockinfo
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n != SizeofClockinfo {
+ return nil, EIO
+ }
+ return &ci, nil
+}
+
//sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) {
@@ -416,6 +433,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
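A short usage sketch for the new Darwin SysctlClockinfo, assuming the conventional kern.clockrate node (adjust the name if your target exposes clockinfo elsewhere):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	ci, err := unix.SysctlClockinfo("kern.clockrate")
	if err != nil {
		panic(err)
	}
	fmt.Printf("hz=%d tick=%d\n", ci.Hz, ci.Tick)
}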
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 891c94d7e..bf537011f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -269,6 +269,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Fstatfs(fd int, stat *Statfs_t) (err error)
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdents(fd int, buf []byte) (n int, err error)
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)
@@ -304,6 +305,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys read(fd int, p []byte) (n int, err error)
//sys Readlink(path string, buf []byte) (n int, err error)
//sys Rename(from string, to string) (err error)
+//sys Renameat(fromfd int, from string, tofd int, to string) (err error)
//sys Revoke(path string) (err error)
//sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index a7ca1ebea..c9c802df0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -362,7 +362,21 @@ func Getdents(fd int, buf []byte) (n int, err error) {
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
if supportsABI(_ino64First) {
- return getdirentries_freebsd12(fd, buf, basep)
+ if basep == nil || unsafe.Sizeof(*basep) == 8 {
+ return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep)))
+ }
+ // The freebsd12 syscall needs a 64-bit base. On 32-bit machines
+ // we can't just use the basep passed in. See #32498.
+ var base uint64 = uint64(*basep)
+ n, err = getdirentries_freebsd12(fd, buf, &base)
+ *basep = uintptr(base)
+ if base>>32 != 0 {
+ // We can't stuff the base back into a uintptr, so any
+ // future calls would be suspect. Generate an error.
+ // EIO is allowed by getdirentries.
+ err = EIO
+ }
+ return
}
// The old syscall entries are smaller than the new. Use 1/4 of the original
@@ -404,22 +418,22 @@ func roundup(x, y int) int {
func (s *Stat_t) convertFrom(old *stat_freebsd11_t) {
*s = Stat_t{
- Dev: uint64(old.Dev),
- Ino: uint64(old.Ino),
- Nlink: uint64(old.Nlink),
- Mode: old.Mode,
- Uid: old.Uid,
- Gid: old.Gid,
- Rdev: uint64(old.Rdev),
- Atim: old.Atim,
- Mtim: old.Mtim,
- Ctim: old.Ctim,
- Birthtim: old.Birthtim,
- Size: old.Size,
- Blocks: old.Blocks,
- Blksize: old.Blksize,
- Flags: old.Flags,
- Gen: uint64(old.Gen),
+ Dev: uint64(old.Dev),
+ Ino: uint64(old.Ino),
+ Nlink: uint64(old.Nlink),
+ Mode: old.Mode,
+ Uid: old.Uid,
+ Gid: old.Gid,
+ Rdev: uint64(old.Rdev),
+ Atim: old.Atim,
+ Mtim: old.Mtim,
+ Ctim: old.Ctim,
+ Btim: old.Btim,
+ Size: old.Size,
+ Blocks: old.Blocks,
+ Blksize: old.Blksize,
+ Flags: old.Flags,
+ Gen: uint64(old.Gen),
}
}
@@ -507,6 +521,70 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
+//sys ptrace(request int, pid int, addr uintptr, data int) (err error)
+
+func PtraceAttach(pid int) (err error) {
+ return ptrace(PTRACE_ATTACH, pid, 0, 0)
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace(PTRACE_CONT, pid, 1, signal)
+}
+
+func PtraceDetach(pid int) (err error) {
+ return ptrace(PTRACE_DETACH, pid, 1, 0)
+}
+
+func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) {
+ return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0)
+}
+
+func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
+ return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
+}
+
+func PtraceGetRegs(pid int, regsout *Reg) (err error) {
+ return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
+}
+
+func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)}
+ err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
+ return int(ioDesc.Len), err
+}
+
+func PtraceLwpEvents(pid int, enable int) (err error) {
+ return ptrace(PTRACE_LWPEVENTS, pid, 0, enable)
+}
+
+func PtraceLwpInfo(pid int, info uintptr) (err error) {
+ return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{})))
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return PtraceIO(PIOD_READ_D, pid, addr, out, SizeofLong)
+}
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return PtraceIO(PIOD_READ_I, pid, addr, out, SizeofLong)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return PtraceIO(PIOD_WRITE_D, pid, addr, data, SizeofLong)
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return PtraceIO(PIOD_WRITE_I, pid, addr, data, SizeofLong)
+}
+
+func PtraceSetRegs(pid int, regs *Reg) (err error) {
+ return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0)
+}
+
+func PtraceSingleStep(pid int) (err error) {
+ return ptrace(PTRACE_SINGLESTEP, pid, 1, 0)
+}
+
/*
* Exposed directly
*/
@@ -555,7 +633,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
-//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error)
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)
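The new FreeBSD ptrace wrappers compose in the usual attach / wait / inspect / detach sequence. A minimal FreeBSD-only sketch with a placeholder pid and no error recovery:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pid := 12345 // hypothetical target process ID

	if err := unix.PtraceAttach(pid); err != nil {
		panic(err)
	}
	// Wait for the target to stop before issuing further requests.
	var ws unix.WaitStatus
	if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
		panic(err)
	}

	var regs unix.Reg
	if err := unix.PtraceGetRegs(pid, &regs); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", regs)

	if err := unix.PtraceDetach(pid); err != nil {
		panic(err)
	}
}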
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 776040233..11d07ace2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,7 +13,7 @@ package unix
import (
"encoding/binary"
- "net"
+ "runtime"
"syscall"
"unsafe"
)
@@ -38,6 +38,20 @@ func Creat(path string, mode uint32) (fd int, err error) {
return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
}
+//sys FanotifyInit(flags uint, event_f_flags uint) (fd int, err error)
+//sys fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error)
+
+func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) (err error) {
+ if pathname == "" {
+ return fanotifyMark(fd, flags, mask, dirFd, nil)
+ }
+ p, err := BytePtrFromString(pathname)
+ if err != nil {
+ return err
+ }
+ return fanotifyMark(fd, flags, mask, dirFd, p)
+}
+
//sys fchmodat(dirfd int, path string, mode uint32) (err error)
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
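A minimal sketch of the fanotify pair added above: initialize a notification group and mark a mount for open events. It assumes root (CAP_SYS_ADMIN) and a kernel built with fanotify; the watched path is illustrative:

package main

import "golang.org/x/sys/unix"

func main() {
	fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Watch open events on the mount containing /tmp.
	err = unix.FanotifyMark(fd, unix.FAN_MARK_ADD|unix.FAN_MARK_MOUNT,
		unix.FAN_OPEN, unix.AT_FDCWD, "/tmp")
	if err != nil {
		panic(err)
	}
	// Events can now be read from fd as fanotify_event_metadata records.
}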
@@ -80,6 +94,12 @@ func ioctlSetTermios(fd int, req uint, value *Termios) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
+func IoctlSetRTCTime(fd int, value *RTCTime) error {
+ err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
// IoctlGetInt performs an ioctl operation which gets an integer value
// from fd, using the specified request number.
func IoctlGetInt(fd int, req uint) (int, error) {
@@ -88,6 +108,12 @@ func IoctlGetInt(fd int, req uint) (int, error) {
return value, err
}
+func IoctlGetUint32(fd int, req uint) (uint32, error) {
+ var value uint32
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@@ -100,6 +126,12 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
return &value, err
}
+func IoctlGetRTCTime(fd int) (*RTCTime, error) {
+ var value RTCTime
+ err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
func Link(oldpath string, newpath string) (err error) {
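A short sketch using the new RTC ioctl wrappers to read the hardware clock, assuming a Linux machine that exposes /dev/rtc0 (RTCTime follows struct rtc_time: Year is years since 1900, Mon is zero-based):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/rtc0", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	t, err := unix.IoctlGetRTCTime(fd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
		t.Year+1900, t.Mon+1, t.Mday, t.Hour, t.Min, t.Sec)
}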
@@ -732,7 +764,7 @@ const px_proto_oe = 0
type SockaddrPPPoE struct {
SID uint16
- Remote net.HardwareAddr
+ Remote []byte
Dev string
raw RawSockaddrPPPoX
}
@@ -883,7 +915,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
}
sa := &SockaddrPPPoE{
SID: binary.BigEndian.Uint16(pp[6:8]),
- Remote: net.HardwareAddr(pp[8:14]),
+ Remote: pp[8:14],
}
for i := 14; i < 14+IFNAMSIZ; i++ {
if pp[i] == 0 {
@@ -977,10 +1009,50 @@ func GetsockoptString(fd, level, opt int) (string, error) {
return string(buf[:vallen-1]), nil
}
+func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) {
+ var value TpacketStats
+ vallen := _Socklen(SizeofTpacketStats)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptTpacketStatsV3(fd, level, opt int) (*TpacketStatsV3, error) {
+ var value TpacketStatsV3
+ vallen := _Socklen(SizeofTpacketStatsV3)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}
+func SetsockoptPacketMreq(fd, level, opt int, mreq *PacketMreq) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+// SetsockoptSockFprog attaches a classic BPF or an extended BPF program to a
+// socket to filter incoming packets. See 'man 7 socket' for usage information.
+func SetsockoptSockFprog(fd, level, opt int, fprog *SockFprog) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(fprog), unsafe.Sizeof(*fprog))
+}
+
+func SetsockoptCanRawFilter(fd, level, opt int, filter []CanFilter) error {
+ var p unsafe.Pointer
+ if len(filter) > 0 {
+ p = unsafe.Pointer(&filter[0])
+ }
+ return setsockopt(fd, level, opt, p, uintptr(len(filter)*SizeofCanFilter))
+}
+
+func SetsockoptTpacketReq(fd, level, opt int, tp *TpacketReq) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp))
+}
+
+func SetsockoptTpacketReq3(fd, level, opt int, tp *TpacketReq3) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp))
+}
+
// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)
// KeyctlInt calls keyctl commands in which each argument is an int.
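To show the shape of the new socket-option helpers, here is a minimal sketch that attaches a one-instruction classic BPF program (accept everything) with SetsockoptSockFprog; real filters are normally produced by a BPF assembler rather than written by hand:

package main

import "golang.org/x/sys/unix"

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// BPF_RET|BPF_K with K=0xffffffff returns "accept the whole packet".
	insns := []unix.SockFilter{
		{Code: unix.BPF_RET | unix.BPF_K, K: 0xffffffff},
	}
	prog := unix.SockFprog{
		Len:    uint16(len(insns)),
		Filter: &insns[0],
	}
	if err := unix.SetsockoptSockFprog(fd, unix.SOL_SOCKET, unix.SO_ATTACH_FILTER, &prog); err != nil {
		panic(err)
	}
}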
@@ -1341,10 +1413,6 @@ func Reboot(cmd int) (err error) {
return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}
-func ReadDirent(fd int, buf []byte) (n int, err error) {
- return Getdents(fd, buf)
-}
-
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
@@ -1377,10 +1445,13 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Acct(path string) (err error)
//sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error)
//sys Adjtimex(buf *Timex) (state int, err error)
+//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error)
+//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
+//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys DeleteModule(name string, flags int) (err error)
@@ -1441,7 +1512,6 @@ func Getpgrp() (pid int) {
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
//sys read(fd int, p []byte) (n int, err error)
//sys Removexattr(path string, attr string) (err error)
-//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
//sys RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error)
//sys Setdomainname(p []byte) (err error)
@@ -1464,8 +1534,13 @@ func Setgid(uid int) (err error) {
return EOPNOTSUPP
}
+func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
+ return signalfd(fd, sigmask, _C__NSIG/8, flags)
+}
+
//sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4
//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync()
//sys Syncfs(fd int) (err error)
@@ -1594,6 +1669,82 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
return EACCES
}
+//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT
+//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT
+
+// fileHandle is the argument to nameToHandleAt and openByHandleAt. We
+// originally tried to generate it via unix/linux/types.go with "type
+// fileHandle C.struct_file_handle" but that generated empty structs
+// for mips64 and mips64le. Instead, hard code it for now (it's the
+// same everywhere else) until the mips64 generator issue is fixed.
+type fileHandle struct {
+ Bytes uint32
+ Type int32
+}
+
+// FileHandle represents the C struct file_handle used by
+// name_to_handle_at (see NameToHandleAt) and open_by_handle_at (see
+// OpenByHandleAt).
+type FileHandle struct {
+ *fileHandle
+}
+
+// NewFileHandle constructs a FileHandle.
+func NewFileHandle(handleType int32, handle []byte) FileHandle {
+ const hdrSize = unsafe.Sizeof(fileHandle{})
+ buf := make([]byte, hdrSize+uintptr(len(handle)))
+ copy(buf[hdrSize:], handle)
+ fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
+ fh.Type = handleType
+ fh.Bytes = uint32(len(handle))
+ return FileHandle{fh}
+}
+
+func (fh *FileHandle) Size() int { return int(fh.fileHandle.Bytes) }
+func (fh *FileHandle) Type() int32 { return fh.fileHandle.Type }
+func (fh *FileHandle) Bytes() []byte {
+ n := fh.Size()
+ if n == 0 {
+ return nil
+ }
+ return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n]
+}
+
+// NameToHandleAt wraps the name_to_handle_at system call; it obtains
+// a handle for a path name.
+func NameToHandleAt(dirfd int, path string, flags int) (handle FileHandle, mountID int, err error) {
+ var mid _C_int
+ // Try first with a small buffer, assuming the handle will
+ // only be 32 bytes.
+ size := uint32(32 + unsafe.Sizeof(fileHandle{}))
+ didResize := false
+ for {
+ buf := make([]byte, size)
+ fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
+ fh.Bytes = size - uint32(unsafe.Sizeof(fileHandle{}))
+ err = nameToHandleAt(dirfd, path, fh, &mid, flags)
+ if err == EOVERFLOW {
+ if didResize {
+ // We shouldn't need to resize more than once
+ return
+ }
+ didResize = true
+ size = fh.Bytes + uint32(unsafe.Sizeof(fileHandle{}))
+ continue
+ }
+ if err != nil {
+ return
+ }
+ return FileHandle{fh}, int(mid), nil
+ }
+}
+
+// OpenByHandleAt wraps the open_by_handle_at system call; it opens a
+// file via a handle as previously returned by NameToHandleAt.
+func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err error) {
+ return openByHandleAt(mountFD, handle.fileHandle, flags)
+}
+
/*
* Unimplemented
*/
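A usage sketch for the handle pair above. It needs CAP_DAC_READ_SEARCH, both paths are illustrative, and in a real program the mount fd would be chosen from /proc/self/mountinfo using the returned mount ID:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	handle, mountID, err := unix.NameToHandleAt(unix.AT_FDCWD, "/etc/hostname", 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("mount %d, handle type %d, %d bytes\n", mountID, handle.Type(), handle.Size())

	// Opening the containing directory stands in for a proper mount fd here.
	mountFD, err := unix.Open("/etc", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(mountFD)

	fd, err := unix.OpenByHandleAt(mountFD, handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	unix.Close(fd)
}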
@@ -1601,8 +1752,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Alarm
// ArchPrctl
// Brk
-// Capget
-// Capset
// ClockNanosleep
// ClockSettime
// Clone
@@ -1682,7 +1831,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Shmdt
// Shmget
// Sigaltstack
-// Signalfd
// Swapoff
// Swapon
// Sysfs
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index 74bc098ce..e2f8cf6e5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -68,6 +68,7 @@ func Pipe2(p []int, flags int) (err error) {
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index 615f2918a..87a30744d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -43,6 +43,7 @@ func Lstat(path string, stat *Stat_t) (err error) {
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index ad2bd2582..f62679443 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -19,12 +19,18 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
+//sysnb pipe(p *[2]_C_int) (err error)
+
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
+ // Try pipe2 first for Android O, then try pipe for kernel 2.6.23.
err = pipe2(&pp, 0)
+ if err == ENOSYS {
+ err = pipe(&pp)
+ }
p[0] = int(pp[0])
p[1] = int(pp[1])
return
@@ -89,6 +95,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
//sys Listen(s int, n int) (err error)
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys Pause() (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
@@ -265,3 +272,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error {
// order of their arguments.
return armSyncFileRange(fd, flags, off, n)
}
+
+//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
+
+func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
+ cmdlineLen := len(cmdline)
+ if cmdlineLen > 0 {
+ // Account for the additional NULL byte added by
+ // BytePtrFromString in kexecFileLoad. The kexec_file_load
+ // syscall expects a NULL-terminated string.
+ cmdlineLen++
+ }
+ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index fa5a9a6f6..cb20b15d5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -30,6 +30,7 @@ func EpollCreate(size int) (fd int, err error) {
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
@@ -207,3 +208,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
}
return ppoll(&fds[0], len(fds), ts, nil)
}
+
+//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
+
+func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
+ cmdlineLen := len(cmdline)
+ if cmdlineLen > 0 {
+ // Account for the additional NULL byte added by
+ // BytePtrFromString in kexecFileLoad. The kexec_file_load
+ // syscall expects a NULL-terminated string.
+ cmdlineLen++
+ }
+ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index 18541dc57..b3b21ec1e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -24,6 +24,7 @@ package unix
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index 99e0e999a..5144d4e13 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -28,6 +28,7 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys Setfsgid(gid int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 41451854b..0a100b66a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -30,6 +30,7 @@ package unix
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 44aa1227a..6230f6405 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -207,3 +207,20 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
}
return ppoll(&fds[0], len(fds), ts, nil)
}
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0)
+}
+
+//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
+
+func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
+ cmdlineLen := len(cmdline)
+ if cmdlineLen > 0 {
+ // Account for the additional NULL byte added by
+ // BytePtrFromString in kexecFileLoad. The kexec_file_load
+ // syscall expects a NULL-terminated string.
+ cmdlineLen++
+ }
+ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index f52f148f9..f81dbdc9c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -30,6 +30,7 @@ import (
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index 72e64187d..b69565616 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -26,6 +26,7 @@ package unix
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 5240e16e4..45377107a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -120,9 +120,30 @@ func Pipe(p []int) (err error) {
return
}
-//sys getdents(fd int, buf []byte) (n int, err error)
+//sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- return getdents(fd, buf)
+ n, err = Getdents(fd, buf)
+ if err != nil || basep == nil {
+ return
+ }
+
+ var off int64
+ off, err = Seek(fd, 0, 1 /* SEEK_CUR */)
+ if err != nil {
+ *basep = ^uintptr(0)
+ return
+ }
+ *basep = uintptr(off)
+ if unsafe.Sizeof(*basep) == 8 {
+ return
+ }
+ if off>>32 != 0 {
+ // We can't stuff the offset back into a uintptr, so any
+ // future calls would be suspect. Generate an error.
+ // EIO is allowed by getdirentries.
+ err = EIO
+ }
+ return
}
const ImplementsGetwd = true
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
new file mode 100644
index 000000000..f3434465a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,netbsd
+
+package unix
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: int32(usec)}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 687999549..4f34d6d03 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -43,6 +43,23 @@ func nametomib(name string) (mib []_C_int, err error) {
return nil, EINVAL
}
+func SysctlClockinfo(name string) (*Clockinfo, error) {
+ mib, err := sysctlmib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ n := uintptr(SizeofClockinfo)
+ var ci Clockinfo
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n != SizeofClockinfo {
+ return nil, EIO
+ }
+ return &ci, nil
+}
+
func SysctlUvmexp(name string) (*Uvmexp, error) {
mib, err := sysctlmib(name)
if err != nil {
@@ -72,9 +89,30 @@ func Pipe(p []int) (err error) {
return
}
-//sys getdents(fd int, buf []byte) (n int, err error)
+//sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- return getdents(fd, buf)
+ n, err = Getdents(fd, buf)
+ if err != nil || basep == nil {
+ return
+ }
+
+ var off int64
+ off, err = Seek(fd, 0, 1 /* SEEK_CUR */)
+ if err != nil {
+ *basep = ^uintptr(0)
+ return
+ }
+ *basep = uintptr(off)
+ if unsafe.Sizeof(*basep) == 8 {
+ return
+ }
+ if off>>32 != 0 {
+ // We can't stuff the offset back into a uintptr, so any
+ // future calls would be suspect. Generate an error.
+		// EIO is allowed by getdirentries.
+ err = EIO
+ }
+ return
}
const ImplementsGetwd = true
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
new file mode 100644
index 000000000..0fb39cf5e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,openbsd
+
+package unix
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: usec}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of openbsd/arm64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index e47801275..9147ba152 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -189,6 +189,7 @@ func Setgroups(gids []int) (err error) {
return setgroups(len(a), &a[0])
}
+// ReadDirent reads directory entries from fd and writes them into buf.
func ReadDirent(fd int, buf []byte) (n int, err error) {
// Final argument is (basep *uintptr) and the syscall doesn't take nil.
// TODO(rsc): Can we use a single global basep for all calls?
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 33583a22b..3de37566c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -28,6 +28,11 @@ var (
errENOENT error = syscall.ENOENT
)
+var (
+ signalNameMapOnce sync.Once
+ signalNameMap map[string]syscall.Signal
+)
+
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
@@ -66,6 +71,19 @@ func SignalName(s syscall.Signal) string {
return ""
}
+// SignalNum returns the syscall.Signal for signal named s,
+// or 0 if a signal with such name is not found.
+// The signal name should start with "SIG".
+func SignalNum(s string) syscall.Signal {
+ signalNameMapOnce.Do(func() {
+ signalNameMap = make(map[string]syscall.Signal)
+ for _, signal := range signalList {
+ signalNameMap[signal.name] = signal.num
+ }
+ })
+ return signalNameMap[s]
+}
+
// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
func clen(n []byte) int {
i := bytes.IndexByte(n, 0)
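SignalNum is the inverse of SignalName; unknown names come back as 0. A quick sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	sig := unix.SignalNum("SIGTERM")
	fmt.Println(int(sig), unix.SignalName(sig)) // 15 SIGTERM on most platforms
	if unix.SignalNum("SIGDOESNOTEXIST") == 0 {
		fmt.Println("unknown names map to 0")
	}
}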
@@ -276,6 +294,13 @@ func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) {
return &tv, err
}
+func GetsockoptUint64(fd, level, opt int) (value uint64, err error) {
+ var n uint64
+ vallen := _Socklen(8)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return n, err
+}
+
func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
var rsa RawSockaddrAny
var len _Socklen = SizeofSockaddrAny
@@ -326,13 +351,21 @@ func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) {
}
func SetsockoptString(fd, level, opt int, s string) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s)))
+ var p unsafe.Pointer
+ if len(s) > 0 {
+ p = unsafe.Pointer(&[]byte(s)[0])
+ }
+ return setsockopt(fd, level, opt, p, uintptr(len(s)))
}
func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) {
return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
}
+func SetsockoptUint64(fd, level, opt int, value uint64) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value), 8)
+}
+
func Socket(domain, typ, proto int) (fd int, err error) {
if domain == AF_INET6 && SocketDisableIPv6 {
return -1, EAFNOSUPPORT
@@ -377,3 +410,22 @@ func SetNonblock(fd int, nonblocking bool) (err error) {
func Exec(argv0 string, argv []string, envv []string) error {
return syscall.Exec(argv0, argv, envv)
}
+
+// Lutimes sets the access and modification times tv on path. If path refers to
+// a symlink, it is not dereferenced and the timestamps are set on the symlink.
+// If tv is nil, the access and modification times are set to the current time.
+// Otherwise tv must contain exactly 2 elements, with access time as the first
+// element and modification time as the second element.
+func Lutimes(path string, tv []Timeval) error {
+ if tv == nil {
+ return UtimesNanoAt(AT_FDCWD, path, nil, AT_SYMLINK_NOFOLLOW)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ ts := []Timespec{
+ NsecToTimespec(TimevalToNsec(tv[0])),
+ NsecToTimespec(TimevalToNsec(tv[1])),
+ }
+ return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW)
+}
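A short usage sketch for Lutimes, stamping a symlink itself rather than its target (the path and timestamp are illustrative):

package main

import "golang.org/x/sys/unix"

func main() {
	// 2019-01-01 00:00:00 UTC for both access and modification time.
	tv := []unix.Timeval{
		{Sec: 1546300800},
		{Sec: 1546300800},
	}
	if err := unix.Lutimes("/tmp/example-symlink", tv); err != nil {
		panic(err)
	}
}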
diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go
new file mode 100644
index 000000000..40d2beede
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_aix.go
@@ -0,0 +1,237 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+// +build aix
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/limits.h>
+#include <sys/un.h>
+#include <utime.h>
+#include <sys/utsname.h>
+#include <sys/poll.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/termio.h>
+#include <sys/ioctl.h>
+
+#include <termios.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+
+
+#include <dirent.h>
+#include <fcntl.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+ PathMax = C.PATH_MAX
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+type off64 C.off64_t
+type off C.off_t
+type Mode_t C.mode_t
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+type Timex C.struct_timex
+
+type Time_t C.time_t
+
+type Tms C.struct_tms
+
+type Utimbuf C.struct_utimbuf
+
+type Timezone C.struct_timezone
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit64
+
+type Pid_t C.pid_t
+
+type _Gid_t C.gid_t
+
+type dev_t C.dev_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type StatxTimestamp C.struct_statx_timestamp
+
+type Statx_t C.struct_statx
+
+type Dirent C.struct_dirent
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Cmsghdr C.struct_cmsghdr
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type Linger C.struct_linger
+
+type Msghdr C.struct_msghdr
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+)
+
+type IfMsgHdr C.struct_if_msghdr
+
+// Misc
+
+type FdSet C.fd_set
+
+type Utsname C.struct_utsname
+
+type Ustat_t C.struct_ustat
+
+type Sigset_t C.sigset_t
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Termio C.struct_termio
+
+type Winsize C.struct_winsize
+
+//poll
+
+type PollFd struct {
+ Fd int32
+ Events uint16
+ Revents uint16
+}
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+//flock_t
+
+type Flock_t C.struct_flock64
+
+// Statfs
+
+type Fsid_t C.struct_fsid_t
+type Fsid64_t C.struct_fsid64_t
+
+type Statfs_t C.struct_statfs
+
+const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
new file mode 100644
index 000000000..155c2e692
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_darwin.go
@@ -0,0 +1,283 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define __DARWIN_UNIX03 0
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include <dirent.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <unistd.h>
+#include <mach/mach.h>
+#include <mach/message.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_var.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat64
+
+type Statfs_t C.struct_statfs64
+
+type Flock_t C.struct_flock
+
+type Fstore_t C.struct_fstore
+
+type Radvisory_t C.struct_radvisory
+
+type Fbootstraptransfer_t C.struct_fbootstraptransfer
+
+type Log2phys_t C.struct_log2phys
+
+type Fsid C.struct_fsid
+
+type Dirent C.struct_dirent
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet4Pktinfo C.struct_in_pktinfo
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
+ SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfmaMsghdr C.struct_ifma_msghdr
+
+type IfmaMsghdr2 C.struct_ifma_msghdr2
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Winsize C.struct_winsize
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+// uname
+
+type Utsname C.struct_utsname
+
+// Clockinfo
+
+const SizeofClockinfo = C.sizeof_struct_clockinfo
+
+type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
new file mode 100644
index 000000000..3365dd79d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go
@@ -0,0 +1,263 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+#include <dirent.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Flock_t C.struct_flock
+
+type Dirent C.struct_dirent
+
+type Fsid C.struct_fsid
+
+// File system limits
+
+const (
+ PathMax = C.PATH_MAX
+)
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
+ SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfmaMsghdr C.struct_ifma_msghdr
+
+type IfAnnounceMsghdr C.struct_if_announcemsghdr
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Winsize C.struct_winsize
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+// Uname
+
+type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go
new file mode 100644
index 000000000..a121dc336
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_freebsd.go
@@ -0,0 +1,400 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define _WANT_FREEBSD11_STAT 1
+#define _WANT_FREEBSD11_STATFS 1
+#define _WANT_FREEBSD11_DIRENT 1
+#define _WANT_FREEBSD11_KEVENT 1
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/capsicum.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
+// See /usr/include/net/if.h.
+struct if_data8 {
+ u_char ifi_type;
+ u_char ifi_physical;
+ u_char ifi_addrlen;
+ u_char ifi_hdrlen;
+ u_char ifi_link_state;
+ u_char ifi_spare_char1;
+ u_char ifi_spare_char2;
+ u_char ifi_datalen;
+ u_long ifi_mtu;
+ u_long ifi_metric;
+ u_long ifi_baudrate;
+ u_long ifi_ipackets;
+ u_long ifi_ierrors;
+ u_long ifi_opackets;
+ u_long ifi_oerrors;
+ u_long ifi_collisions;
+ u_long ifi_ibytes;
+ u_long ifi_obytes;
+ u_long ifi_imcasts;
+ u_long ifi_omcasts;
+ u_long ifi_iqdrops;
+ u_long ifi_noproto;
+ u_long ifi_hwassist;
+// FIXME: these are now unions, so maybe need to change definitions?
+#undef ifi_epoch
+ time_t ifi_epoch;
+#undef ifi_lastchange
+ struct timeval ifi_lastchange;
+};
+
+// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
+// See /usr/include/net/if.h.
+struct if_msghdr8 {
+ u_short ifm_msglen;
+ u_char ifm_version;
+ u_char ifm_type;
+ int ifm_addrs;
+ int ifm_flags;
+ u_short ifm_index;
+ struct if_data8 ifm_data;
+};
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+const (
+ _statfsVersion = C.STATFS_VERSION
+ _dirblksiz = C.DIRBLKSIZ
+)
+
+type Stat_t C.struct_stat
+
+type stat_freebsd11_t C.struct_freebsd11_stat
+
+type Statfs_t C.struct_statfs
+
+type statfs_freebsd11_t C.struct_freebsd11_statfs
+
+type Flock_t C.struct_flock
+
+type Dirent C.struct_dirent
+
+type dirent_freebsd11 C.struct_freebsd11_dirent
+
+type Fsid C.struct_fsid
+
+// File system limits
+
+const (
+ PathMax = C.PATH_MAX
+)
+
+// Advice to Fadvise
+
+const (
+ FADV_NORMAL = C.POSIX_FADV_NORMAL
+ FADV_RANDOM = C.POSIX_FADV_RANDOM
+ FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
+ FADV_WILLNEED = C.POSIX_FADV_WILLNEED
+ FADV_DONTNEED = C.POSIX_FADV_DONTNEED
+ FADV_NOREUSE = C.POSIX_FADV_NOREUSE
+)
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPMreqn C.struct_ip_mreqn
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_ATTACH = C.PT_ATTACH
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_DETACH = C.PT_DETACH
+ PTRACE_GETFPREGS = C.PT_GETFPREGS
+ PTRACE_GETFSBASE = C.PT_GETFSBASE
+ PTRACE_GETLWPLIST = C.PT_GETLWPLIST
+ PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
+ PTRACE_GETREGS = C.PT_GETREGS
+ PTRACE_GETXSTATE = C.PT_GETXSTATE
+ PTRACE_IO = C.PT_IO
+ PTRACE_KILL = C.PT_KILL
+ PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
+ PTRACE_LWPINFO = C.PT_LWPINFO
+ PTRACE_SETFPREGS = C.PT_SETFPREGS
+ PTRACE_SETREGS = C.PT_SETREGS
+ PTRACE_SINGLESTEP = C.PT_STEP
+ PTRACE_TRACEME = C.PT_TRACE_ME
+)
+
+const (
+ PIOD_READ_D = C.PIOD_READ_D
+ PIOD_WRITE_D = C.PIOD_WRITE_D
+ PIOD_READ_I = C.PIOD_READ_I
+ PIOD_WRITE_I = C.PIOD_WRITE_I
+)
+
+const (
+ PL_FLAG_BORN = C.PL_FLAG_BORN
+ PL_FLAG_EXITED = C.PL_FLAG_EXITED
+ PL_FLAG_SI = C.PL_FLAG_SI
+)
+
+const (
+ TRAP_BRKPT = C.TRAP_BRKPT
+ TRAP_TRACE = C.TRAP_TRACE
+)
+
+type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
+
+type __Siginfo C.struct___siginfo
+
+type Sigset_t C.sigset_t
+
+type Reg C.struct_reg
+
+type FpReg C.struct_fpreg
+
+type PtraceIoDesc C.struct_ptrace_io_desc
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent_freebsd11
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ sizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
+ sizeofIfData = C.sizeof_struct_if_data
+ SizeofIfData = C.sizeof_struct_if_data8
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
+ SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type ifMsghdr C.struct_if_msghdr
+
+type IfMsghdr C.struct_if_msghdr8
+
+type ifData C.struct_if_data
+
+type IfData C.struct_if_data8
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfmaMsghdr C.struct_ifma_msghdr
+
+type IfAnnounceMsghdr C.struct_if_announcemsghdr
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+ SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfZbuf C.struct_bpf_zbuf
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+type BpfZbufHeader C.struct_bpf_zbuf_header
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Winsize C.struct_winsize
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLINIGNEOF = C.POLLINIGNEOF
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+// Capabilities
+
+type CapRights C.struct_cap_rights
+
+// Uname
+
+type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go
new file mode 100644
index 000000000..4a96d72c3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_netbsd.go
@@ -0,0 +1,290 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+#include <dirent.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Flock_t C.struct_flock
+
+type Dirent C.struct_dirent
+
+type Fsid C.fsid_t
+
+// File system limits
+
+const (
+ PathMax = C.PATH_MAX
+)
+
+// Advice to Fadvise
+
+const (
+ FADV_NORMAL = C.POSIX_FADV_NORMAL
+ FADV_RANDOM = C.POSIX_FADV_RANDOM
+ FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
+ FADV_WILLNEED = C.POSIX_FADV_WILLNEED
+ FADV_DONTNEED = C.POSIX_FADV_DONTNEED
+ FADV_NOREUSE = C.POSIX_FADV_NOREUSE
+)
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfAnnounceMsghdr C.struct_if_announcemsghdr
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+type Mclpool C.struct_mclpool
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+type BpfTimeval C.struct_bpf_timeval
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Winsize C.struct_winsize
+
+type Ptmget C.struct_ptmget
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+// Sysctl
+
+type Sysctlnode C.struct_sysctlnode
+
+// Uname
+
+type Utsname C.struct_utsname
+
+// Clockinfo
+
+const SizeofClockinfo = C.sizeof_struct_clockinfo
+
+type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go
new file mode 100644
index 000000000..775cb57dc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_openbsd.go
@@ -0,0 +1,283 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+#include <dirent.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <uvm/uvmexp.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Flock_t C.struct_flock
+
+type Dirent C.struct_dirent
+
+type Fsid C.fsid_t
+
+// File system limits
+
+const (
+ PathMax = C.PATH_MAX
+)
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfAnnounceMsghdr C.struct_if_announcemsghdr
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+type Mclpool C.struct_mclpool
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+type BpfTimeval C.struct_bpf_timeval
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Winsize C.struct_winsize
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
+
+// Signal Sets
+
+type Sigset_t C.sigset_t
+
+// Uname
+
+type Utsname C.struct_utsname
+
+// Uvmexp
+
+const SizeofUvmexp = C.sizeof_struct_uvmexp
+
+type Uvmexp C.struct_uvmexp
+
+// Clockinfo
+
+const SizeofClockinfo = C.sizeof_struct_clockinfo
+
+type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go
new file mode 100644
index 000000000..2b716f934
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_solaris.go
@@ -0,0 +1,266 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See README.md
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+// These defines ensure that builds done on newer versions of Solaris are
+// backwards-compatible with older versions of Solaris and
+// OpenSolaris-based derivatives.
+#define __USE_SUNOS_SOCKETS__ // msghdr
+#define __USE_LEGACY_PROTOTYPES__ // iovec
+#include <dirent.h>
+#include <fcntl.h>
+#include <netdb.h>
+#include <limits.h>
+#include <poll.h>
+#include <signal.h>
+#include <termios.h>
+#include <termio.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+#include <ustat.h>
+#include <utime.h>
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics
+
+const (
+ SizeofPtr = C.sizeofPtr
+ SizeofShort = C.sizeof_short
+ SizeofInt = C.sizeof_int
+ SizeofLong = C.sizeof_long
+ SizeofLongLong = C.sizeof_longlong
+ PathMax = C.PATH_MAX
+ MaxHostNameLen = C.MAXHOSTNAMELEN
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+type Tms C.struct_tms
+
+type Utimbuf C.struct_utimbuf
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Flock_t C.struct_flock
+
+type Dirent C.struct_dirent
+
+// Filesystems
+
+type _Fsblkcnt_t C.fsblkcnt_t
+
+type Statvfs_t C.struct_statvfs
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Select
+
+type FdSet C.fd_set
+
+// Misc
+
+type Utsname C.struct_utsname
+
+type Ustat_t C.struct_ustat
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_EACCESS = C.AT_EACCESS
+)
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfTimeval C.struct_bpf_timeval
+
+type BpfHdr C.struct_bpf_hdr
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+type Termio C.struct_termio
+
+type Winsize C.struct_winsize
+
+// poll
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLIN = C.POLLIN
+ POLLNVAL = C.POLLNVAL
+ POLLOUT = C.POLLOUT
+ POLLPRI = C.POLLPRI
+ POLLRDBAND = C.POLLRDBAND
+ POLLRDNORM = C.POLLRDNORM
+ POLLWRBAND = C.POLLWRBAND
+ POLLWRNORM = C.POLLWRNORM
+)
diff --git a/vendor/golang.org/x/sys/unix/openbsd_unveil.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go
index aebc2dc57..168d5ae77 100644
--- a/vendor/golang.org/x/sys/unix/openbsd_unveil.go
+++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-// +build openbsd
-
package unix

import (
diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
index 4b7b96502..1def8a581 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
@@ -926,6 +926,8 @@ const (
TCSETSF = 0x5404
TCSETSW = 0x5403
TCXONC = 0x540b
+ TIMER_ABSTIME = 0x3e7
+ TIMER_MAX = 0x20
TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
index ed04fd1b7..03187dea9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
@@ -3,7 +3,7 @@

// +build ppc64,aix

-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -maix64 _const.go

package unix
@@ -926,6 +926,8 @@ const (
TCSETSF = 0x5404
TCSETSW = 0x5403
TCXONC = 0x540b
+ TIMER_ABSTIME = 0x3e7
+ TIMER_MAX = 0x20
TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c62bb9490..1db2f00de 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80041270
BLKBSZSET = 0x40041271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,6 +600,60 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
@@ -493,6 +661,7 @@ const (
FFDLY = 0x8000
FLUSHO = 0x1000
FP_XSTATE_MAGIC2 = 0x46505845
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -514,7 +683,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -707,6 +876,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -778,6 +948,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1004,6 +1175,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1132,7 +1312,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1396,6 +1576,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1433,6 +1619,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1537,6 +1724,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1744,6 +1938,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1894,6 +2090,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1905,6 +2102,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1931,6 +2139,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -1942,9 +2152,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2037,7 +2255,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2052,6 +2273,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2071,6 +2293,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2085,6 +2310,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2101,6 +2327,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2203,6 +2430,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2230,8 +2458,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2380,6 +2610,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 93f65d7d1..8a9d2eadf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,6 +600,60 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
@@ -493,6 +661,7 @@ const (
FFDLY = 0x8000
FLUSHO = 0x1000
FP_XSTATE_MAGIC2 = 0x46505845
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -514,7 +683,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -707,6 +876,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -778,6 +948,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1004,6 +1175,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1132,7 +1312,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1396,6 +1576,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1433,6 +1619,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1538,6 +1725,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1745,6 +1939,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1895,6 +2091,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1906,6 +2103,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1932,6 +2140,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -1943,9 +2153,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2038,7 +2256,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2053,6 +2274,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2072,6 +2294,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2086,6 +2311,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2102,6 +2328,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2204,6 +2431,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2231,8 +2459,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2380,6 +2610,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index ccc57676f..2e7455814 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80041270
BLKBSZSET = 0x40041271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x1000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1544,6 +1731,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1751,6 +1945,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1901,6 +2097,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1912,6 +2109,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1938,6 +2146,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -1949,9 +2159,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2044,7 +2262,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2059,6 +2280,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2078,6 +2300,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2092,6 +2317,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2108,6 +2334,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2210,6 +2437,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2237,8 +2465,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2386,6 +2616,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 9c57337c6..b1dc633a2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -488,6 +602,60 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
@@ -495,6 +663,7 @@ const (
FFDLY = 0x8000
FLUSHO = 0x1000
FPSIMD_MAGIC = 0x46508001
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -516,7 +685,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -709,6 +878,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -780,6 +950,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1005,6 +1176,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1133,7 +1313,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1397,6 +1577,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1434,6 +1620,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1528,6 +1715,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1735,6 +1929,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1885,6 +2081,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1896,6 +2093,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1922,6 +2130,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -1933,9 +2143,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2029,7 +2247,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2044,6 +2265,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2063,6 +2285,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2077,6 +2302,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2093,6 +2319,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2195,6 +2422,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2222,8 +2450,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2371,6 +2601,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 66cdbfd58..ad4d9afb6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40041270
BLKBSZSET = 0x80041271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x2000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x40000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1537,6 +1724,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1744,6 +1938,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x80
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1894,6 +2090,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0xe
@@ -1905,6 +2102,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x1007
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1931,6 +2139,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28
@@ -1942,10 +2152,18 @@ const (
SO_SNDBUFFORCE = 0x1f
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x1005
SO_STYLE = 0x1008
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2037,7 +2255,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2052,6 +2273,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2071,6 +2293,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2085,6 +2310,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSBRKP = 0x5486
@@ -2098,6 +2324,7 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
@@ -2205,6 +2432,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2232,8 +2460,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2382,6 +2612,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 038cfeb7d..fe2965028 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x2000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x40000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1537,6 +1724,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1744,6 +1938,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x80
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1894,6 +2090,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0xe
@@ -1905,6 +2102,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x1007
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1931,6 +2139,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28
@@ -1942,10 +2152,18 @@ const (
SO_SNDBUFFORCE = 0x1f
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x1005
SO_STYLE = 0x1008
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2037,7 +2255,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2052,6 +2273,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2071,6 +2293,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2085,6 +2310,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSBRKP = 0x5486
@@ -2098,6 +2324,7 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
@@ -2205,6 +2432,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2232,8 +2460,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2382,6 +2612,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 2b9af2f5e..608878303 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x2000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x40000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1537,6 +1724,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1744,6 +1938,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x80
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1894,6 +2090,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0xe
@@ -1905,6 +2102,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x1007
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1931,6 +2139,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28
@@ -1942,10 +2152,18 @@ const (
SO_SNDBUFFORCE = 0x1f
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x1005
SO_STYLE = 0x1008
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2037,7 +2255,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2052,6 +2273,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2071,6 +2293,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2085,6 +2310,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSBRKP = 0x5486
@@ -2098,6 +2324,7 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
@@ -2205,6 +2432,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2232,8 +2460,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2382,6 +2612,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index d117b6367..4cf9ddfad 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40041270
BLKBSZSET = 0x80041271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x2000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x40000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1537,6 +1724,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1744,6 +1938,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x80
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1894,6 +2090,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0xe
@@ -1905,6 +2102,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x1007
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1931,6 +2139,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28
@@ -1942,10 +2152,18 @@ const (
SO_SNDBUFFORCE = 0x1f
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x1005
SO_STYLE = 0x1008
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2037,7 +2255,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2052,6 +2273,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2071,6 +2293,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2085,6 +2310,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSBRKP = 0x5486
@@ -2098,6 +2324,7 @@ const (
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
@@ -2205,6 +2432,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2232,8 +2460,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2382,6 +2612,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index ed12556bf..374e3007f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0xff
CBAUDEX = 0x0
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x3000
CREAD = 0x800
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x4000
FFDLY = 0x4000
FLUSHO = 0x800000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1001,6 +1172,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x20000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x2000
MCL_FUTURE = 0x4000
MCL_ONFAULT = 0x8000
@@ -1129,7 +1309,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1396,6 +1576,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1433,6 +1619,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1595,6 +1782,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1802,6 +1996,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1952,6 +2148,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1963,6 +2160,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1989,6 +2197,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVTIMEO = 0x12
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x12
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -2000,9 +2210,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x11
SO_SNDTIMEO = 0x13
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x13
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2093,7 +2311,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2108,6 +2329,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2127,6 +2349,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2141,6 +2366,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x2000741d
TCSBRKP = 0x5425
@@ -2151,6 +2377,7 @@ const (
TCSETSF = 0x802c7416
TCSETSW = 0x802c7415
TCXONC = 0x2000741e
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2265,6 +2492,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2292,8 +2520,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2441,6 +2671,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 4aa4fa9c9..badf14102 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0xff
CBAUDEX = 0x0
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x3000
CREAD = 0x800
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x4000
FFDLY = 0x4000
FLUSHO = 0x800000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1001,6 +1172,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x20000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x2000
MCL_FUTURE = 0x4000
MCL_ONFAULT = 0x8000
@@ -1129,7 +1309,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1396,6 +1576,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1433,6 +1619,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1595,6 +1782,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1802,6 +1996,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1952,6 +2148,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1963,6 +2160,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1989,6 +2197,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVTIMEO = 0x12
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x12
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -2000,9 +2210,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x11
SO_SNDTIMEO = 0x13
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x13
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2093,7 +2311,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2108,6 +2329,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2127,6 +2349,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2141,6 +2366,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x2000741d
TCSBRKP = 0x5425
@@ -2151,6 +2377,7 @@ const (
TCSETSF = 0x802c7416
TCSETSW = 0x802c7415
TCXONC = 0x2000741e
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2265,6 +2492,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2292,8 +2520,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2441,6 +2671,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index cfe9ef8b1..0ce8c7eff 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x1000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1525,6 +1712,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1732,6 +1926,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1882,6 +2078,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1893,6 +2090,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1919,6 +2127,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -1930,9 +2140,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2025,7 +2243,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2040,6 +2261,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2059,6 +2281,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2073,6 +2298,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2089,6 +2315,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2191,6 +2418,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2218,8 +2446,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2367,6 +2597,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 114966110..47675125a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -174,6 +174,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
@@ -196,10 +197,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -207,8 +257,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -222,20 +280,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -263,6 +334,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -319,6 +429,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -486,12 +600,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x1000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -513,7 +682,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -706,6 +875,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -777,6 +947,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1002,6 +1173,15 @@ const (
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
@@ -1130,7 +1310,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1394,6 +1574,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1431,6 +1617,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1598,6 +1785,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1805,6 +1999,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1955,6 +2151,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
@@ -1966,6 +2163,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
@@ -1992,6 +2200,8 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
@@ -2003,9 +2213,17 @@ const (
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2098,7 +2316,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2113,6 +2334,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2132,6 +2354,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2146,6 +2371,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
@@ -2162,6 +2388,7 @@ const (
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
@@ -2264,6 +2491,7 @@ const (
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
@@ -2291,8 +2519,10 @@ const (
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
@@ -2440,6 +2670,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 036f3247c..a46fc9b43 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -41,7 +41,7 @@ const (
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x2c
+ AF_MAX = 0x2d
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
@@ -177,6 +177,7 @@ const (
B9600 = 0xd
BALLOON_KVM_MAGIC = 0x13661366
BDEVFS_MAGIC = 0x62646576
+ BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
@@ -199,10 +200,59 @@ const (
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
+ BPF_ALU64 = 0x7
BPF_AND = 0x50
+ BPF_ANY = 0x0
+ BPF_ARSH = 0xc0
BPF_B = 0x10
+ BPF_BUILD_ID_SIZE = 0x14
+ BPF_CALL = 0x80
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
+ BPF_DW = 0x18
+ BPF_END = 0xd0
+ BPF_EXIST = 0x2
+ BPF_EXIT = 0x90
+ BPF_FROM_BE = 0x8
+ BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
+ BPF_F_ALLOW_MULTI = 0x2
+ BPF_F_ALLOW_OVERRIDE = 0x1
+ BPF_F_ANY_ALIGNMENT = 0x2
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_INGRESS = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_QUERY_EFFECTIVE = 0x1
+ BPF_F_RDONLY = 0x8
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_STRICT_ALIGNMENT = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_USER_STACK = 0x100
+ BPF_F_WRONLY = 0x10
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -210,8 +260,16 @@ const (
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
+ BPF_JLE = 0xb0
+ BPF_JLT = 0xa0
BPF_JMP = 0x5
+ BPF_JMP32 = 0x6
+ BPF_JNE = 0x50
BPF_JSET = 0x40
+ BPF_JSGE = 0x70
+ BPF_JSGT = 0x60
+ BPF_JSLE = 0xd0
+ BPF_JSLT = 0xc0
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
@@ -225,20 +283,33 @@ const (
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
+ BPF_MOV = 0xb0
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
+ BPF_NOEXIST = 0x1
+ BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
+ BPF_PSEUDO_CALL = 0x1
+ BPF_PSEUDO_MAP_FD = 0x1
BPF_RET = 0x6
BPF_RSH = 0x70
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
+ BPF_TAG_SIZE = 0x8
BPF_TAX = 0x0
+ BPF_TO_BE = 0x8
+ BPF_TO_LE = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
+ BPF_XADD = 0xc0
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
@@ -266,6 +337,45 @@ const (
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
+ CAP_AUDIT_CONTROL = 0x1e
+ CAP_AUDIT_READ = 0x25
+ CAP_AUDIT_WRITE = 0x1d
+ CAP_BLOCK_SUSPEND = 0x24
+ CAP_CHOWN = 0x0
+ CAP_DAC_OVERRIDE = 0x1
+ CAP_DAC_READ_SEARCH = 0x2
+ CAP_FOWNER = 0x3
+ CAP_FSETID = 0x4
+ CAP_IPC_LOCK = 0xe
+ CAP_IPC_OWNER = 0xf
+ CAP_KILL = 0x5
+ CAP_LAST_CAP = 0x25
+ CAP_LEASE = 0x1c
+ CAP_LINUX_IMMUTABLE = 0x9
+ CAP_MAC_ADMIN = 0x21
+ CAP_MAC_OVERRIDE = 0x20
+ CAP_MKNOD = 0x1b
+ CAP_NET_ADMIN = 0xc
+ CAP_NET_BIND_SERVICE = 0xa
+ CAP_NET_BROADCAST = 0xb
+ CAP_NET_RAW = 0xd
+ CAP_SETFCAP = 0x1f
+ CAP_SETGID = 0x6
+ CAP_SETPCAP = 0x8
+ CAP_SETUID = 0x7
+ CAP_SYSLOG = 0x22
+ CAP_SYS_ADMIN = 0x15
+ CAP_SYS_BOOT = 0x16
+ CAP_SYS_CHROOT = 0x12
+ CAP_SYS_MODULE = 0x10
+ CAP_SYS_NICE = 0x17
+ CAP_SYS_PACCT = 0x14
+ CAP_SYS_PTRACE = 0x13
+ CAP_SYS_RAWIO = 0x11
+ CAP_SYS_RESOURCE = 0x18
+ CAP_SYS_TIME = 0x19
+ CAP_SYS_TTY_CONFIG = 0x1a
+ CAP_WAKE_ALARM = 0x23
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
@@ -322,6 +432,10 @@ const (
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
+ CRYPTO_MAX_NAME = 0x40
+ CRYPTO_MSG_MAX = 0x15
+ CRYPTO_NR_MSGTYPES = 0x6
+ CRYPTO_REPORT_MAXSIZE = 0x160
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
@@ -490,12 +604,67 @@ const (
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
+ FANOTIFY_METADATA_VERSION = 0x3
+ FAN_ACCESS = 0x1
+ FAN_ACCESS_PERM = 0x20000
+ FAN_ALLOW = 0x1
+ FAN_ALL_CLASS_BITS = 0xc
+ FAN_ALL_EVENTS = 0x3b
+ FAN_ALL_INIT_FLAGS = 0x3f
+ FAN_ALL_MARK_FLAGS = 0xff
+ FAN_ALL_OUTGOING_EVENTS = 0x3403b
+ FAN_ALL_PERM_EVENTS = 0x30000
+ FAN_ATTRIB = 0x4
+ FAN_AUDIT = 0x10
+ FAN_CLASS_CONTENT = 0x4
+ FAN_CLASS_NOTIF = 0x0
+ FAN_CLASS_PRE_CONTENT = 0x8
+ FAN_CLOEXEC = 0x1
+ FAN_CLOSE = 0x18
+ FAN_CLOSE_NOWRITE = 0x10
+ FAN_CLOSE_WRITE = 0x8
+ FAN_CREATE = 0x100
+ FAN_DELETE = 0x200
+ FAN_DELETE_SELF = 0x400
+ FAN_DENY = 0x2
+ FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_METADATA_LEN = 0x18
+ FAN_EVENT_ON_CHILD = 0x8000000
+ FAN_MARK_ADD = 0x1
+ FAN_MARK_DONT_FOLLOW = 0x4
+ FAN_MARK_FILESYSTEM = 0x100
+ FAN_MARK_FLUSH = 0x80
+ FAN_MARK_IGNORED_MASK = 0x20
+ FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+ FAN_MARK_INODE = 0x0
+ FAN_MARK_MOUNT = 0x10
+ FAN_MARK_ONLYDIR = 0x8
+ FAN_MARK_REMOVE = 0x2
+ FAN_MODIFY = 0x2
+ FAN_MOVE = 0xc0
+ FAN_MOVED_FROM = 0x40
+ FAN_MOVED_TO = 0x80
+ FAN_MOVE_SELF = 0x800
+ FAN_NOFD = -0x1
+ FAN_NONBLOCK = 0x2
+ FAN_ONDIR = 0x40000000
+ FAN_OPEN = 0x20
+ FAN_OPEN_EXEC = 0x1000
+ FAN_OPEN_EXEC_PERM = 0x40000
+ FAN_OPEN_PERM = 0x10000
+ FAN_Q_OVERFLOW = 0x4000
+ FAN_REPORT_FID = 0x200
+ FAN_REPORT_TID = 0x100
+ FAN_UNLIMITED_MARKS = 0x20
+ FAN_UNLIMITED_QUEUE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x1000
+ FS_ENCRYPTION_MODE_ADIANTUM = 0x9
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
@@ -517,7 +686,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0x3
+ FS_POLICY_FLAGS_VALID = 0x7
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_DUPFD = 0x0
@@ -710,6 +879,7 @@ const (
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
+ IN_MASK_CREATE = 0x10000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
@@ -781,6 +951,7 @@ const (
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_ALL = 0x1d
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
@@ -1006,6 +1177,15 @@ const (
MAP_SHARED_VALIDATE = 0x3
MAP_STACK = 0x20000
MAP_TYPE = 0xf
+ MCAST_BLOCK_SOURCE = 0x2b
+ MCAST_EXCLUDE = 0x0
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x2a
+ MCAST_JOIN_SOURCE_GROUP = 0x2e
+ MCAST_LEAVE_GROUP = 0x2d
+ MCAST_LEAVE_SOURCE_GROUP = 0x2f
+ MCAST_MSFILTER = 0x30
+ MCAST_UNBLOCK_SOURCE = 0x2c
MCL_CURRENT = 0x2000
MCL_FUTURE = 0x4000
MCL_ONFAULT = 0x8000
@@ -1134,7 +1314,7 @@ const (
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
- NETNSA_MAX = 0x3
+ NETNSA_MAX = 0x5
NETNSA_NSID_NOT_ASSIGNED = -0x1
NFNETLINK_V0 = 0x0
NFNLGRP_ACCT_QUOTA = 0x8
@@ -1398,6 +1578,12 @@ const (
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_PAC_APDAKEY = 0x4
+ PR_PAC_APDBKEY = 0x8
+ PR_PAC_APGAKEY = 0x10
+ PR_PAC_APIAKEY = 0x1
+ PR_PAC_APIBKEY = 0x2
+ PR_PAC_RESET_KEYS = 0x36
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
@@ -1435,6 +1621,7 @@ const (
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_SPEC_DISABLE = 0x4
+ PR_SPEC_DISABLE_NOEXEC = 0x10
PR_SPEC_ENABLE = 0x2
PR_SPEC_FORCE_DISABLE = 0x8
PR_SPEC_INDIRECT_BRANCH = 0x1
@@ -1590,6 +1777,13 @@ const (
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
+ RNDADDENTROPY = 0x80085203
+ RNDADDTOENTCNT = 0x80045201
+ RNDCLEARPOOL = 0x20005206
+ RNDGETENTCNT = 0x40045200
+ RNDGETPOOL = 0x40085202
+ RNDRESEEDCRNG = 0x20005207
+ RNDZAPENTCNT = 0x20005204
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
@@ -1797,6 +1991,8 @@ const (
SECCOMP_MODE_STRICT = 0x1
SECURITYFS_MAGIC = 0x73636673
SELINUX_MAGIC = 0xf97cff8c
+ SFD_CLOEXEC = 0x400000
+ SFD_NONBLOCK = 0x4000
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -1947,6 +2143,7 @@ const (
SO_ATTACH_REUSEPORT_CBPF = 0x35
SO_ATTACH_REUSEPORT_EBPF = 0x36
SO_BINDTODEVICE = 0xd
+ SO_BINDTOIFINDEX = 0x41
SO_BPF_EXTENSIONS = 0x32
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0x400
@@ -1958,6 +2155,17 @@ const (
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
+ SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1
+ SO_EE_CODE_TXTIME_MISSED = 0x2
+ SO_EE_CODE_ZEROCOPY_COPIED = 0x1
+ SO_EE_ORIGIN_ICMP = 0x2
+ SO_EE_ORIGIN_ICMP6 = 0x3
+ SO_EE_ORIGIN_LOCAL = 0x1
+ SO_EE_ORIGIN_NONE = 0x0
+ SO_EE_ORIGIN_TIMESTAMPING = 0x4
+ SO_EE_ORIGIN_TXSTATUS = 0x4
+ SO_EE_ORIGIN_TXTIME = 0x6
+ SO_EE_ORIGIN_ZEROCOPY = 0x5
SO_ERROR = 0x1007
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x33
@@ -1984,6 +2192,8 @@ const (
SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800
SO_RCVTIMEO = 0x2000
+ SO_RCVTIMEO_NEW = 0x44
+ SO_RCVTIMEO_OLD = 0x2000
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x24
@@ -1995,9 +2205,17 @@ const (
SO_SNDBUFFORCE = 0x100a
SO_SNDLOWAT = 0x1000
SO_SNDTIMEO = 0x4000
+ SO_SNDTIMEO_NEW = 0x45
+ SO_SNDTIMEO_OLD = 0x4000
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x23
+ SO_TIMESTAMPING_NEW = 0x43
+ SO_TIMESTAMPING_OLD = 0x23
SO_TIMESTAMPNS = 0x21
+ SO_TIMESTAMPNS_NEW = 0x42
+ SO_TIMESTAMPNS_OLD = 0x21
+ SO_TIMESTAMP_NEW = 0x46
+ SO_TIMESTAMP_OLD = 0x1d
SO_TXTIME = 0x3f
SO_TYPE = 0x1008
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
@@ -2089,7 +2307,10 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
+ TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
@@ -2104,6 +2325,7 @@ const (
TCP_FASTOPEN_KEY = 0x21
TCP_FASTOPEN_NO_COOKIE = 0x22
TCP_INFO = 0xb
+ TCP_INQ = 0x24
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
@@ -2123,6 +2345,9 @@ const (
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
+ TCP_REPAIR_OFF = 0x0
+ TCP_REPAIR_OFF_NO_WP = -0x1
+ TCP_REPAIR_ON = 0x1
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
@@ -2137,6 +2362,7 @@ const (
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
+ TCP_ZEROCOPY_RECEIVE = 0x23
TCSAFLUSH = 0x2
TCSBRK = 0x20005405
TCSBRKP = 0x5425
@@ -2150,6 +2376,7 @@ const (
TCSETSW = 0x8024540a
TCSETSW2 = 0x802c540e
TCXONC = 0x20005406
+ TIMER_ABSTIME = 0x1
TIOCCBRK = 0x2000747a
TIOCCONS = 0x20007424
TIOCEXCL = 0x2000740d
@@ -2253,6 +2480,7 @@ const (
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
+ TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
@@ -2280,8 +2508,10 @@ const (
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
+ UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
+ UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
@@ -2429,6 +2659,7 @@ const (
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
+ XDP_PACKET_HEADROOM = 0x100
XDP_PGOFF_RX_RING = 0x0
XDP_PGOFF_TX_RING = 0x80000000
XDP_RX_RING = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
new file mode 100644
index 000000000..fb6c60441
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
@@ -0,0 +1,1762 @@
+// mkerrors.sh -m64
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build arm64,netbsd
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -m64 _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ AF_APPLETALK = 0x10
+ AF_ARP = 0x1c
+ AF_BLUETOOTH = 0x1f
+ AF_CCITT = 0xa
+ AF_CHAOS = 0x5
+ AF_CNT = 0x15
+ AF_COIP = 0x14
+ AF_DATAKIT = 0x9
+ AF_DECnet = 0xc
+ AF_DLI = 0xd
+ AF_E164 = 0x1a
+ AF_ECMA = 0x8
+ AF_HYLINK = 0xf
+ AF_IEEE80211 = 0x20
+ AF_IMPLINK = 0x3
+ AF_INET = 0x2
+ AF_INET6 = 0x18
+ AF_IPX = 0x17
+ AF_ISDN = 0x1a
+ AF_ISO = 0x7
+ AF_LAT = 0xe
+ AF_LINK = 0x12
+ AF_LOCAL = 0x1
+ AF_MAX = 0x23
+ AF_MPLS = 0x21
+ AF_NATM = 0x1b
+ AF_NS = 0x6
+ AF_OROUTE = 0x11
+ AF_OSI = 0x7
+ AF_PUP = 0x4
+ AF_ROUTE = 0x22
+ AF_SNA = 0xb
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ ARPHRD_ARCNET = 0x7
+ ARPHRD_ETHER = 0x1
+ ARPHRD_FRELAY = 0xf
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ ARPHRD_STRIP = 0x17
+ B0 = 0x0
+ B110 = 0x6e
+ B115200 = 0x1c200
+ B1200 = 0x4b0
+ B134 = 0x86
+ B14400 = 0x3840
+ B150 = 0x96
+ B1800 = 0x708
+ B19200 = 0x4b00
+ B200 = 0xc8
+ B230400 = 0x38400
+ B2400 = 0x960
+ B28800 = 0x7080
+ B300 = 0x12c
+ B38400 = 0x9600
+ B460800 = 0x70800
+ B4800 = 0x12c0
+ B50 = 0x32
+ B57600 = 0xe100
+ B600 = 0x258
+ B7200 = 0x1c20
+ B75 = 0x4b
+ B76800 = 0x12c00
+ B921600 = 0xe1000
+ B9600 = 0x2580
+ BIOCFEEDBACK = 0x8004427d
+ BIOCFLUSH = 0x20004268
+ BIOCGBLEN = 0x40044266
+ BIOCGDLT = 0x4004426a
+ BIOCGDLTLIST = 0xc0104277
+ BIOCGETIF = 0x4090426b
+ BIOCGFEEDBACK = 0x4004427c
+ BIOCGHDRCMPLT = 0x40044274
+ BIOCGRTIMEOUT = 0x4010427b
+ BIOCGSEESENT = 0x40044278
+ BIOCGSTATS = 0x4080426f
+ BIOCGSTATSOLD = 0x4008426f
+ BIOCIMMEDIATE = 0x80044270
+ BIOCPROMISC = 0x20004269
+ BIOCSBLEN = 0xc0044266
+ BIOCSDLT = 0x80044276
+ BIOCSETF = 0x80104267
+ BIOCSETIF = 0x8090426c
+ BIOCSFEEDBACK = 0x8004427d
+ BIOCSHDRCMPLT = 0x80044275
+ BIOCSRTIMEOUT = 0x8010427a
+ BIOCSSEESENT = 0x80044279
+ BIOCSTCPF = 0x80104272
+ BIOCSUDPF = 0x80104273
+ BIOCVERSION = 0x40044271
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALIGNMENT = 0x8
+ BPF_ALIGNMENT32 = 0x4
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DFLTBUFSIZE = 0x100000
+ BPF_DIV = 0x30
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXBUFSIZE = 0x1000000
+ BPF_MAXINSNS = 0x200
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINBUFSIZE = 0x20
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_OR = 0x40
+ BPF_RELEASE = 0x30bb6
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BRKINT = 0x2
+ CFLUSH = 0xf
+ CLOCAL = 0x8000
+ CLONE_CSIGNAL = 0xff
+ CLONE_FILES = 0x400
+ CLONE_FS = 0x200
+ CLONE_PID = 0x1000
+ CLONE_PTRACE = 0x2000
+ CLONE_SIGHAND = 0x800
+ CLONE_VFORK = 0x4000
+ CLONE_VM = 0x100
+ CREAD = 0x800
+ CRTSCTS = 0x10000
+ CS5 = 0x0
+ CS6 = 0x100
+ CS7 = 0x200
+ CS8 = 0x300
+ CSIZE = 0x300
+ CSTART = 0x11
+ CSTATUS = 0x14
+ CSTOP = 0x13
+ CSTOPB = 0x400
+ CSUSP = 0x1a
+ CTL_HW = 0x6
+ CTL_KERN = 0x1
+ CTL_MAXNAME = 0xc
+ CTL_NET = 0x4
+ CTL_QUERY = -0x2
+ DIOCBSFLUSH = 0x20006478
+ DLT_A429 = 0xb8
+ DLT_A653_ICM = 0xb9
+ DLT_AIRONET_HEADER = 0x78
+ DLT_AOS = 0xde
+ DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
+ DLT_ARCNET = 0x7
+ DLT_ARCNET_LINUX = 0x81
+ DLT_ATM_CLIP = 0x13
+ DLT_ATM_RFC1483 = 0xb
+ DLT_AURORA = 0x7e
+ DLT_AX25 = 0x3
+ DLT_AX25_KISS = 0xca
+ DLT_BACNET_MS_TP = 0xa5
+ DLT_BLUETOOTH_HCI_H4 = 0xbb
+ DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9
+ DLT_CAN20B = 0xbe
+ DLT_CAN_SOCKETCAN = 0xe3
+ DLT_CHAOS = 0x5
+ DLT_CISCO_IOS = 0x76
+ DLT_C_HDLC = 0x68
+ DLT_C_HDLC_WITH_DIR = 0xcd
+ DLT_DECT = 0xdd
+ DLT_DOCSIS = 0x8f
+ DLT_ECONET = 0x73
+ DLT_EN10MB = 0x1
+ DLT_EN3MB = 0x2
+ DLT_ENC = 0x6d
+ DLT_ERF = 0xc5
+ DLT_ERF_ETH = 0xaf
+ DLT_ERF_POS = 0xb0
+ DLT_FC_2 = 0xe0
+ DLT_FC_2_WITH_FRAME_DELIMS = 0xe1
+ DLT_FDDI = 0xa
+ DLT_FLEXRAY = 0xd2
+ DLT_FRELAY = 0x6b
+ DLT_FRELAY_WITH_DIR = 0xce
+ DLT_GCOM_SERIAL = 0xad
+ DLT_GCOM_T1E1 = 0xac
+ DLT_GPF_F = 0xab
+ DLT_GPF_T = 0xaa
+ DLT_GPRS_LLC = 0xa9
+ DLT_GSMTAP_ABIS = 0xda
+ DLT_GSMTAP_UM = 0xd9
+ DLT_HDLC = 0x10
+ DLT_HHDLC = 0x79
+ DLT_HIPPI = 0xf
+ DLT_IBM_SN = 0x92
+ DLT_IBM_SP = 0x91
+ DLT_IEEE802 = 0x6
+ DLT_IEEE802_11 = 0x69
+ DLT_IEEE802_11_RADIO = 0x7f
+ DLT_IEEE802_11_RADIO_AVS = 0xa3
+ DLT_IEEE802_15_4 = 0xc3
+ DLT_IEEE802_15_4_LINUX = 0xbf
+ DLT_IEEE802_15_4_NONASK_PHY = 0xd7
+ DLT_IEEE802_16_MAC_CPS = 0xbc
+ DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1
+ DLT_IPMB = 0xc7
+ DLT_IPMB_LINUX = 0xd1
+ DLT_IPNET = 0xe2
+ DLT_IPV4 = 0xe4
+ DLT_IPV6 = 0xe5
+ DLT_IP_OVER_FC = 0x7a
+ DLT_JUNIPER_ATM1 = 0x89
+ DLT_JUNIPER_ATM2 = 0x87
+ DLT_JUNIPER_CHDLC = 0xb5
+ DLT_JUNIPER_ES = 0x84
+ DLT_JUNIPER_ETHER = 0xb2
+ DLT_JUNIPER_FRELAY = 0xb4
+ DLT_JUNIPER_GGSN = 0x85
+ DLT_JUNIPER_ISM = 0xc2
+ DLT_JUNIPER_MFR = 0x86
+ DLT_JUNIPER_MLFR = 0x83
+ DLT_JUNIPER_MLPPP = 0x82
+ DLT_JUNIPER_MONITOR = 0xa4
+ DLT_JUNIPER_PIC_PEER = 0xae
+ DLT_JUNIPER_PPP = 0xb3
+ DLT_JUNIPER_PPPOE = 0xa7
+ DLT_JUNIPER_PPPOE_ATM = 0xa8
+ DLT_JUNIPER_SERVICES = 0x88
+ DLT_JUNIPER_ST = 0xc8
+ DLT_JUNIPER_VP = 0xb7
+ DLT_LAPB_WITH_DIR = 0xcf
+ DLT_LAPD = 0xcb
+ DLT_LIN = 0xd4
+ DLT_LINUX_EVDEV = 0xd8
+ DLT_LINUX_IRDA = 0x90
+ DLT_LINUX_LAPD = 0xb1
+ DLT_LINUX_SLL = 0x71
+ DLT_LOOP = 0x6c
+ DLT_LTALK = 0x72
+ DLT_MFR = 0xb6
+ DLT_MOST = 0xd3
+ DLT_MPLS = 0xdb
+ DLT_MTP2 = 0x8c
+ DLT_MTP2_WITH_PHDR = 0x8b
+ DLT_MTP3 = 0x8d
+ DLT_NULL = 0x0
+ DLT_PCI_EXP = 0x7d
+ DLT_PFLOG = 0x75
+ DLT_PFSYNC = 0x12
+ DLT_PPI = 0xc0
+ DLT_PPP = 0x9
+ DLT_PPP_BSDOS = 0xe
+ DLT_PPP_ETHER = 0x33
+ DLT_PPP_PPPD = 0xa6
+ DLT_PPP_SERIAL = 0x32
+ DLT_PPP_WITH_DIR = 0xcc
+ DLT_PRISM_HEADER = 0x77
+ DLT_PRONET = 0x4
+ DLT_RAIF1 = 0xc6
+ DLT_RAW = 0xc
+ DLT_RAWAF_MASK = 0x2240000
+ DLT_RIO = 0x7c
+ DLT_SCCP = 0x8e
+ DLT_SITA = 0xc4
+ DLT_SLIP = 0x8
+ DLT_SLIP_BSDOS = 0xd
+ DLT_SUNATM = 0x7b
+ DLT_SYMANTEC_FIREWALL = 0x63
+ DLT_TZSP = 0x80
+ DLT_USB = 0xba
+ DLT_USB_LINUX = 0xbd
+ DLT_USB_LINUX_MMAPPED = 0xdc
+ DLT_WIHART = 0xdf
+ DLT_X2E_SERIAL = 0xd5
+ DLT_X2E_XORAYA = 0xd6
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ DT_WHT = 0xe
+ ECHO = 0x8
+ ECHOCTL = 0x40
+ ECHOE = 0x2
+ ECHOK = 0x4
+ ECHOKE = 0x1
+ ECHONL = 0x10
+ ECHOPRT = 0x20
+ EMUL_LINUX = 0x1
+ EMUL_LINUX32 = 0x5
+ EMUL_MAXID = 0x6
+ ETHERCAP_JUMBO_MTU = 0x4
+ ETHERCAP_VLAN_HWTAGGING = 0x2
+ ETHERCAP_VLAN_MTU = 0x1
+ ETHERMIN = 0x2e
+ ETHERMTU = 0x5dc
+ ETHERMTU_JUMBO = 0x2328
+ ETHERTYPE_8023 = 0x4
+ ETHERTYPE_AARP = 0x80f3
+ ETHERTYPE_ACCTON = 0x8390
+ ETHERTYPE_AEONIC = 0x8036
+ ETHERTYPE_ALPHA = 0x814a
+ ETHERTYPE_AMBER = 0x6008
+ ETHERTYPE_AMOEBA = 0x8145
+ ETHERTYPE_APOLLO = 0x80f7
+ ETHERTYPE_APOLLODOMAIN = 0x8019
+ ETHERTYPE_APPLETALK = 0x809b
+ ETHERTYPE_APPLITEK = 0x80c7
+ ETHERTYPE_ARGONAUT = 0x803a
+ ETHERTYPE_ARP = 0x806
+ ETHERTYPE_AT = 0x809b
+ ETHERTYPE_ATALK = 0x809b
+ ETHERTYPE_ATOMIC = 0x86df
+ ETHERTYPE_ATT = 0x8069
+ ETHERTYPE_ATTSTANFORD = 0x8008
+ ETHERTYPE_AUTOPHON = 0x806a
+ ETHERTYPE_AXIS = 0x8856
+ ETHERTYPE_BCLOOP = 0x9003
+ ETHERTYPE_BOFL = 0x8102
+ ETHERTYPE_CABLETRON = 0x7034
+ ETHERTYPE_CHAOS = 0x804
+ ETHERTYPE_COMDESIGN = 0x806c
+ ETHERTYPE_COMPUGRAPHIC = 0x806d
+ ETHERTYPE_COUNTERPOINT = 0x8062
+ ETHERTYPE_CRONUS = 0x8004
+ ETHERTYPE_CRONUSVLN = 0x8003
+ ETHERTYPE_DCA = 0x1234
+ ETHERTYPE_DDE = 0x807b
+ ETHERTYPE_DEBNI = 0xaaaa
+ ETHERTYPE_DECAM = 0x8048
+ ETHERTYPE_DECCUST = 0x6006
+ ETHERTYPE_DECDIAG = 0x6005
+ ETHERTYPE_DECDNS = 0x803c
+ ETHERTYPE_DECDTS = 0x803e
+ ETHERTYPE_DECEXPER = 0x6000
+ ETHERTYPE_DECLAST = 0x8041
+ ETHERTYPE_DECLTM = 0x803f
+ ETHERTYPE_DECMUMPS = 0x6009
+ ETHERTYPE_DECNETBIOS = 0x8040
+ ETHERTYPE_DELTACON = 0x86de
+ ETHERTYPE_DIDDLE = 0x4321
+ ETHERTYPE_DLOG1 = 0x660
+ ETHERTYPE_DLOG2 = 0x661
+ ETHERTYPE_DN = 0x6003
+ ETHERTYPE_DOGFIGHT = 0x1989
+ ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_ECMA = 0x803
+ ETHERTYPE_ENCRYPT = 0x803d
+ ETHERTYPE_ES = 0x805d
+ ETHERTYPE_EXCELAN = 0x8010
+ ETHERTYPE_EXPERDATA = 0x8049
+ ETHERTYPE_FLIP = 0x8146
+ ETHERTYPE_FLOWCONTROL = 0x8808
+ ETHERTYPE_FRARP = 0x808
+ ETHERTYPE_GENDYN = 0x8068
+ ETHERTYPE_HAYES = 0x8130
+ ETHERTYPE_HIPPI_FP = 0x8180
+ ETHERTYPE_HITACHI = 0x8820
+ ETHERTYPE_HP = 0x8005
+ ETHERTYPE_IEEEPUP = 0xa00
+ ETHERTYPE_IEEEPUPAT = 0xa01
+ ETHERTYPE_IMLBL = 0x4c42
+ ETHERTYPE_IMLBLDIAG = 0x424c
+ ETHERTYPE_IP = 0x800
+ ETHERTYPE_IPAS = 0x876c
+ ETHERTYPE_IPV6 = 0x86dd
+ ETHERTYPE_IPX = 0x8137
+ ETHERTYPE_IPXNEW = 0x8037
+ ETHERTYPE_KALPANA = 0x8582
+ ETHERTYPE_LANBRIDGE = 0x8038
+ ETHERTYPE_LANPROBE = 0x8888
+ ETHERTYPE_LAT = 0x6004
+ ETHERTYPE_LBACK = 0x9000
+ ETHERTYPE_LITTLE = 0x8060
+ ETHERTYPE_LOGICRAFT = 0x8148
+ ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MATRA = 0x807a
+ ETHERTYPE_MAX = 0xffff
+ ETHERTYPE_MERIT = 0x807c
+ ETHERTYPE_MICP = 0x873a
+ ETHERTYPE_MOPDL = 0x6001
+ ETHERTYPE_MOPRC = 0x6002
+ ETHERTYPE_MOTOROLA = 0x818d
+ ETHERTYPE_MPLS = 0x8847
+ ETHERTYPE_MPLS_MCAST = 0x8848
+ ETHERTYPE_MUMPS = 0x813f
+ ETHERTYPE_NBPCC = 0x3c04
+ ETHERTYPE_NBPCLAIM = 0x3c09
+ ETHERTYPE_NBPCLREQ = 0x3c05
+ ETHERTYPE_NBPCLRSP = 0x3c06
+ ETHERTYPE_NBPCREQ = 0x3c02
+ ETHERTYPE_NBPCRSP = 0x3c03
+ ETHERTYPE_NBPDG = 0x3c07
+ ETHERTYPE_NBPDGB = 0x3c08
+ ETHERTYPE_NBPDLTE = 0x3c0a
+ ETHERTYPE_NBPRAR = 0x3c0c
+ ETHERTYPE_NBPRAS = 0x3c0b
+ ETHERTYPE_NBPRST = 0x3c0d
+ ETHERTYPE_NBPSCD = 0x3c01
+ ETHERTYPE_NBPVCD = 0x3c00
+ ETHERTYPE_NBS = 0x802
+ ETHERTYPE_NCD = 0x8149
+ ETHERTYPE_NESTAR = 0x8006
+ ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NOVELL = 0x8138
+ ETHERTYPE_NS = 0x600
+ ETHERTYPE_NSAT = 0x601
+ ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NTRAILER = 0x10
+ ETHERTYPE_OS9 = 0x7007
+ ETHERTYPE_OS9NET = 0x7009
+ ETHERTYPE_PACER = 0x80c6
+ ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PCS = 0x4242
+ ETHERTYPE_PLANNING = 0x8044
+ ETHERTYPE_PPP = 0x880b
+ ETHERTYPE_PPPOE = 0x8864
+ ETHERTYPE_PPPOEDISC = 0x8863
+ ETHERTYPE_PRIMENTS = 0x7031
+ ETHERTYPE_PUP = 0x200
+ ETHERTYPE_PUPAT = 0x200
+ ETHERTYPE_RACAL = 0x7030
+ ETHERTYPE_RATIONAL = 0x8150
+ ETHERTYPE_RAWFR = 0x6559
+ ETHERTYPE_RCL = 0x1995
+ ETHERTYPE_RDP = 0x8739
+ ETHERTYPE_RETIX = 0x80f2
+ ETHERTYPE_REVARP = 0x8035
+ ETHERTYPE_SCA = 0x6007
+ ETHERTYPE_SECTRA = 0x86db
+ ETHERTYPE_SECUREDATA = 0x876d
+ ETHERTYPE_SGITW = 0x817e
+ ETHERTYPE_SG_BOUNCE = 0x8016
+ ETHERTYPE_SG_DIAG = 0x8013
+ ETHERTYPE_SG_NETGAMES = 0x8014
+ ETHERTYPE_SG_RESV = 0x8015
+ ETHERTYPE_SIMNET = 0x5208
+ ETHERTYPE_SLOWPROTOCOLS = 0x8809
+ ETHERTYPE_SNA = 0x80d5
+ ETHERTYPE_SNMP = 0x814c
+ ETHERTYPE_SONIX = 0xfaf5
+ ETHERTYPE_SPIDER = 0x809f
+ ETHERTYPE_SPRITE = 0x500
+ ETHERTYPE_STP = 0x8181
+ ETHERTYPE_TALARIS = 0x812b
+ ETHERTYPE_TALARISMC = 0x852b
+ ETHERTYPE_TCPCOMP = 0x876b
+ ETHERTYPE_TCPSM = 0x9002
+ ETHERTYPE_TEC = 0x814f
+ ETHERTYPE_TIGAN = 0x802f
+ ETHERTYPE_TRAIL = 0x1000
+ ETHERTYPE_TRANSETHER = 0x6558
+ ETHERTYPE_TYMSHARE = 0x802e
+ ETHERTYPE_UBBST = 0x7005
+ ETHERTYPE_UBDEBUG = 0x900
+ ETHERTYPE_UBDIAGLOOP = 0x7002
+ ETHERTYPE_UBDL = 0x7000
+ ETHERTYPE_UBNIU = 0x7001
+ ETHERTYPE_UBNMC = 0x7003
+ ETHERTYPE_VALID = 0x1600
+ ETHERTYPE_VARIAN = 0x80dd
+ ETHERTYPE_VAXELN = 0x803b
+ ETHERTYPE_VEECO = 0x8067
+ ETHERTYPE_VEXP = 0x805b
+ ETHERTYPE_VGLAB = 0x8131
+ ETHERTYPE_VINES = 0xbad
+ ETHERTYPE_VINESECHO = 0xbaf
+ ETHERTYPE_VINESLOOP = 0xbae
+ ETHERTYPE_VITAL = 0xff00
+ ETHERTYPE_VLAN = 0x8100
+ ETHERTYPE_VLTLMAN = 0x8080
+ ETHERTYPE_VPROD = 0x805c
+ ETHERTYPE_VURESERVED = 0x8147
+ ETHERTYPE_WATERLOO = 0x8130
+ ETHERTYPE_WELLFLEET = 0x8103
+ ETHERTYPE_X25 = 0x805
+ ETHERTYPE_X75 = 0x801
+ ETHERTYPE_XNSSM = 0x9001
+ ETHERTYPE_XTP = 0x817d
+ ETHER_ADDR_LEN = 0x6
+ ETHER_CRC_LEN = 0x4
+ ETHER_CRC_POLY_BE = 0x4c11db6
+ ETHER_CRC_POLY_LE = 0xedb88320
+ ETHER_HDR_LEN = 0xe
+ ETHER_MAX_LEN = 0x5ee
+ ETHER_MAX_LEN_JUMBO = 0x233a
+ ETHER_MIN_LEN = 0x40
+ ETHER_PPPOE_ENCAP_LEN = 0x8
+ ETHER_TYPE_LEN = 0x2
+ ETHER_VLAN_ENCAP_LEN = 0x4
+ EVFILT_AIO = 0x2
+ EVFILT_PROC = 0x4
+ EVFILT_READ = 0x0
+ EVFILT_SIGNAL = 0x5
+ EVFILT_SYSCOUNT = 0x7
+ EVFILT_TIMER = 0x6
+ EVFILT_VNODE = 0x3
+ EVFILT_WRITE = 0x1
+ EV_ADD = 0x1
+ EV_CLEAR = 0x20
+ EV_DELETE = 0x2
+ EV_DISABLE = 0x8
+ EV_ENABLE = 0x4
+ EV_EOF = 0x8000
+ EV_ERROR = 0x4000
+ EV_FLAG1 = 0x2000
+ EV_ONESHOT = 0x10
+ EV_SYSFLAGS = 0xf000
+ EXTA = 0x4b00
+ EXTATTR_CMD_START = 0x1
+ EXTATTR_CMD_STOP = 0x2
+ EXTATTR_NAMESPACE_SYSTEM = 0x2
+ EXTATTR_NAMESPACE_USER = 0x1
+ EXTB = 0x9600
+ EXTPROC = 0x800
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x100
+ FLUSHO = 0x800000
+ F_CLOSEM = 0xa
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0xc
+ F_FSCTL = -0x80000000
+ F_FSDIRMASK = 0x70000000
+ F_FSIN = 0x10000000
+ F_FSINOUT = 0x30000000
+ F_FSOUT = 0x20000000
+ F_FSPRIV = 0x8000
+ F_FSVOID = 0x40000000
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLK = 0x7
+ F_GETNOSIGPIPE = 0xd
+ F_GETOWN = 0x5
+ F_MAXFD = 0xb
+ F_OK = 0x0
+ F_PARAM_MASK = 0xfff
+ F_PARAM_MAX = 0xfff
+ F_RDLCK = 0x1
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLK = 0x8
+ F_SETLKW = 0x9
+ F_SETNOSIGPIPE = 0xe
+ F_SETOWN = 0x6
+ F_UNLCK = 0x2
+ F_WRLCK = 0x3
+ HUPCL = 0x4000
+ HW_MACHINE = 0x1
+ ICANON = 0x100
+ ICMP6_FILTER = 0x12
+ ICRNL = 0x100
+ IEXTEN = 0x400
+ IFAN_ARRIVAL = 0x0
+ IFAN_DEPARTURE = 0x1
+ IFA_ROUTE = 0x1
+ IFF_ALLMULTI = 0x200
+ IFF_BROADCAST = 0x2
+ IFF_CANTCHANGE = 0x8f52
+ IFF_DEBUG = 0x4
+ IFF_LINK0 = 0x1000
+ IFF_LINK1 = 0x2000
+ IFF_LINK2 = 0x4000
+ IFF_LOOPBACK = 0x8
+ IFF_MULTICAST = 0x8000
+ IFF_NOARP = 0x80
+ IFF_NOTRAILERS = 0x20
+ IFF_OACTIVE = 0x400
+ IFF_POINTOPOINT = 0x10
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SIMPLEX = 0x800
+ IFF_UP = 0x1
+ IFNAMSIZ = 0x10
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BRIDGE = 0xd1
+ IFT_BSC = 0x53
+ IFT_CARP = 0xf8
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ECONET = 0xce
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE1394 = 0x90
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INFINIBAND = 0xc7
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L2VLAN = 0x87
+ IFT_L3IPVLAN = 0x88
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LINEGROUP = 0xd2
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf5
+ IFT_PFSYNC = 0xf6
+ IFT_PLC = 0xae
+ IFT_PON155 = 0xcf
+ IFT_PON622 = 0xd0
+ IFT_POS = 0xab
+ IFT_PPP = 0x17
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPATM = 0xc5
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPVIRTUAL = 0x35
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_Q2931 = 0xc9
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SIPSIG = 0xcc
+ IFT_SIPTG = 0xcb
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TELINK = 0xc8
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VIRTUALTG = 0xca
+ IFT_VOICEDID = 0xd5
+ IFT_VOICEEM = 0x64
+ IFT_VOICEEMFGD = 0xd3
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFGDEANA = 0xd4
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERCABLE = 0xc6
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLASSD_HOST = 0xfffffff
+ IN_CLASSD_NET = 0xf0000000
+ IN_CLASSD_NSHIFT = 0x1c
+ IN_LOOPBACKNET = 0x7f
+ IPPROTO_AH = 0x33
+ IPPROTO_CARP = 0x70
+ IPPROTO_DONE = 0x101
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_EON = 0x50
+ IPPROTO_ESP = 0x32
+ IPPROTO_ETHERIP = 0x61
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GGP = 0x3
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPCOMP = 0x6c
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV4 = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_IPV6_ICMP = 0x3a
+ IPPROTO_MAX = 0x100
+ IPPROTO_MAXID = 0x34
+ IPPROTO_MOBILE = 0x37
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PFSYNC = 0xf0
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPPROTO_VRRP = 0x70
+ IPV6_CHECKSUM = 0x1a
+ IPV6_DEFAULT_MULTICAST_HOPS = 0x1
+ IPV6_DEFAULT_MULTICAST_LOOP = 0x1
+ IPV6_DEFHLIM = 0x40
+ IPV6_DONTFRAG = 0x3e
+ IPV6_DSTOPTS = 0x32
+ IPV6_FAITH = 0x1d
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
+ IPV6_FRAGTTL = 0x78
+ IPV6_HLIMDEC = 0x1
+ IPV6_HOPLIMIT = 0x2f
+ IPV6_HOPOPTS = 0x31
+ IPV6_IPSEC_POLICY = 0x1c
+ IPV6_JOIN_GROUP = 0xc
+ IPV6_LEAVE_GROUP = 0xd
+ IPV6_MAXHLIM = 0xff
+ IPV6_MAXPACKET = 0xffff
+ IPV6_MMTU = 0x500
+ IPV6_MULTICAST_HOPS = 0xa
+ IPV6_MULTICAST_IF = 0x9
+ IPV6_MULTICAST_LOOP = 0xb
+ IPV6_NEXTHOP = 0x30
+ IPV6_PATHMTU = 0x2c
+ IPV6_PKTINFO = 0x2e
+ IPV6_PORTRANGE = 0xe
+ IPV6_PORTRANGE_DEFAULT = 0x0
+ IPV6_PORTRANGE_HIGH = 0x1
+ IPV6_PORTRANGE_LOW = 0x2
+ IPV6_RECVDSTOPTS = 0x28
+ IPV6_RECVHOPLIMIT = 0x25
+ IPV6_RECVHOPOPTS = 0x27
+ IPV6_RECVPATHMTU = 0x2b
+ IPV6_RECVPKTINFO = 0x24
+ IPV6_RECVRTHDR = 0x26
+ IPV6_RECVTCLASS = 0x39
+ IPV6_RTHDR = 0x33
+ IPV6_RTHDRDSTOPTS = 0x23
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_SOCKOPT_RESERVED1 = 0x3
+ IPV6_TCLASS = 0x3d
+ IPV6_UNICAST_HOPS = 0x4
+ IPV6_USE_MIN_MTU = 0x2a
+ IPV6_V6ONLY = 0x1b
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
+ IP_ADD_MEMBERSHIP = 0xc
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0xd
+ IP_EF = 0x8000
+ IP_ERRORMTU = 0x15
+ IP_HDRINCL = 0x2
+ IP_IPSEC_POLICY = 0x16
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0x14
+ IP_MF = 0x2000
+ IP_MINFRAGSIZE = 0x45
+ IP_MINTTL = 0x18
+ IP_MSS = 0x240
+ IP_MULTICAST_IF = 0x9
+ IP_MULTICAST_LOOP = 0xb
+ IP_MULTICAST_TTL = 0xa
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x1
+ IP_PORTRANGE = 0x13
+ IP_PORTRANGE_DEFAULT = 0x0
+ IP_PORTRANGE_HIGH = 0x1
+ IP_PORTRANGE_LOW = 0x2
+ IP_RECVDSTADDR = 0x7
+ IP_RECVIF = 0x14
+ IP_RECVOPTS = 0x5
+ IP_RECVRETOPTS = 0x6
+ IP_RECVTTL = 0x17
+ IP_RETOPTS = 0x8
+ IP_RF = 0x8000
+ IP_TOS = 0x3
+ IP_TTL = 0x4
+ ISIG = 0x80
+ ISTRIP = 0x20
+ IXANY = 0x800
+ IXOFF = 0x400
+ IXON = 0x200
+ KERN_HOSTNAME = 0xa
+ KERN_OSRELEASE = 0x2
+ KERN_OSTYPE = 0x1
+ KERN_VERSION = 0x4
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DONTNEED = 0x4
+ MADV_FREE = 0x6
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_SEQUENTIAL = 0x2
+ MADV_SPACEAVAIL = 0x5
+ MADV_WILLNEED = 0x3
+ MAP_ALIGNMENT_16MB = 0x18000000
+ MAP_ALIGNMENT_1TB = 0x28000000
+ MAP_ALIGNMENT_256TB = 0x30000000
+ MAP_ALIGNMENT_4GB = 0x20000000
+ MAP_ALIGNMENT_64KB = 0x10000000
+ MAP_ALIGNMENT_64PB = 0x38000000
+ MAP_ALIGNMENT_MASK = -0x1000000
+ MAP_ALIGNMENT_SHIFT = 0x18
+ MAP_ANON = 0x1000
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_HASSEMAPHORE = 0x200
+ MAP_INHERIT = 0x80
+ MAP_INHERIT_COPY = 0x1
+ MAP_INHERIT_DEFAULT = 0x1
+ MAP_INHERIT_DONATE_COPY = 0x3
+ MAP_INHERIT_NONE = 0x2
+ MAP_INHERIT_SHARE = 0x0
+ MAP_NORESERVE = 0x40
+ MAP_PRIVATE = 0x2
+ MAP_RENAME = 0x20
+ MAP_SHARED = 0x1
+ MAP_STACK = 0x2000
+ MAP_TRYFIXED = 0x400
+ MAP_WIRED = 0x800
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_BASIC_FLAGS = 0xe782807f
+ MNT_DEFEXPORTED = 0x200
+ MNT_DISCARD = 0x800000
+ MNT_EXKERB = 0x800
+ MNT_EXNORESPORT = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXPUBLIC = 0x10000000
+ MNT_EXRDONLY = 0x80
+ MNT_EXTATTR = 0x1000000
+ MNT_FORCE = 0x80000
+ MNT_GETARGS = 0x400000
+ MNT_IGNORE = 0x100000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_LOG = 0x2000000
+ MNT_NOATIME = 0x4000000
+ MNT_NOCOREDUMP = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NODEVMTIME = 0x40000000
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_OP_FLAGS = 0x4d0000
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELATIME = 0x20000
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x80000000
+ MNT_SYMPERM = 0x20000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UNION = 0x20
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0xff90ffff
+ MNT_WAIT = 0x1
+ MSG_BCAST = 0x100
+ MSG_CMSG_CLOEXEC = 0x800
+ MSG_CONTROLMBUF = 0x2000000
+ MSG_CTRUNC = 0x20
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x80
+ MSG_EOR = 0x8
+ MSG_IOVUSRSPACE = 0x4000000
+ MSG_LENUSRSPACE = 0x8000000
+ MSG_MCAST = 0x200
+ MSG_NAMEMBUF = 0x1000000
+ MSG_NBIO = 0x1000
+ MSG_NOSIGNAL = 0x400
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_TRUNC = 0x10
+ MSG_USERFLAGS = 0xffffff
+ MSG_WAITALL = 0x40
+ MS_ASYNC = 0x1
+ MS_INVALIDATE = 0x2
+ MS_SYNC = 0x4
+ NAME_MAX = 0x1ff
+ NET_RT_DUMP = 0x1
+ NET_RT_FLAGS = 0x2
+ NET_RT_IFLIST = 0x5
+ NET_RT_MAXID = 0x6
+ NET_RT_OIFLIST = 0x4
+ NET_RT_OOIFLIST = 0x3
+ NOFLSH = 0x80000000
+ NOTE_ATTRIB = 0x8
+ NOTE_CHILD = 0x4
+ NOTE_DELETE = 0x1
+ NOTE_EXEC = 0x20000000
+ NOTE_EXIT = 0x80000000
+ NOTE_EXTEND = 0x4
+ NOTE_FORK = 0x40000000
+ NOTE_LINK = 0x10
+ NOTE_LOWAT = 0x1
+ NOTE_PCTRLMASK = 0xf0000000
+ NOTE_PDATAMASK = 0xfffff
+ NOTE_RENAME = 0x20
+ NOTE_REVOKE = 0x40
+ NOTE_TRACK = 0x1
+ NOTE_TRACKERR = 0x2
+ NOTE_WRITE = 0x2
+ OCRNL = 0x10
+ OFIOGETBMAP = 0xc004667a
+ ONLCR = 0x2
+ ONLRET = 0x40
+ ONOCR = 0x20
+ ONOEOT = 0x8
+ OPOST = 0x1
+ O_ACCMODE = 0x3
+ O_ALT_IO = 0x40000
+ O_APPEND = 0x8
+ O_ASYNC = 0x40
+ O_CLOEXEC = 0x400000
+ O_CREAT = 0x200
+ O_DIRECT = 0x80000
+ O_DIRECTORY = 0x200000
+ O_DSYNC = 0x10000
+ O_EXCL = 0x800
+ O_EXLOCK = 0x20
+ O_FSYNC = 0x80
+ O_NDELAY = 0x4
+ O_NOCTTY = 0x8000
+ O_NOFOLLOW = 0x100
+ O_NONBLOCK = 0x4
+ O_NOSIGPIPE = 0x1000000
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x20000
+ O_SHLOCK = 0x10
+ O_SYNC = 0x80
+ O_TRUNC = 0x400
+ O_WRONLY = 0x1
+ PARENB = 0x1000
+ PARMRK = 0x8
+ PARODD = 0x2000
+ PENDIN = 0x20000000
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PRI_IOFLUSH = 0x7c
+ PROT_EXEC = 0x4
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ RLIMIT_AS = 0xa
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_MEMLOCK = 0x6
+ RLIMIT_NOFILE = 0x8
+ RLIMIT_NPROC = 0x7
+ RLIMIT_RSS = 0x5
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RTAX_AUTHOR = 0x6
+ RTAX_BRD = 0x7
+ RTAX_DST = 0x0
+ RTAX_GATEWAY = 0x1
+ RTAX_GENMASK = 0x3
+ RTAX_IFA = 0x5
+ RTAX_IFP = 0x4
+ RTAX_MAX = 0x9
+ RTAX_NETMASK = 0x2
+ RTAX_TAG = 0x8
+ RTA_AUTHOR = 0x40
+ RTA_BRD = 0x80
+ RTA_DST = 0x1
+ RTA_GATEWAY = 0x2
+ RTA_GENMASK = 0x8
+ RTA_IFA = 0x20
+ RTA_IFP = 0x10
+ RTA_NETMASK = 0x4
+ RTA_TAG = 0x100
+ RTF_ANNOUNCE = 0x20000
+ RTF_BLACKHOLE = 0x1000
+ RTF_CLONED = 0x2000
+ RTF_CLONING = 0x100
+ RTF_DONE = 0x40
+ RTF_DYNAMIC = 0x10
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_LLINFO = 0x400
+ RTF_MASK = 0x80
+ RTF_MODIFIED = 0x20
+ RTF_PROTO1 = 0x8000
+ RTF_PROTO2 = 0x4000
+ RTF_REJECT = 0x8
+ RTF_SRC = 0x10000
+ RTF_STATIC = 0x800
+ RTF_UP = 0x1
+ RTF_XRESOLVE = 0x200
+ RTM_ADD = 0x1
+ RTM_CHANGE = 0x3
+ RTM_CHGADDR = 0x15
+ RTM_DELADDR = 0xd
+ RTM_DELETE = 0x2
+ RTM_GET = 0x4
+ RTM_IEEE80211 = 0x11
+ RTM_IFANNOUNCE = 0x10
+ RTM_IFINFO = 0x14
+ RTM_LLINFO_UPD = 0x13
+ RTM_LOCK = 0x8
+ RTM_LOSING = 0x5
+ RTM_MISS = 0x7
+ RTM_NEWADDR = 0xc
+ RTM_OIFINFO = 0xf
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RTM_OOIFINFO = 0xe
+ RTM_REDIRECT = 0x6
+ RTM_RESOLVE = 0xb
+ RTM_RTTUNIT = 0xf4240
+ RTM_SETGATE = 0x12
+ RTM_VERSION = 0x4
+ RTV_EXPIRE = 0x4
+ RTV_HOPCOUNT = 0x2
+ RTV_MTU = 0x1
+ RTV_RPIPE = 0x8
+ RTV_RTT = 0x40
+ RTV_RTTVAR = 0x80
+ RTV_SPIPE = 0x10
+ RTV_SSTHRESH = 0x20
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ SCM_CREDS = 0x4
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x8
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDMULTI = 0x80906931
+ SIOCADDRT = 0x8038720a
+ SIOCAIFADDR = 0x8040691a
+ SIOCALIFADDR = 0x8118691c
+ SIOCATMARK = 0x40047307
+ SIOCDELMULTI = 0x80906932
+ SIOCDELRT = 0x8038720b
+ SIOCDIFADDR = 0x80906919
+ SIOCDIFPHYADDR = 0x80906949
+ SIOCDLIFADDR = 0x8118691e
+ SIOCGDRVSPEC = 0xc028697b
+ SIOCGETPFSYNC = 0xc09069f8
+ SIOCGETSGCNT = 0xc0207534
+ SIOCGETVIFCNT = 0xc0287533
+ SIOCGHIWAT = 0x40047301
+ SIOCGIFADDR = 0xc0906921
+ SIOCGIFADDRPREF = 0xc0986920
+ SIOCGIFALIAS = 0xc040691b
+ SIOCGIFBRDADDR = 0xc0906923
+ SIOCGIFCAP = 0xc0206976
+ SIOCGIFCONF = 0xc0106926
+ SIOCGIFDATA = 0xc0986985
+ SIOCGIFDLT = 0xc0906977
+ SIOCGIFDSTADDR = 0xc0906922
+ SIOCGIFFLAGS = 0xc0906911
+ SIOCGIFGENERIC = 0xc090693a
+ SIOCGIFMEDIA = 0xc0306936
+ SIOCGIFMETRIC = 0xc0906917
+ SIOCGIFMTU = 0xc090697e
+ SIOCGIFNETMASK = 0xc0906925
+ SIOCGIFPDSTADDR = 0xc0906948
+ SIOCGIFPSRCADDR = 0xc0906947
+ SIOCGLIFADDR = 0xc118691d
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCGLINKSTR = 0xc0286987
+ SIOCGLOWAT = 0x40047303
+ SIOCGPGRP = 0x40047309
+ SIOCGVH = 0xc0906983
+ SIOCIFCREATE = 0x8090697a
+ SIOCIFDESTROY = 0x80906979
+ SIOCIFGCLONERS = 0xc0106978
+ SIOCINITIFADDR = 0xc0706984
+ SIOCSDRVSPEC = 0x8028697b
+ SIOCSETPFSYNC = 0x809069f7
+ SIOCSHIWAT = 0x80047300
+ SIOCSIFADDR = 0x8090690c
+ SIOCSIFADDRPREF = 0x8098691f
+ SIOCSIFBRDADDR = 0x80906913
+ SIOCSIFCAP = 0x80206975
+ SIOCSIFDSTADDR = 0x8090690e
+ SIOCSIFFLAGS = 0x80906910
+ SIOCSIFGENERIC = 0x80906939
+ SIOCSIFMEDIA = 0xc0906935
+ SIOCSIFMETRIC = 0x80906918
+ SIOCSIFMTU = 0x8090697f
+ SIOCSIFNETMASK = 0x80906916
+ SIOCSIFPHYADDR = 0x80406946
+ SIOCSLIFPHYADDR = 0x8118694a
+ SIOCSLINKSTR = 0x80286988
+ SIOCSLOWAT = 0x80047302
+ SIOCSPGRP = 0x80047308
+ SIOCSVH = 0xc0906982
+ SIOCZIFDATA = 0xc0986986
+ SOCK_CLOEXEC = 0x10000000
+ SOCK_DGRAM = 0x2
+ SOCK_FLAGS_MASK = 0xf0000000
+ SOCK_NONBLOCK = 0x20000000
+ SOCK_NOSIGPIPE = 0x40000000
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0xffff
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x2
+ SO_ACCEPTFILTER = 0x1000
+ SO_BROADCAST = 0x20
+ SO_DEBUG = 0x1
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_KEEPALIVE = 0x8
+ SO_LINGER = 0x80
+ SO_NOHEADER = 0x100a
+ SO_NOSIGPIPE = 0x800
+ SO_OOBINLINE = 0x100
+ SO_OVERFLOWED = 0x1009
+ SO_RCVBUF = 0x1002
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x100c
+ SO_REUSEADDR = 0x4
+ SO_REUSEPORT = 0x200
+ SO_SNDBUF = 0x1001
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x100b
+ SO_TIMESTAMP = 0x2000
+ SO_TYPE = 0x1008
+ SO_USELOOPBACK = 0x40
+ SYSCTL_VERSION = 0x1000000
+ SYSCTL_VERS_0 = 0x0
+ SYSCTL_VERS_1 = 0x1000000
+ SYSCTL_VERS_MASK = 0xff000000
+ S_ARCH1 = 0x10000
+ S_ARCH2 = 0x20000
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IFWHT = 0xe000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISTXT = 0x200
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ S_LOGIN_SET = 0x1
+ TCIFLUSH = 0x1
+ TCIOFLUSH = 0x3
+ TCOFLUSH = 0x2
+ TCP_CONGCTL = 0x20
+ TCP_KEEPCNT = 0x6
+ TCP_KEEPIDLE = 0x3
+ TCP_KEEPINIT = 0x7
+ TCP_KEEPINTVL = 0x5
+ TCP_MAXBURST = 0x4
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0x10
+ TCP_MINMSS = 0xd8
+ TCP_MSS = 0x218
+ TCP_NODELAY = 0x1
+ TCSAFLUSH = 0x2
+ TIOCCBRK = 0x2000747a
+ TIOCCDTR = 0x20007478
+ TIOCCONS = 0x80047462
+ TIOCDCDTIMESTAMP = 0x40107458
+ TIOCDRAIN = 0x2000745e
+ TIOCEXCL = 0x2000740d
+ TIOCEXT = 0x80047460
+ TIOCFLAG_CDTRCTS = 0x10
+ TIOCFLAG_CLOCAL = 0x2
+ TIOCFLAG_CRTSCTS = 0x4
+ TIOCFLAG_MDMBUF = 0x8
+ TIOCFLAG_SOFTCAR = 0x1
+ TIOCFLUSH = 0x80047410
+ TIOCGETA = 0x402c7413
+ TIOCGETD = 0x4004741a
+ TIOCGFLAGS = 0x4004745d
+ TIOCGLINED = 0x40207442
+ TIOCGPGRP = 0x40047477
+ TIOCGQSIZE = 0x40047481
+ TIOCGRANTPT = 0x20007447
+ TIOCGSID = 0x40047463
+ TIOCGSIZE = 0x40087468
+ TIOCGWINSZ = 0x40087468
+ TIOCMBIC = 0x8004746b
+ TIOCMBIS = 0x8004746c
+ TIOCMGET = 0x4004746a
+ TIOCMSET = 0x8004746d
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x20007471
+ TIOCNXCL = 0x2000740e
+ TIOCOUTQ = 0x40047473
+ TIOCPKT = 0x80047470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCPTMGET = 0x40287446
+ TIOCPTSNAME = 0x40287448
+ TIOCRCVFRAME = 0x80087445
+ TIOCREMOTE = 0x80047469
+ TIOCSBRK = 0x2000747b
+ TIOCSCTTY = 0x20007461
+ TIOCSDTR = 0x20007479
+ TIOCSETA = 0x802c7414
+ TIOCSETAF = 0x802c7416
+ TIOCSETAW = 0x802c7415
+ TIOCSETD = 0x8004741b
+ TIOCSFLAGS = 0x8004745c
+ TIOCSIG = 0x2000745f
+ TIOCSLINED = 0x80207443
+ TIOCSPGRP = 0x80047476
+ TIOCSQSIZE = 0x80047480
+ TIOCSSIZE = 0x80087467
+ TIOCSTART = 0x2000746e
+ TIOCSTAT = 0x80047465
+ TIOCSTI = 0x80017472
+ TIOCSTOP = 0x2000746f
+ TIOCSWINSZ = 0x80087467
+ TIOCUCNTL = 0x80047466
+ TIOCXMTFRAME = 0x80087444
+ TOSTOP = 0x400000
+ VDISCARD = 0xf
+ VDSUSP = 0xb
+ VEOF = 0x0
+ VEOL = 0x1
+ VEOL2 = 0x2
+ VERASE = 0x3
+ VINTR = 0x8
+ VKILL = 0x5
+ VLNEXT = 0xe
+ VMIN = 0x10
+ VQUIT = 0x9
+ VREPRINT = 0x6
+ VSTART = 0xc
+ VSTATUS = 0x12
+ VSTOP = 0xd
+ VSUSP = 0xa
+ VTIME = 0x11
+ VWERASE = 0x4
+ WALL = 0x8
+ WALLSIG = 0x8
+ WALTSIG = 0x4
+ WCLONE = 0x4
+ WCOREFLAG = 0x80
+ WNOHANG = 0x1
+ WNOWAIT = 0x10000
+ WNOZOMBIE = 0x20000
+ WOPTSCHECKED = 0x40000
+ WSTOPPED = 0x7f
+ WUNTRACED = 0x2
+)
+
+// Errors
+const (
+ E2BIG = syscall.Errno(0x7)
+ EACCES = syscall.Errno(0xd)
+ EADDRINUSE = syscall.Errno(0x30)
+ EADDRNOTAVAIL = syscall.Errno(0x31)
+ EAFNOSUPPORT = syscall.Errno(0x2f)
+ EAGAIN = syscall.Errno(0x23)
+ EALREADY = syscall.Errno(0x25)
+ EAUTH = syscall.Errno(0x50)
+ EBADF = syscall.Errno(0x9)
+ EBADMSG = syscall.Errno(0x58)
+ EBADRPC = syscall.Errno(0x48)
+ EBUSY = syscall.Errno(0x10)
+ ECANCELED = syscall.Errno(0x57)
+ ECHILD = syscall.Errno(0xa)
+ ECONNABORTED = syscall.Errno(0x35)
+ ECONNREFUSED = syscall.Errno(0x3d)
+ ECONNRESET = syscall.Errno(0x36)
+ EDEADLK = syscall.Errno(0xb)
+ EDESTADDRREQ = syscall.Errno(0x27)
+ EDOM = syscall.Errno(0x21)
+ EDQUOT = syscall.Errno(0x45)
+ EEXIST = syscall.Errno(0x11)
+ EFAULT = syscall.Errno(0xe)
+ EFBIG = syscall.Errno(0x1b)
+ EFTYPE = syscall.Errno(0x4f)
+ EHOSTDOWN = syscall.Errno(0x40)
+ EHOSTUNREACH = syscall.Errno(0x41)
+ EIDRM = syscall.Errno(0x52)
+ EILSEQ = syscall.Errno(0x55)
+ EINPROGRESS = syscall.Errno(0x24)
+ EINTR = syscall.Errno(0x4)
+ EINVAL = syscall.Errno(0x16)
+ EIO = syscall.Errno(0x5)
+ EISCONN = syscall.Errno(0x38)
+ EISDIR = syscall.Errno(0x15)
+ ELAST = syscall.Errno(0x60)
+ ELOOP = syscall.Errno(0x3e)
+ EMFILE = syscall.Errno(0x18)
+ EMLINK = syscall.Errno(0x1f)
+ EMSGSIZE = syscall.Errno(0x28)
+ EMULTIHOP = syscall.Errno(0x5e)
+ ENAMETOOLONG = syscall.Errno(0x3f)
+ ENEEDAUTH = syscall.Errno(0x51)
+ ENETDOWN = syscall.Errno(0x32)
+ ENETRESET = syscall.Errno(0x34)
+ ENETUNREACH = syscall.Errno(0x33)
+ ENFILE = syscall.Errno(0x17)
+ ENOATTR = syscall.Errno(0x5d)
+ ENOBUFS = syscall.Errno(0x37)
+ ENODATA = syscall.Errno(0x59)
+ ENODEV = syscall.Errno(0x13)
+ ENOENT = syscall.Errno(0x2)
+ ENOEXEC = syscall.Errno(0x8)
+ ENOLCK = syscall.Errno(0x4d)
+ ENOLINK = syscall.Errno(0x5f)
+ ENOMEM = syscall.Errno(0xc)
+ ENOMSG = syscall.Errno(0x53)
+ ENOPROTOOPT = syscall.Errno(0x2a)
+ ENOSPC = syscall.Errno(0x1c)
+ ENOSR = syscall.Errno(0x5a)
+ ENOSTR = syscall.Errno(0x5b)
+ ENOSYS = syscall.Errno(0x4e)
+ ENOTBLK = syscall.Errno(0xf)
+ ENOTCONN = syscall.Errno(0x39)
+ ENOTDIR = syscall.Errno(0x14)
+ ENOTEMPTY = syscall.Errno(0x42)
+ ENOTSOCK = syscall.Errno(0x26)
+ ENOTSUP = syscall.Errno(0x56)
+ ENOTTY = syscall.Errno(0x19)
+ ENXIO = syscall.Errno(0x6)
+ EOPNOTSUPP = syscall.Errno(0x2d)
+ EOVERFLOW = syscall.Errno(0x54)
+ EPERM = syscall.Errno(0x1)
+ EPFNOSUPPORT = syscall.Errno(0x2e)
+ EPIPE = syscall.Errno(0x20)
+ EPROCLIM = syscall.Errno(0x43)
+ EPROCUNAVAIL = syscall.Errno(0x4c)
+ EPROGMISMATCH = syscall.Errno(0x4b)
+ EPROGUNAVAIL = syscall.Errno(0x4a)
+ EPROTO = syscall.Errno(0x60)
+ EPROTONOSUPPORT = syscall.Errno(0x2b)
+ EPROTOTYPE = syscall.Errno(0x29)
+ ERANGE = syscall.Errno(0x22)
+ EREMOTE = syscall.Errno(0x47)
+ EROFS = syscall.Errno(0x1e)
+ ERPCMISMATCH = syscall.Errno(0x49)
+ ESHUTDOWN = syscall.Errno(0x3a)
+ ESOCKTNOSUPPORT = syscall.Errno(0x2c)
+ ESPIPE = syscall.Errno(0x1d)
+ ESRCH = syscall.Errno(0x3)
+ ESTALE = syscall.Errno(0x46)
+ ETIME = syscall.Errno(0x5c)
+ ETIMEDOUT = syscall.Errno(0x3c)
+ ETOOMANYREFS = syscall.Errno(0x3b)
+ ETXTBSY = syscall.Errno(0x1a)
+ EUSERS = syscall.Errno(0x44)
+ EWOULDBLOCK = syscall.Errno(0x23)
+ EXDEV = syscall.Errno(0x12)
+)
+
+// Signals
+const (
+ SIGABRT = syscall.Signal(0x6)
+ SIGALRM = syscall.Signal(0xe)
+ SIGBUS = syscall.Signal(0xa)
+ SIGCHLD = syscall.Signal(0x14)
+ SIGCONT = syscall.Signal(0x13)
+ SIGEMT = syscall.Signal(0x7)
+ SIGFPE = syscall.Signal(0x8)
+ SIGHUP = syscall.Signal(0x1)
+ SIGILL = syscall.Signal(0x4)
+ SIGINFO = syscall.Signal(0x1d)
+ SIGINT = syscall.Signal(0x2)
+ SIGIO = syscall.Signal(0x17)
+ SIGIOT = syscall.Signal(0x6)
+ SIGKILL = syscall.Signal(0x9)
+ SIGPIPE = syscall.Signal(0xd)
+ SIGPROF = syscall.Signal(0x1b)
+ SIGPWR = syscall.Signal(0x20)
+ SIGQUIT = syscall.Signal(0x3)
+ SIGSEGV = syscall.Signal(0xb)
+ SIGSTOP = syscall.Signal(0x11)
+ SIGSYS = syscall.Signal(0xc)
+ SIGTERM = syscall.Signal(0xf)
+ SIGTRAP = syscall.Signal(0x5)
+ SIGTSTP = syscall.Signal(0x12)
+ SIGTTIN = syscall.Signal(0x15)
+ SIGTTOU = syscall.Signal(0x16)
+ SIGURG = syscall.Signal(0x10)
+ SIGUSR1 = syscall.Signal(0x1e)
+ SIGUSR2 = syscall.Signal(0x1f)
+ SIGVTALRM = syscall.Signal(0x1a)
+ SIGWINCH = syscall.Signal(0x1c)
+ SIGXCPU = syscall.Signal(0x18)
+ SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+ num syscall.Errno
+ name string
+ desc string
+}{
+ {1, "EPERM", "operation not permitted"},
+ {2, "ENOENT", "no such file or directory"},
+ {3, "ESRCH", "no such process"},
+ {4, "EINTR", "interrupted system call"},
+ {5, "EIO", "input/output error"},
+ {6, "ENXIO", "device not configured"},
+ {7, "E2BIG", "argument list too long"},
+ {8, "ENOEXEC", "exec format error"},
+ {9, "EBADF", "bad file descriptor"},
+ {10, "ECHILD", "no child processes"},
+ {11, "EDEADLK", "resource deadlock avoided"},
+ {12, "ENOMEM", "cannot allocate memory"},
+ {13, "EACCES", "permission denied"},
+ {14, "EFAULT", "bad address"},
+ {15, "ENOTBLK", "block device required"},
+ {16, "EBUSY", "device busy"},
+ {17, "EEXIST", "file exists"},
+ {18, "EXDEV", "cross-device link"},
+ {19, "ENODEV", "operation not supported by device"},
+ {20, "ENOTDIR", "not a directory"},
+ {21, "EISDIR", "is a directory"},
+ {22, "EINVAL", "invalid argument"},
+ {23, "ENFILE", "too many open files in system"},
+ {24, "EMFILE", "too many open files"},
+ {25, "ENOTTY", "inappropriate ioctl for device"},
+ {26, "ETXTBSY", "text file busy"},
+ {27, "EFBIG", "file too large"},
+ {28, "ENOSPC", "no space left on device"},
+ {29, "ESPIPE", "illegal seek"},
+ {30, "EROFS", "read-only file system"},
+ {31, "EMLINK", "too many links"},
+ {32, "EPIPE", "broken pipe"},
+ {33, "EDOM", "numerical argument out of domain"},
+ {34, "ERANGE", "result too large or too small"},
+ {35, "EAGAIN", "resource temporarily unavailable"},
+ {36, "EINPROGRESS", "operation now in progress"},
+ {37, "EALREADY", "operation already in progress"},
+ {38, "ENOTSOCK", "socket operation on non-socket"},
+ {39, "EDESTADDRREQ", "destination address required"},
+ {40, "EMSGSIZE", "message too long"},
+ {41, "EPROTOTYPE", "protocol wrong type for socket"},
+ {42, "ENOPROTOOPT", "protocol option not available"},
+ {43, "EPROTONOSUPPORT", "protocol not supported"},
+ {44, "ESOCKTNOSUPPORT", "socket type not supported"},
+ {45, "EOPNOTSUPP", "operation not supported"},
+ {46, "EPFNOSUPPORT", "protocol family not supported"},
+ {47, "EAFNOSUPPORT", "address family not supported by protocol family"},
+ {48, "EADDRINUSE", "address already in use"},
+ {49, "EADDRNOTAVAIL", "can't assign requested address"},
+ {50, "ENETDOWN", "network is down"},
+ {51, "ENETUNREACH", "network is unreachable"},
+ {52, "ENETRESET", "network dropped connection on reset"},
+ {53, "ECONNABORTED", "software caused connection abort"},
+ {54, "ECONNRESET", "connection reset by peer"},
+ {55, "ENOBUFS", "no buffer space available"},
+ {56, "EISCONN", "socket is already connected"},
+ {57, "ENOTCONN", "socket is not connected"},
+ {58, "ESHUTDOWN", "can't send after socket shutdown"},
+ {59, "ETOOMANYREFS", "too many references: can't splice"},
+ {60, "ETIMEDOUT", "connection timed out"},
+ {61, "ECONNREFUSED", "connection refused"},
+ {62, "ELOOP", "too many levels of symbolic links"},
+ {63, "ENAMETOOLONG", "file name too long"},
+ {64, "EHOSTDOWN", "host is down"},
+ {65, "EHOSTUNREACH", "no route to host"},
+ {66, "ENOTEMPTY", "directory not empty"},
+ {67, "EPROCLIM", "too many processes"},
+ {68, "EUSERS", "too many users"},
+ {69, "EDQUOT", "disc quota exceeded"},
+ {70, "ESTALE", "stale NFS file handle"},
+ {71, "EREMOTE", "too many levels of remote in path"},
+ {72, "EBADRPC", "RPC struct is bad"},
+ {73, "ERPCMISMATCH", "RPC version wrong"},
+ {74, "EPROGUNAVAIL", "RPC prog. not avail"},
+ {75, "EPROGMISMATCH", "program version wrong"},
+ {76, "EPROCUNAVAIL", "bad procedure for program"},
+ {77, "ENOLCK", "no locks available"},
+ {78, "ENOSYS", "function not implemented"},
+ {79, "EFTYPE", "inappropriate file type or format"},
+ {80, "EAUTH", "authentication error"},
+ {81, "ENEEDAUTH", "need authenticator"},
+ {82, "EIDRM", "identifier removed"},
+ {83, "ENOMSG", "no message of desired type"},
+ {84, "EOVERFLOW", "value too large to be stored in data type"},
+ {85, "EILSEQ", "illegal byte sequence"},
+ {86, "ENOTSUP", "not supported"},
+ {87, "ECANCELED", "operation Canceled"},
+ {88, "EBADMSG", "bad or Corrupt message"},
+ {89, "ENODATA", "no message available"},
+ {90, "ENOSR", "no STREAM resources"},
+ {91, "ENOSTR", "not a STREAM"},
+ {92, "ETIME", "STREAM ioctl timeout"},
+ {93, "ENOATTR", "attribute not found"},
+ {94, "EMULTIHOP", "multihop attempted"},
+ {95, "ENOLINK", "link has been severed"},
+ {96, "ELAST", "protocol error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+ num syscall.Signal
+ name string
+ desc string
+}{
+ {1, "SIGHUP", "hangup"},
+ {2, "SIGINT", "interrupt"},
+ {3, "SIGQUIT", "quit"},
+ {4, "SIGILL", "illegal instruction"},
+ {5, "SIGTRAP", "trace/BPT trap"},
+ {6, "SIGIOT", "abort trap"},
+ {7, "SIGEMT", "EMT trap"},
+ {8, "SIGFPE", "floating point exception"},
+ {9, "SIGKILL", "killed"},
+ {10, "SIGBUS", "bus error"},
+ {11, "SIGSEGV", "segmentation fault"},
+ {12, "SIGSYS", "bad system call"},
+ {13, "SIGPIPE", "broken pipe"},
+ {14, "SIGALRM", "alarm clock"},
+ {15, "SIGTERM", "terminated"},
+ {16, "SIGURG", "urgent I/O condition"},
+ {17, "SIGSTOP", "stopped (signal)"},
+ {18, "SIGTSTP", "stopped"},
+ {19, "SIGCONT", "continued"},
+ {20, "SIGCHLD", "child exited"},
+ {21, "SIGTTIN", "stopped (tty input)"},
+ {22, "SIGTTOU", "stopped (tty output)"},
+ {23, "SIGIO", "I/O possible"},
+ {24, "SIGXCPU", "cputime limit exceeded"},
+ {25, "SIGXFSZ", "filesize limit exceeded"},
+ {26, "SIGVTALRM", "virtual timer expired"},
+ {27, "SIGPROF", "profiling timer expired"},
+ {28, "SIGWINCH", "window size changes"},
+ {29, "SIGINFO", "information request"},
+ {30, "SIGUSR1", "user defined signal 1"},
+ {31, "SIGUSR2", "user defined signal 2"},
+ {32, "SIGPWR", "power fail/restart"},
+}
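
Note: the generated errorList and signalList tables above are what the package uses to map raw errno/signal numbers back to symbolic names and descriptions. As a rough, self-contained illustration of how such a sorted table can be consulted (a sketch only, with invented helper names, not the package's own lookup code):

    package main

    import (
    	"fmt"
    	"sort"
    	"syscall"
    )

    // A trimmed copy of the generated table shape; the real tables are
    // much longer and are emitted by mkerrors.sh.
    var errnoTable = []struct {
    	num  syscall.Errno
    	name string
    	desc string
    }{
    	{1, "EPERM", "operation not permitted"},
    	{2, "ENOENT", "no such file or directory"},
    	{60, "ETIMEDOUT", "connection timed out"},
    }

    // errnoName binary-searches the table (sorted by number) and returns
    // the symbolic name, or "" if the value is not present.
    func errnoName(e syscall.Errno) string {
    	i := sort.Search(len(errnoTable), func(i int) bool {
    		return errnoTable[i].num >= e
    	})
    	if i < len(errnoTable) && errnoTable[i].num == e {
    		return errnoTable[i].name
    	}
    	return ""
    }

    func main() {
    	fmt.Println(errnoName(syscall.Errno(2))) // ENOENT
    }
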
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
new file mode 100644
index 000000000..ec5f92de8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
@@ -0,0 +1,1789 @@
+// mkerrors.sh -m64
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build arm64,openbsd
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -m64 _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ AF_APPLETALK = 0x10
+ AF_BLUETOOTH = 0x20
+ AF_CCITT = 0xa
+ AF_CHAOS = 0x5
+ AF_CNT = 0x15
+ AF_COIP = 0x14
+ AF_DATAKIT = 0x9
+ AF_DECnet = 0xc
+ AF_DLI = 0xd
+ AF_E164 = 0x1a
+ AF_ECMA = 0x8
+ AF_ENCAP = 0x1c
+ AF_HYLINK = 0xf
+ AF_IMPLINK = 0x3
+ AF_INET = 0x2
+ AF_INET6 = 0x18
+ AF_IPX = 0x17
+ AF_ISDN = 0x1a
+ AF_ISO = 0x7
+ AF_KEY = 0x1e
+ AF_LAT = 0xe
+ AF_LINK = 0x12
+ AF_LOCAL = 0x1
+ AF_MAX = 0x24
+ AF_MPLS = 0x21
+ AF_NATM = 0x1b
+ AF_NS = 0x6
+ AF_OSI = 0x7
+ AF_PUP = 0x4
+ AF_ROUTE = 0x11
+ AF_SIP = 0x1d
+ AF_SNA = 0xb
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ ALTWERASE = 0x200
+ ARPHRD_ETHER = 0x1
+ ARPHRD_FRELAY = 0xf
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ B0 = 0x0
+ B110 = 0x6e
+ B115200 = 0x1c200
+ B1200 = 0x4b0
+ B134 = 0x86
+ B14400 = 0x3840
+ B150 = 0x96
+ B1800 = 0x708
+ B19200 = 0x4b00
+ B200 = 0xc8
+ B230400 = 0x38400
+ B2400 = 0x960
+ B28800 = 0x7080
+ B300 = 0x12c
+ B38400 = 0x9600
+ B4800 = 0x12c0
+ B50 = 0x32
+ B57600 = 0xe100
+ B600 = 0x258
+ B7200 = 0x1c20
+ B75 = 0x4b
+ B76800 = 0x12c00
+ B9600 = 0x2580
+ BIOCFLUSH = 0x20004268
+ BIOCGBLEN = 0x40044266
+ BIOCGDIRFILT = 0x4004427c
+ BIOCGDLT = 0x4004426a
+ BIOCGDLTLIST = 0xc010427b
+ BIOCGETIF = 0x4020426b
+ BIOCGFILDROP = 0x40044278
+ BIOCGHDRCMPLT = 0x40044274
+ BIOCGRSIG = 0x40044273
+ BIOCGRTIMEOUT = 0x4010426e
+ BIOCGSTATS = 0x4008426f
+ BIOCIMMEDIATE = 0x80044270
+ BIOCLOCK = 0x20004276
+ BIOCPROMISC = 0x20004269
+ BIOCSBLEN = 0xc0044266
+ BIOCSDIRFILT = 0x8004427d
+ BIOCSDLT = 0x8004427a
+ BIOCSETF = 0x80104267
+ BIOCSETIF = 0x8020426c
+ BIOCSETWF = 0x80104277
+ BIOCSFILDROP = 0x80044279
+ BIOCSHDRCMPLT = 0x80044275
+ BIOCSRSIG = 0x80044272
+ BIOCSRTIMEOUT = 0x8010426d
+ BIOCVERSION = 0x40044271
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALIGNMENT = 0x4
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DIRECTION_IN = 0x1
+ BPF_DIRECTION_OUT = 0x2
+ BPF_DIV = 0x30
+ BPF_FILDROP_CAPTURE = 0x1
+ BPF_FILDROP_DROP = 0x2
+ BPF_FILDROP_PASS = 0x0
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXBUFSIZE = 0x200000
+ BPF_MAXINSNS = 0x200
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINBUFSIZE = 0x20
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_OR = 0x40
+ BPF_RELEASE = 0x30bb6
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BRKINT = 0x2
+ CFLUSH = 0xf
+ CLOCAL = 0x8000
+ CLOCK_BOOTTIME = 0x6
+ CLOCK_MONOTONIC = 0x3
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_THREAD_CPUTIME_ID = 0x4
+ CLOCK_UPTIME = 0x5
+ CREAD = 0x800
+ CRTSCTS = 0x10000
+ CS5 = 0x0
+ CS6 = 0x100
+ CS7 = 0x200
+ CS8 = 0x300
+ CSIZE = 0x300
+ CSTART = 0x11
+ CSTATUS = 0xff
+ CSTOP = 0x13
+ CSTOPB = 0x400
+ CSUSP = 0x1a
+ CTL_HW = 0x6
+ CTL_KERN = 0x1
+ CTL_MAXNAME = 0xc
+ CTL_NET = 0x4
+ DIOCOSFPFLUSH = 0x2000444e
+ DLT_ARCNET = 0x7
+ DLT_ATM_RFC1483 = 0xb
+ DLT_AX25 = 0x3
+ DLT_CHAOS = 0x5
+ DLT_C_HDLC = 0x68
+ DLT_EN10MB = 0x1
+ DLT_EN3MB = 0x2
+ DLT_ENC = 0xd
+ DLT_FDDI = 0xa
+ DLT_IEEE802 = 0x6
+ DLT_IEEE802_11 = 0x69
+ DLT_IEEE802_11_RADIO = 0x7f
+ DLT_LOOP = 0xc
+ DLT_MPLS = 0xdb
+ DLT_NULL = 0x0
+ DLT_OPENFLOW = 0x10b
+ DLT_PFLOG = 0x75
+ DLT_PFSYNC = 0x12
+ DLT_PPP = 0x9
+ DLT_PPP_BSDOS = 0x10
+ DLT_PPP_ETHER = 0x33
+ DLT_PPP_SERIAL = 0x32
+ DLT_PRONET = 0x4
+ DLT_RAW = 0xe
+ DLT_SLIP = 0x8
+ DLT_SLIP_BSDOS = 0xf
+ DLT_USBPCAP = 0xf9
+ DLT_USER0 = 0x93
+ DLT_USER1 = 0x94
+ DLT_USER10 = 0x9d
+ DLT_USER11 = 0x9e
+ DLT_USER12 = 0x9f
+ DLT_USER13 = 0xa0
+ DLT_USER14 = 0xa1
+ DLT_USER15 = 0xa2
+ DLT_USER2 = 0x95
+ DLT_USER3 = 0x96
+ DLT_USER4 = 0x97
+ DLT_USER5 = 0x98
+ DLT_USER6 = 0x99
+ DLT_USER7 = 0x9a
+ DLT_USER8 = 0x9b
+ DLT_USER9 = 0x9c
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ ECHO = 0x8
+ ECHOCTL = 0x40
+ ECHOE = 0x2
+ ECHOK = 0x4
+ ECHOKE = 0x1
+ ECHONL = 0x10
+ ECHOPRT = 0x20
+ EMT_TAGOVF = 0x1
+ EMUL_ENABLED = 0x1
+ EMUL_NATIVE = 0x2
+ ENDRUNDISC = 0x9
+ ETHERMIN = 0x2e
+ ETHERMTU = 0x5dc
+ ETHERTYPE_8023 = 0x4
+ ETHERTYPE_AARP = 0x80f3
+ ETHERTYPE_ACCTON = 0x8390
+ ETHERTYPE_AEONIC = 0x8036
+ ETHERTYPE_ALPHA = 0x814a
+ ETHERTYPE_AMBER = 0x6008
+ ETHERTYPE_AMOEBA = 0x8145
+ ETHERTYPE_AOE = 0x88a2
+ ETHERTYPE_APOLLO = 0x80f7
+ ETHERTYPE_APOLLODOMAIN = 0x8019
+ ETHERTYPE_APPLETALK = 0x809b
+ ETHERTYPE_APPLITEK = 0x80c7
+ ETHERTYPE_ARGONAUT = 0x803a
+ ETHERTYPE_ARP = 0x806
+ ETHERTYPE_AT = 0x809b
+ ETHERTYPE_ATALK = 0x809b
+ ETHERTYPE_ATOMIC = 0x86df
+ ETHERTYPE_ATT = 0x8069
+ ETHERTYPE_ATTSTANFORD = 0x8008
+ ETHERTYPE_AUTOPHON = 0x806a
+ ETHERTYPE_AXIS = 0x8856
+ ETHERTYPE_BCLOOP = 0x9003
+ ETHERTYPE_BOFL = 0x8102
+ ETHERTYPE_CABLETRON = 0x7034
+ ETHERTYPE_CHAOS = 0x804
+ ETHERTYPE_COMDESIGN = 0x806c
+ ETHERTYPE_COMPUGRAPHIC = 0x806d
+ ETHERTYPE_COUNTERPOINT = 0x8062
+ ETHERTYPE_CRONUS = 0x8004
+ ETHERTYPE_CRONUSVLN = 0x8003
+ ETHERTYPE_DCA = 0x1234
+ ETHERTYPE_DDE = 0x807b
+ ETHERTYPE_DEBNI = 0xaaaa
+ ETHERTYPE_DECAM = 0x8048
+ ETHERTYPE_DECCUST = 0x6006
+ ETHERTYPE_DECDIAG = 0x6005
+ ETHERTYPE_DECDNS = 0x803c
+ ETHERTYPE_DECDTS = 0x803e
+ ETHERTYPE_DECEXPER = 0x6000
+ ETHERTYPE_DECLAST = 0x8041
+ ETHERTYPE_DECLTM = 0x803f
+ ETHERTYPE_DECMUMPS = 0x6009
+ ETHERTYPE_DECNETBIOS = 0x8040
+ ETHERTYPE_DELTACON = 0x86de
+ ETHERTYPE_DIDDLE = 0x4321
+ ETHERTYPE_DLOG1 = 0x660
+ ETHERTYPE_DLOG2 = 0x661
+ ETHERTYPE_DN = 0x6003
+ ETHERTYPE_DOGFIGHT = 0x1989
+ ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_ECMA = 0x803
+ ETHERTYPE_ENCRYPT = 0x803d
+ ETHERTYPE_ES = 0x805d
+ ETHERTYPE_EXCELAN = 0x8010
+ ETHERTYPE_EXPERDATA = 0x8049
+ ETHERTYPE_FLIP = 0x8146
+ ETHERTYPE_FLOWCONTROL = 0x8808
+ ETHERTYPE_FRARP = 0x808
+ ETHERTYPE_GENDYN = 0x8068
+ ETHERTYPE_HAYES = 0x8130
+ ETHERTYPE_HIPPI_FP = 0x8180
+ ETHERTYPE_HITACHI = 0x8820
+ ETHERTYPE_HP = 0x8005
+ ETHERTYPE_IEEEPUP = 0xa00
+ ETHERTYPE_IEEEPUPAT = 0xa01
+ ETHERTYPE_IMLBL = 0x4c42
+ ETHERTYPE_IMLBLDIAG = 0x424c
+ ETHERTYPE_IP = 0x800
+ ETHERTYPE_IPAS = 0x876c
+ ETHERTYPE_IPV6 = 0x86dd
+ ETHERTYPE_IPX = 0x8137
+ ETHERTYPE_IPXNEW = 0x8037
+ ETHERTYPE_KALPANA = 0x8582
+ ETHERTYPE_LANBRIDGE = 0x8038
+ ETHERTYPE_LANPROBE = 0x8888
+ ETHERTYPE_LAT = 0x6004
+ ETHERTYPE_LBACK = 0x9000
+ ETHERTYPE_LITTLE = 0x8060
+ ETHERTYPE_LLDP = 0x88cc
+ ETHERTYPE_LOGICRAFT = 0x8148
+ ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MATRA = 0x807a
+ ETHERTYPE_MAX = 0xffff
+ ETHERTYPE_MERIT = 0x807c
+ ETHERTYPE_MICP = 0x873a
+ ETHERTYPE_MOPDL = 0x6001
+ ETHERTYPE_MOPRC = 0x6002
+ ETHERTYPE_MOTOROLA = 0x818d
+ ETHERTYPE_MPLS = 0x8847
+ ETHERTYPE_MPLS_MCAST = 0x8848
+ ETHERTYPE_MUMPS = 0x813f
+ ETHERTYPE_NBPCC = 0x3c04
+ ETHERTYPE_NBPCLAIM = 0x3c09
+ ETHERTYPE_NBPCLREQ = 0x3c05
+ ETHERTYPE_NBPCLRSP = 0x3c06
+ ETHERTYPE_NBPCREQ = 0x3c02
+ ETHERTYPE_NBPCRSP = 0x3c03
+ ETHERTYPE_NBPDG = 0x3c07
+ ETHERTYPE_NBPDGB = 0x3c08
+ ETHERTYPE_NBPDLTE = 0x3c0a
+ ETHERTYPE_NBPRAR = 0x3c0c
+ ETHERTYPE_NBPRAS = 0x3c0b
+ ETHERTYPE_NBPRST = 0x3c0d
+ ETHERTYPE_NBPSCD = 0x3c01
+ ETHERTYPE_NBPVCD = 0x3c00
+ ETHERTYPE_NBS = 0x802
+ ETHERTYPE_NCD = 0x8149
+ ETHERTYPE_NESTAR = 0x8006
+ ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NOVELL = 0x8138
+ ETHERTYPE_NS = 0x600
+ ETHERTYPE_NSAT = 0x601
+ ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NTRAILER = 0x10
+ ETHERTYPE_OS9 = 0x7007
+ ETHERTYPE_OS9NET = 0x7009
+ ETHERTYPE_PACER = 0x80c6
+ ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PBB = 0x88e7
+ ETHERTYPE_PCS = 0x4242
+ ETHERTYPE_PLANNING = 0x8044
+ ETHERTYPE_PPP = 0x880b
+ ETHERTYPE_PPPOE = 0x8864
+ ETHERTYPE_PPPOEDISC = 0x8863
+ ETHERTYPE_PRIMENTS = 0x7031
+ ETHERTYPE_PUP = 0x200
+ ETHERTYPE_PUPAT = 0x200
+ ETHERTYPE_QINQ = 0x88a8
+ ETHERTYPE_RACAL = 0x7030
+ ETHERTYPE_RATIONAL = 0x8150
+ ETHERTYPE_RAWFR = 0x6559
+ ETHERTYPE_RCL = 0x1995
+ ETHERTYPE_RDP = 0x8739
+ ETHERTYPE_RETIX = 0x80f2
+ ETHERTYPE_REVARP = 0x8035
+ ETHERTYPE_SCA = 0x6007
+ ETHERTYPE_SECTRA = 0x86db
+ ETHERTYPE_SECUREDATA = 0x876d
+ ETHERTYPE_SGITW = 0x817e
+ ETHERTYPE_SG_BOUNCE = 0x8016
+ ETHERTYPE_SG_DIAG = 0x8013
+ ETHERTYPE_SG_NETGAMES = 0x8014
+ ETHERTYPE_SG_RESV = 0x8015
+ ETHERTYPE_SIMNET = 0x5208
+ ETHERTYPE_SLOW = 0x8809
+ ETHERTYPE_SNA = 0x80d5
+ ETHERTYPE_SNMP = 0x814c
+ ETHERTYPE_SONIX = 0xfaf5
+ ETHERTYPE_SPIDER = 0x809f
+ ETHERTYPE_SPRITE = 0x500
+ ETHERTYPE_STP = 0x8181
+ ETHERTYPE_TALARIS = 0x812b
+ ETHERTYPE_TALARISMC = 0x852b
+ ETHERTYPE_TCPCOMP = 0x876b
+ ETHERTYPE_TCPSM = 0x9002
+ ETHERTYPE_TEC = 0x814f
+ ETHERTYPE_TIGAN = 0x802f
+ ETHERTYPE_TRAIL = 0x1000
+ ETHERTYPE_TRANSETHER = 0x6558
+ ETHERTYPE_TYMSHARE = 0x802e
+ ETHERTYPE_UBBST = 0x7005
+ ETHERTYPE_UBDEBUG = 0x900
+ ETHERTYPE_UBDIAGLOOP = 0x7002
+ ETHERTYPE_UBDL = 0x7000
+ ETHERTYPE_UBNIU = 0x7001
+ ETHERTYPE_UBNMC = 0x7003
+ ETHERTYPE_VALID = 0x1600
+ ETHERTYPE_VARIAN = 0x80dd
+ ETHERTYPE_VAXELN = 0x803b
+ ETHERTYPE_VEECO = 0x8067
+ ETHERTYPE_VEXP = 0x805b
+ ETHERTYPE_VGLAB = 0x8131
+ ETHERTYPE_VINES = 0xbad
+ ETHERTYPE_VINESECHO = 0xbaf
+ ETHERTYPE_VINESLOOP = 0xbae
+ ETHERTYPE_VITAL = 0xff00
+ ETHERTYPE_VLAN = 0x8100
+ ETHERTYPE_VLTLMAN = 0x8080
+ ETHERTYPE_VPROD = 0x805c
+ ETHERTYPE_VURESERVED = 0x8147
+ ETHERTYPE_WATERLOO = 0x8130
+ ETHERTYPE_WELLFLEET = 0x8103
+ ETHERTYPE_X25 = 0x805
+ ETHERTYPE_X75 = 0x801
+ ETHERTYPE_XNSSM = 0x9001
+ ETHERTYPE_XTP = 0x817d
+ ETHER_ADDR_LEN = 0x6
+ ETHER_ALIGN = 0x2
+ ETHER_CRC_LEN = 0x4
+ ETHER_CRC_POLY_BE = 0x4c11db6
+ ETHER_CRC_POLY_LE = 0xedb88320
+ ETHER_HDR_LEN = 0xe
+ ETHER_MAX_DIX_LEN = 0x600
+ ETHER_MAX_HARDMTU_LEN = 0xff9b
+ ETHER_MAX_LEN = 0x5ee
+ ETHER_MIN_LEN = 0x40
+ ETHER_TYPE_LEN = 0x2
+ ETHER_VLAN_ENCAP_LEN = 0x4
+ EVFILT_AIO = -0x3
+ EVFILT_DEVICE = -0x8
+ EVFILT_PROC = -0x5
+ EVFILT_READ = -0x1
+ EVFILT_SIGNAL = -0x6
+ EVFILT_SYSCOUNT = 0x8
+ EVFILT_TIMER = -0x7
+ EVFILT_VNODE = -0x4
+ EVFILT_WRITE = -0x2
+ EVL_ENCAPLEN = 0x4
+ EVL_PRIO_BITS = 0xd
+ EVL_PRIO_MAX = 0x7
+ EVL_VLID_MASK = 0xfff
+ EVL_VLID_MAX = 0xffe
+ EVL_VLID_MIN = 0x1
+ EVL_VLID_NULL = 0x0
+ EV_ADD = 0x1
+ EV_CLEAR = 0x20
+ EV_DELETE = 0x2
+ EV_DISABLE = 0x8
+ EV_DISPATCH = 0x80
+ EV_ENABLE = 0x4
+ EV_EOF = 0x8000
+ EV_ERROR = 0x4000
+ EV_FLAG1 = 0x2000
+ EV_ONESHOT = 0x10
+ EV_RECEIPT = 0x40
+ EV_SYSFLAGS = 0xf000
+ EXTA = 0x4b00
+ EXTB = 0x9600
+ EXTPROC = 0x800
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x400
+ FLUSHO = 0x800000
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0xa
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLK = 0x7
+ F_GETOWN = 0x5
+ F_ISATTY = 0xb
+ F_OK = 0x0
+ F_RDLCK = 0x1
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLK = 0x8
+ F_SETLKW = 0x9
+ F_SETOWN = 0x6
+ F_UNLCK = 0x2
+ F_WRLCK = 0x3
+ HUPCL = 0x4000
+ HW_MACHINE = 0x1
+ ICANON = 0x100
+ ICMP6_FILTER = 0x12
+ ICRNL = 0x100
+ IEXTEN = 0x400
+ IFAN_ARRIVAL = 0x0
+ IFAN_DEPARTURE = 0x1
+ IFF_ALLMULTI = 0x200
+ IFF_BROADCAST = 0x2
+ IFF_CANTCHANGE = 0x8e52
+ IFF_DEBUG = 0x4
+ IFF_LINK0 = 0x1000
+ IFF_LINK1 = 0x2000
+ IFF_LINK2 = 0x4000
+ IFF_LOOPBACK = 0x8
+ IFF_MULTICAST = 0x8000
+ IFF_NOARP = 0x80
+ IFF_OACTIVE = 0x400
+ IFF_POINTOPOINT = 0x10
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SIMPLEX = 0x800
+ IFF_STATICARP = 0x20
+ IFF_UP = 0x1
+ IFNAMSIZ = 0x10
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BLUETOOTH = 0xf8
+ IFT_BRIDGE = 0xd1
+ IFT_BSC = 0x53
+ IFT_CARP = 0xf7
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DUMMY = 0xf1
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ECONET = 0xce
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf3
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE1394 = 0x90
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INFINIBAND = 0xc7
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L2VLAN = 0x87
+ IFT_L3IPVLAN = 0x88
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LINEGROUP = 0xd2
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MBIM = 0xfa
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf5
+ IFT_PFLOW = 0xf9
+ IFT_PFSYNC = 0xf6
+ IFT_PLC = 0xae
+ IFT_PON155 = 0xcf
+ IFT_PON622 = 0xd0
+ IFT_POS = 0xab
+ IFT_PPP = 0x17
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPATM = 0xc5
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPVIRTUAL = 0x35
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf2
+ IFT_Q2931 = 0xc9
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SIPSIG = 0xcc
+ IFT_SIPTG = 0xcb
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TELINK = 0xc8
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VIRTUALTG = 0xca
+ IFT_VOICEDID = 0xd5
+ IFT_VOICEEM = 0x64
+ IFT_VOICEEMFGD = 0xd3
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFGDEANA = 0xd4
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERCABLE = 0xc6
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLASSD_HOST = 0xfffffff
+ IN_CLASSD_NET = 0xf0000000
+ IN_CLASSD_NSHIFT = 0x1c
+ IN_LOOPBACKNET = 0x7f
+ IN_RFC3021_HOST = 0x1
+ IN_RFC3021_NET = 0xfffffffe
+ IN_RFC3021_NSHIFT = 0x1f
+ IPPROTO_AH = 0x33
+ IPPROTO_CARP = 0x70
+ IPPROTO_DIVERT = 0x102
+ IPPROTO_DONE = 0x101
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_EON = 0x50
+ IPPROTO_ESP = 0x32
+ IPPROTO_ETHERIP = 0x61
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GGP = 0x3
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPCOMP = 0x6c
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV4 = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_MAX = 0x100
+ IPPROTO_MAXID = 0x103
+ IPPROTO_MOBILE = 0x37
+ IPPROTO_MPLS = 0x89
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PFSYNC = 0xf0
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPV6_AUTH_LEVEL = 0x35
+ IPV6_AUTOFLOWLABEL = 0x3b
+ IPV6_CHECKSUM = 0x1a
+ IPV6_DEFAULT_MULTICAST_HOPS = 0x1
+ IPV6_DEFAULT_MULTICAST_LOOP = 0x1
+ IPV6_DEFHLIM = 0x40
+ IPV6_DONTFRAG = 0x3e
+ IPV6_DSTOPTS = 0x32
+ IPV6_ESP_NETWORK_LEVEL = 0x37
+ IPV6_ESP_TRANS_LEVEL = 0x36
+ IPV6_FAITH = 0x1d
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
+ IPV6_FRAGTTL = 0x78
+ IPV6_HLIMDEC = 0x1
+ IPV6_HOPLIMIT = 0x2f
+ IPV6_HOPOPTS = 0x31
+ IPV6_IPCOMP_LEVEL = 0x3c
+ IPV6_JOIN_GROUP = 0xc
+ IPV6_LEAVE_GROUP = 0xd
+ IPV6_MAXHLIM = 0xff
+ IPV6_MAXPACKET = 0xffff
+ IPV6_MINHOPCOUNT = 0x41
+ IPV6_MMTU = 0x500
+ IPV6_MULTICAST_HOPS = 0xa
+ IPV6_MULTICAST_IF = 0x9
+ IPV6_MULTICAST_LOOP = 0xb
+ IPV6_NEXTHOP = 0x30
+ IPV6_OPTIONS = 0x1
+ IPV6_PATHMTU = 0x2c
+ IPV6_PIPEX = 0x3f
+ IPV6_PKTINFO = 0x2e
+ IPV6_PORTRANGE = 0xe
+ IPV6_PORTRANGE_DEFAULT = 0x0
+ IPV6_PORTRANGE_HIGH = 0x1
+ IPV6_PORTRANGE_LOW = 0x2
+ IPV6_RECVDSTOPTS = 0x28
+ IPV6_RECVDSTPORT = 0x40
+ IPV6_RECVHOPLIMIT = 0x25
+ IPV6_RECVHOPOPTS = 0x27
+ IPV6_RECVPATHMTU = 0x2b
+ IPV6_RECVPKTINFO = 0x24
+ IPV6_RECVRTHDR = 0x26
+ IPV6_RECVTCLASS = 0x39
+ IPV6_RTABLE = 0x1021
+ IPV6_RTHDR = 0x33
+ IPV6_RTHDRDSTOPTS = 0x23
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_SOCKOPT_RESERVED1 = 0x3
+ IPV6_TCLASS = 0x3d
+ IPV6_UNICAST_HOPS = 0x4
+ IPV6_USE_MIN_MTU = 0x2a
+ IPV6_V6ONLY = 0x1b
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
+ IP_ADD_MEMBERSHIP = 0xc
+ IP_AUTH_LEVEL = 0x14
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0xd
+ IP_ESP_NETWORK_LEVEL = 0x16
+ IP_ESP_TRANS_LEVEL = 0x15
+ IP_HDRINCL = 0x2
+ IP_IPCOMP_LEVEL = 0x1d
+ IP_IPDEFTTL = 0x25
+ IP_IPSECFLOWINFO = 0x24
+ IP_IPSEC_LOCAL_AUTH = 0x1b
+ IP_IPSEC_LOCAL_CRED = 0x19
+ IP_IPSEC_LOCAL_ID = 0x17
+ IP_IPSEC_REMOTE_AUTH = 0x1c
+ IP_IPSEC_REMOTE_CRED = 0x1a
+ IP_IPSEC_REMOTE_ID = 0x18
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0xfff
+ IP_MF = 0x2000
+ IP_MINTTL = 0x20
+ IP_MIN_MEMBERSHIPS = 0xf
+ IP_MSS = 0x240
+ IP_MULTICAST_IF = 0x9
+ IP_MULTICAST_LOOP = 0xb
+ IP_MULTICAST_TTL = 0xa
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x1
+ IP_PIPEX = 0x22
+ IP_PORTRANGE = 0x13
+ IP_PORTRANGE_DEFAULT = 0x0
+ IP_PORTRANGE_HIGH = 0x1
+ IP_PORTRANGE_LOW = 0x2
+ IP_RECVDSTADDR = 0x7
+ IP_RECVDSTPORT = 0x21
+ IP_RECVIF = 0x1e
+ IP_RECVOPTS = 0x5
+ IP_RECVRETOPTS = 0x6
+ IP_RECVRTABLE = 0x23
+ IP_RECVTTL = 0x1f
+ IP_RETOPTS = 0x8
+ IP_RF = 0x8000
+ IP_RTABLE = 0x1021
+ IP_SENDSRCADDR = 0x7
+ IP_TOS = 0x3
+ IP_TTL = 0x4
+ ISIG = 0x80
+ ISTRIP = 0x20
+ IUCLC = 0x1000
+ IXANY = 0x800
+ IXOFF = 0x400
+ IXON = 0x200
+ KERN_HOSTNAME = 0xa
+ KERN_OSRELEASE = 0x2
+ KERN_OSTYPE = 0x1
+ KERN_VERSION = 0x4
+ LCNT_OVERLOAD_FLUSH = 0x6
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DONTNEED = 0x4
+ MADV_FREE = 0x6
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_SEQUENTIAL = 0x2
+ MADV_SPACEAVAIL = 0x5
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x1000
+ MAP_ANONYMOUS = 0x1000
+ MAP_CONCEAL = 0x8000
+ MAP_COPY = 0x2
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_FLAGMASK = 0xfff7
+ MAP_HASSEMAPHORE = 0x0
+ MAP_INHERIT = 0x0
+ MAP_INHERIT_COPY = 0x1
+ MAP_INHERIT_NONE = 0x2
+ MAP_INHERIT_SHARE = 0x0
+ MAP_INHERIT_ZERO = 0x3
+ MAP_NOEXTEND = 0x0
+ MAP_NORESERVE = 0x0
+ MAP_PRIVATE = 0x2
+ MAP_RENAME = 0x0
+ MAP_SHARED = 0x1
+ MAP_STACK = 0x4000
+ MAP_TRYFIXED = 0x0
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_DOOMED = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOPERM = 0x20
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x4000000
+ MNT_STALLED = 0x100000
+ MNT_SWAPPABLE = 0x200000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0x400ffff
+ MNT_WAIT = 0x1
+ MNT_WANTRDWR = 0x2000000
+ MNT_WXALLOWED = 0x800
+ MSG_BCAST = 0x100
+ MSG_CMSG_CLOEXEC = 0x800
+ MSG_CTRUNC = 0x20
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x80
+ MSG_EOR = 0x8
+ MSG_MCAST = 0x200
+ MSG_NOSIGNAL = 0x400
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_TRUNC = 0x10
+ MSG_WAITALL = 0x40
+ MS_ASYNC = 0x1
+ MS_INVALIDATE = 0x4
+ MS_SYNC = 0x2
+ NAME_MAX = 0xff
+ NET_RT_DUMP = 0x1
+ NET_RT_FLAGS = 0x2
+ NET_RT_IFLIST = 0x3
+ NET_RT_IFNAMES = 0x6
+ NET_RT_MAXID = 0x7
+ NET_RT_STATS = 0x4
+ NET_RT_TABLE = 0x5
+ NOFLSH = 0x80000000
+ NOKERNINFO = 0x2000000
+ NOTE_ATTRIB = 0x8
+ NOTE_CHANGE = 0x1
+ NOTE_CHILD = 0x4
+ NOTE_DELETE = 0x1
+ NOTE_EOF = 0x2
+ NOTE_EXEC = 0x20000000
+ NOTE_EXIT = 0x80000000
+ NOTE_EXTEND = 0x4
+ NOTE_FORK = 0x40000000
+ NOTE_LINK = 0x10
+ NOTE_LOWAT = 0x1
+ NOTE_PCTRLMASK = 0xf0000000
+ NOTE_PDATAMASK = 0xfffff
+ NOTE_RENAME = 0x20
+ NOTE_REVOKE = 0x40
+ NOTE_TRACK = 0x1
+ NOTE_TRACKERR = 0x2
+ NOTE_TRUNCATE = 0x80
+ NOTE_WRITE = 0x2
+ OCRNL = 0x10
+ OLCUC = 0x20
+ ONLCR = 0x2
+ ONLRET = 0x80
+ ONOCR = 0x40
+ ONOEOT = 0x8
+ OPOST = 0x1
+ OXTABS = 0x4
+ O_ACCMODE = 0x3
+ O_APPEND = 0x8
+ O_ASYNC = 0x40
+ O_CLOEXEC = 0x10000
+ O_CREAT = 0x200
+ O_DIRECTORY = 0x20000
+ O_DSYNC = 0x80
+ O_EXCL = 0x800
+ O_EXLOCK = 0x20
+ O_FSYNC = 0x80
+ O_NDELAY = 0x4
+ O_NOCTTY = 0x8000
+ O_NOFOLLOW = 0x100
+ O_NONBLOCK = 0x4
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x80
+ O_SHLOCK = 0x10
+ O_SYNC = 0x80
+ O_TRUNC = 0x400
+ O_WRONLY = 0x1
+ PARENB = 0x1000
+ PARMRK = 0x8
+ PARODD = 0x2000
+ PENDIN = 0x20000000
+ PF_FLUSH = 0x1
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_MEMLOCK = 0x6
+ RLIMIT_NOFILE = 0x8
+ RLIMIT_NPROC = 0x7
+ RLIMIT_RSS = 0x5
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RTAX_AUTHOR = 0x6
+ RTAX_BFD = 0xb
+ RTAX_BRD = 0x7
+ RTAX_DNS = 0xc
+ RTAX_DST = 0x0
+ RTAX_GATEWAY = 0x1
+ RTAX_GENMASK = 0x3
+ RTAX_IFA = 0x5
+ RTAX_IFP = 0x4
+ RTAX_LABEL = 0xa
+ RTAX_MAX = 0xf
+ RTAX_NETMASK = 0x2
+ RTAX_SEARCH = 0xe
+ RTAX_SRC = 0x8
+ RTAX_SRCMASK = 0x9
+ RTAX_STATIC = 0xd
+ RTA_AUTHOR = 0x40
+ RTA_BFD = 0x800
+ RTA_BRD = 0x80
+ RTA_DNS = 0x1000
+ RTA_DST = 0x1
+ RTA_GATEWAY = 0x2
+ RTA_GENMASK = 0x8
+ RTA_IFA = 0x20
+ RTA_IFP = 0x10
+ RTA_LABEL = 0x400
+ RTA_NETMASK = 0x4
+ RTA_SEARCH = 0x4000
+ RTA_SRC = 0x100
+ RTA_SRCMASK = 0x200
+ RTA_STATIC = 0x2000
+ RTF_ANNOUNCE = 0x4000
+ RTF_BFD = 0x1000000
+ RTF_BLACKHOLE = 0x1000
+ RTF_BROADCAST = 0x400000
+ RTF_CACHED = 0x20000
+ RTF_CLONED = 0x10000
+ RTF_CLONING = 0x100
+ RTF_CONNECTED = 0x800000
+ RTF_DONE = 0x40
+ RTF_DYNAMIC = 0x10
+ RTF_FMASK = 0x110fc08
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_LLINFO = 0x400
+ RTF_LOCAL = 0x200000
+ RTF_MODIFIED = 0x20
+ RTF_MPATH = 0x40000
+ RTF_MPLS = 0x100000
+ RTF_MULTICAST = 0x200
+ RTF_PERMANENT_ARP = 0x2000
+ RTF_PROTO1 = 0x8000
+ RTF_PROTO2 = 0x4000
+ RTF_PROTO3 = 0x2000
+ RTF_REJECT = 0x8
+ RTF_STATIC = 0x800
+ RTF_UP = 0x1
+ RTF_USETRAILERS = 0x8000
+ RTM_80211INFO = 0x15
+ RTM_ADD = 0x1
+ RTM_BFD = 0x12
+ RTM_CHANGE = 0x3
+ RTM_CHGADDRATTR = 0x14
+ RTM_DELADDR = 0xd
+ RTM_DELETE = 0x2
+ RTM_DESYNC = 0x10
+ RTM_GET = 0x4
+ RTM_IFANNOUNCE = 0xf
+ RTM_IFINFO = 0xe
+ RTM_INVALIDATE = 0x11
+ RTM_LOSING = 0x5
+ RTM_MAXSIZE = 0x800
+ RTM_MISS = 0x7
+ RTM_NEWADDR = 0xc
+ RTM_PROPOSAL = 0x13
+ RTM_REDIRECT = 0x6
+ RTM_RESOLVE = 0xb
+ RTM_RTTUNIT = 0xf4240
+ RTM_VERSION = 0x5
+ RTV_EXPIRE = 0x4
+ RTV_HOPCOUNT = 0x2
+ RTV_MTU = 0x1
+ RTV_RPIPE = 0x8
+ RTV_RTT = 0x40
+ RTV_RTTVAR = 0x80
+ RTV_SPIPE = 0x10
+ RTV_SSTHRESH = 0x20
+ RT_TABLEID_BITS = 0x8
+ RT_TABLEID_MASK = 0xff
+ RT_TABLEID_MAX = 0xff
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x4
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDMULTI = 0x80206931
+ SIOCAIFADDR = 0x8040691a
+ SIOCAIFGROUP = 0x80286987
+ SIOCATMARK = 0x40047307
+ SIOCBRDGADD = 0x8060693c
+ SIOCBRDGADDL = 0x80606949
+ SIOCBRDGADDS = 0x80606941
+ SIOCBRDGARL = 0x808c694d
+ SIOCBRDGDADDR = 0x81286947
+ SIOCBRDGDEL = 0x8060693d
+ SIOCBRDGDELS = 0x80606942
+ SIOCBRDGFLUSH = 0x80606948
+ SIOCBRDGFRL = 0x808c694e
+ SIOCBRDGGCACHE = 0xc0186941
+ SIOCBRDGGFD = 0xc0186952
+ SIOCBRDGGHT = 0xc0186951
+ SIOCBRDGGIFFLGS = 0xc060693e
+ SIOCBRDGGMA = 0xc0186953
+ SIOCBRDGGPARAM = 0xc0406958
+ SIOCBRDGGPRI = 0xc0186950
+ SIOCBRDGGRL = 0xc030694f
+ SIOCBRDGGTO = 0xc0186946
+ SIOCBRDGIFS = 0xc0606942
+ SIOCBRDGRTS = 0xc0206943
+ SIOCBRDGSADDR = 0xc1286944
+ SIOCBRDGSCACHE = 0x80186940
+ SIOCBRDGSFD = 0x80186952
+ SIOCBRDGSHT = 0x80186951
+ SIOCBRDGSIFCOST = 0x80606955
+ SIOCBRDGSIFFLGS = 0x8060693f
+ SIOCBRDGSIFPRIO = 0x80606954
+ SIOCBRDGSIFPROT = 0x8060694a
+ SIOCBRDGSMA = 0x80186953
+ SIOCBRDGSPRI = 0x80186950
+ SIOCBRDGSPROTO = 0x8018695a
+ SIOCBRDGSTO = 0x80186945
+ SIOCBRDGSTXHC = 0x80186959
+ SIOCDELLABEL = 0x80206997
+ SIOCDELMULTI = 0x80206932
+ SIOCDIFADDR = 0x80206919
+ SIOCDIFGROUP = 0x80286989
+ SIOCDIFPARENT = 0x802069b4
+ SIOCDIFPHYADDR = 0x80206949
+ SIOCDPWE3NEIGHBOR = 0x802069de
+ SIOCDVNETID = 0x802069af
+ SIOCGETKALIVE = 0xc01869a4
+ SIOCGETLABEL = 0x8020699a
+ SIOCGETMPWCFG = 0xc02069ae
+ SIOCGETPFLOW = 0xc02069fe
+ SIOCGETPFSYNC = 0xc02069f8
+ SIOCGETSGCNT = 0xc0207534
+ SIOCGETVIFCNT = 0xc0287533
+ SIOCGETVLAN = 0xc0206990
+ SIOCGIFADDR = 0xc0206921
+ SIOCGIFBRDADDR = 0xc0206923
+ SIOCGIFCONF = 0xc0106924
+ SIOCGIFDATA = 0xc020691b
+ SIOCGIFDESCR = 0xc0206981
+ SIOCGIFDSTADDR = 0xc0206922
+ SIOCGIFFLAGS = 0xc0206911
+ SIOCGIFGATTR = 0xc028698b
+ SIOCGIFGENERIC = 0xc020693a
+ SIOCGIFGLIST = 0xc028698d
+ SIOCGIFGMEMB = 0xc028698a
+ SIOCGIFGROUP = 0xc0286988
+ SIOCGIFHARDMTU = 0xc02069a5
+ SIOCGIFLLPRIO = 0xc02069b6
+ SIOCGIFMEDIA = 0xc0406938
+ SIOCGIFMETRIC = 0xc0206917
+ SIOCGIFMTU = 0xc020697e
+ SIOCGIFNETMASK = 0xc0206925
+ SIOCGIFPAIR = 0xc02069b1
+ SIOCGIFPARENT = 0xc02069b3
+ SIOCGIFPRIORITY = 0xc020699c
+ SIOCGIFRDOMAIN = 0xc02069a0
+ SIOCGIFRTLABEL = 0xc0206983
+ SIOCGIFRXR = 0x802069aa
+ SIOCGIFSFFPAGE = 0xc1126939
+ SIOCGIFXFLAGS = 0xc020699e
+ SIOCGLIFPHYADDR = 0xc218694b
+ SIOCGLIFPHYDF = 0xc02069c2
+ SIOCGLIFPHYECN = 0xc02069c8
+ SIOCGLIFPHYRTABLE = 0xc02069a2
+ SIOCGLIFPHYTTL = 0xc02069a9
+ SIOCGPGRP = 0x40047309
+ SIOCGPWE3 = 0xc0206998
+ SIOCGPWE3CTRLWORD = 0xc02069dc
+ SIOCGPWE3FAT = 0xc02069dd
+ SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGSPPPPARAMS = 0xc0206994
+ SIOCGTXHPRIO = 0xc02069c6
+ SIOCGUMBINFO = 0xc02069be
+ SIOCGUMBPARAM = 0xc02069c0
+ SIOCGVH = 0xc02069f6
+ SIOCGVNETFLOWID = 0xc02069c4
+ SIOCGVNETID = 0xc02069a7
+ SIOCIFAFATTACH = 0x801169ab
+ SIOCIFAFDETACH = 0x801169ac
+ SIOCIFCREATE = 0x8020697a
+ SIOCIFDESTROY = 0x80206979
+ SIOCIFGCLONERS = 0xc0106978
+ SIOCSETKALIVE = 0x801869a3
+ SIOCSETLABEL = 0x80206999
+ SIOCSETMPWCFG = 0x802069ad
+ SIOCSETPFLOW = 0x802069fd
+ SIOCSETPFSYNC = 0x802069f7
+ SIOCSETVLAN = 0x8020698f
+ SIOCSIFADDR = 0x8020690c
+ SIOCSIFBRDADDR = 0x80206913
+ SIOCSIFDESCR = 0x80206980
+ SIOCSIFDSTADDR = 0x8020690e
+ SIOCSIFFLAGS = 0x80206910
+ SIOCSIFGATTR = 0x8028698c
+ SIOCSIFGENERIC = 0x80206939
+ SIOCSIFLLADDR = 0x8020691f
+ SIOCSIFLLPRIO = 0x802069b5
+ SIOCSIFMEDIA = 0xc0206937
+ SIOCSIFMETRIC = 0x80206918
+ SIOCSIFMTU = 0x8020697f
+ SIOCSIFNETMASK = 0x80206916
+ SIOCSIFPAIR = 0x802069b0
+ SIOCSIFPARENT = 0x802069b2
+ SIOCSIFPRIORITY = 0x8020699b
+ SIOCSIFRDOMAIN = 0x8020699f
+ SIOCSIFRTLABEL = 0x80206982
+ SIOCSIFXFLAGS = 0x8020699d
+ SIOCSLIFPHYADDR = 0x8218694a
+ SIOCSLIFPHYDF = 0x802069c1
+ SIOCSLIFPHYECN = 0x802069c7
+ SIOCSLIFPHYRTABLE = 0x802069a1
+ SIOCSLIFPHYTTL = 0x802069a8
+ SIOCSPGRP = 0x80047308
+ SIOCSPWE3CTRLWORD = 0x802069dc
+ SIOCSPWE3FAT = 0x802069dd
+ SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSSPPPPARAMS = 0x80206993
+ SIOCSTXHPRIO = 0x802069c5
+ SIOCSUMBPARAM = 0x802069bf
+ SIOCSVH = 0xc02069f5
+ SIOCSVNETFLOWID = 0x802069c3
+ SIOCSVNETID = 0x802069a6
+ SIOCSWGDPID = 0xc018695b
+ SIOCSWGMAXFLOW = 0xc0186960
+ SIOCSWGMAXGROUP = 0xc018695d
+ SIOCSWSDPID = 0x8018695c
+ SIOCSWSPORTNO = 0xc060695f
+ SOCK_CLOEXEC = 0x8000
+ SOCK_DGRAM = 0x2
+ SOCK_DNS = 0x1000
+ SOCK_NONBLOCK = 0x4000
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0xffff
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x2
+ SO_BINDANY = 0x1000
+ SO_BROADCAST = 0x20
+ SO_DEBUG = 0x1
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_KEEPALIVE = 0x8
+ SO_LINGER = 0x80
+ SO_NETPROC = 0x1020
+ SO_OOBINLINE = 0x100
+ SO_PEERCRED = 0x1022
+ SO_RCVBUF = 0x1002
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x1006
+ SO_REUSEADDR = 0x4
+ SO_REUSEPORT = 0x200
+ SO_RTABLE = 0x1021
+ SO_SNDBUF = 0x1001
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x1005
+ SO_SPLICE = 0x1023
+ SO_TIMESTAMP = 0x800
+ SO_TYPE = 0x1008
+ SO_USELOOPBACK = 0x40
+ SO_ZEROIZE = 0x2000
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISTXT = 0x200
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ TCIFLUSH = 0x1
+ TCIOFF = 0x3
+ TCIOFLUSH = 0x3
+ TCION = 0x4
+ TCOFLUSH = 0x2
+ TCOOFF = 0x1
+ TCOON = 0x2
+ TCP_MAXBURST = 0x4
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_SACK = 0x3
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0x4
+ TCP_MSS = 0x200
+ TCP_NODELAY = 0x1
+ TCP_NOPUSH = 0x10
+ TCP_SACK_ENABLE = 0x8
+ TCSAFLUSH = 0x2
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
+ TIOCCBRK = 0x2000747a
+ TIOCCDTR = 0x20007478
+ TIOCCHKVERAUTH = 0x2000741e
+ TIOCCLRVERAUTH = 0x2000741d
+ TIOCCONS = 0x80047462
+ TIOCDRAIN = 0x2000745e
+ TIOCEXCL = 0x2000740d
+ TIOCEXT = 0x80047460
+ TIOCFLAG_CLOCAL = 0x2
+ TIOCFLAG_CRTSCTS = 0x4
+ TIOCFLAG_MDMBUF = 0x8
+ TIOCFLAG_PPS = 0x10
+ TIOCFLAG_SOFTCAR = 0x1
+ TIOCFLUSH = 0x80047410
+ TIOCGETA = 0x402c7413
+ TIOCGETD = 0x4004741a
+ TIOCGFLAGS = 0x4004745d
+ TIOCGPGRP = 0x40047477
+ TIOCGSID = 0x40047463
+ TIOCGTSTAMP = 0x4010745b
+ TIOCGWINSZ = 0x40087468
+ TIOCMBIC = 0x8004746b
+ TIOCMBIS = 0x8004746c
+ TIOCMGET = 0x4004746a
+ TIOCMODG = 0x4004746a
+ TIOCMODS = 0x8004746d
+ TIOCMSET = 0x8004746d
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x20007471
+ TIOCNXCL = 0x2000740e
+ TIOCOUTQ = 0x40047473
+ TIOCPKT = 0x80047470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCREMOTE = 0x80047469
+ TIOCSBRK = 0x2000747b
+ TIOCSCTTY = 0x20007461
+ TIOCSDTR = 0x20007479
+ TIOCSETA = 0x802c7414
+ TIOCSETAF = 0x802c7416
+ TIOCSETAW = 0x802c7415
+ TIOCSETD = 0x8004741b
+ TIOCSETVERAUTH = 0x8004741c
+ TIOCSFLAGS = 0x8004745c
+ TIOCSIG = 0x8004745f
+ TIOCSPGRP = 0x80047476
+ TIOCSTART = 0x2000746e
+ TIOCSTAT = 0x20007465
+ TIOCSTOP = 0x2000746f
+ TIOCSTSTAMP = 0x8008745a
+ TIOCSWINSZ = 0x80087467
+ TIOCUCNTL = 0x80047466
+ TIOCUCNTL_CBRK = 0x7a
+ TIOCUCNTL_SBRK = 0x7b
+ TOSTOP = 0x400000
+ UTIME_NOW = -0x2
+ UTIME_OMIT = -0x1
+ VDISCARD = 0xf
+ VDSUSP = 0xb
+ VEOF = 0x0
+ VEOL = 0x1
+ VEOL2 = 0x2
+ VERASE = 0x3
+ VINTR = 0x8
+ VKILL = 0x5
+ VLNEXT = 0xe
+ VMIN = 0x10
+ VM_ANONMIN = 0x7
+ VM_LOADAVG = 0x2
+ VM_MALLOC_CONF = 0xc
+ VM_MAXID = 0xd
+ VM_MAXSLP = 0xa
+ VM_METER = 0x1
+ VM_NKMEMPAGES = 0x6
+ VM_PSSTRINGS = 0x3
+ VM_SWAPENCRYPT = 0x5
+ VM_USPACE = 0xb
+ VM_UVMEXP = 0x4
+ VM_VNODEMIN = 0x9
+ VM_VTEXTMIN = 0x8
+ VQUIT = 0x9
+ VREPRINT = 0x6
+ VSTART = 0xc
+ VSTATUS = 0x12
+ VSTOP = 0xd
+ VSUSP = 0xa
+ VTIME = 0x11
+ VWERASE = 0x4
+ WALTSIG = 0x4
+ WCONTINUED = 0x8
+ WCOREFLAG = 0x80
+ WNOHANG = 0x1
+ WUNTRACED = 0x2
+ XCASE = 0x1000000
+)
+
+// Errors
+const (
+ E2BIG = syscall.Errno(0x7)
+ EACCES = syscall.Errno(0xd)
+ EADDRINUSE = syscall.Errno(0x30)
+ EADDRNOTAVAIL = syscall.Errno(0x31)
+ EAFNOSUPPORT = syscall.Errno(0x2f)
+ EAGAIN = syscall.Errno(0x23)
+ EALREADY = syscall.Errno(0x25)
+ EAUTH = syscall.Errno(0x50)
+ EBADF = syscall.Errno(0x9)
+ EBADMSG = syscall.Errno(0x5c)
+ EBADRPC = syscall.Errno(0x48)
+ EBUSY = syscall.Errno(0x10)
+ ECANCELED = syscall.Errno(0x58)
+ ECHILD = syscall.Errno(0xa)
+ ECONNABORTED = syscall.Errno(0x35)
+ ECONNREFUSED = syscall.Errno(0x3d)
+ ECONNRESET = syscall.Errno(0x36)
+ EDEADLK = syscall.Errno(0xb)
+ EDESTADDRREQ = syscall.Errno(0x27)
+ EDOM = syscall.Errno(0x21)
+ EDQUOT = syscall.Errno(0x45)
+ EEXIST = syscall.Errno(0x11)
+ EFAULT = syscall.Errno(0xe)
+ EFBIG = syscall.Errno(0x1b)
+ EFTYPE = syscall.Errno(0x4f)
+ EHOSTDOWN = syscall.Errno(0x40)
+ EHOSTUNREACH = syscall.Errno(0x41)
+ EIDRM = syscall.Errno(0x59)
+ EILSEQ = syscall.Errno(0x54)
+ EINPROGRESS = syscall.Errno(0x24)
+ EINTR = syscall.Errno(0x4)
+ EINVAL = syscall.Errno(0x16)
+ EIO = syscall.Errno(0x5)
+ EIPSEC = syscall.Errno(0x52)
+ EISCONN = syscall.Errno(0x38)
+ EISDIR = syscall.Errno(0x15)
+ ELAST = syscall.Errno(0x5f)
+ ELOOP = syscall.Errno(0x3e)
+ EMEDIUMTYPE = syscall.Errno(0x56)
+ EMFILE = syscall.Errno(0x18)
+ EMLINK = syscall.Errno(0x1f)
+ EMSGSIZE = syscall.Errno(0x28)
+ ENAMETOOLONG = syscall.Errno(0x3f)
+ ENEEDAUTH = syscall.Errno(0x51)
+ ENETDOWN = syscall.Errno(0x32)
+ ENETRESET = syscall.Errno(0x34)
+ ENETUNREACH = syscall.Errno(0x33)
+ ENFILE = syscall.Errno(0x17)
+ ENOATTR = syscall.Errno(0x53)
+ ENOBUFS = syscall.Errno(0x37)
+ ENODEV = syscall.Errno(0x13)
+ ENOENT = syscall.Errno(0x2)
+ ENOEXEC = syscall.Errno(0x8)
+ ENOLCK = syscall.Errno(0x4d)
+ ENOMEDIUM = syscall.Errno(0x55)
+ ENOMEM = syscall.Errno(0xc)
+ ENOMSG = syscall.Errno(0x5a)
+ ENOPROTOOPT = syscall.Errno(0x2a)
+ ENOSPC = syscall.Errno(0x1c)
+ ENOSYS = syscall.Errno(0x4e)
+ ENOTBLK = syscall.Errno(0xf)
+ ENOTCONN = syscall.Errno(0x39)
+ ENOTDIR = syscall.Errno(0x14)
+ ENOTEMPTY = syscall.Errno(0x42)
+ ENOTRECOVERABLE = syscall.Errno(0x5d)
+ ENOTSOCK = syscall.Errno(0x26)
+ ENOTSUP = syscall.Errno(0x5b)
+ ENOTTY = syscall.Errno(0x19)
+ ENXIO = syscall.Errno(0x6)
+ EOPNOTSUPP = syscall.Errno(0x2d)
+ EOVERFLOW = syscall.Errno(0x57)
+ EOWNERDEAD = syscall.Errno(0x5e)
+ EPERM = syscall.Errno(0x1)
+ EPFNOSUPPORT = syscall.Errno(0x2e)
+ EPIPE = syscall.Errno(0x20)
+ EPROCLIM = syscall.Errno(0x43)
+ EPROCUNAVAIL = syscall.Errno(0x4c)
+ EPROGMISMATCH = syscall.Errno(0x4b)
+ EPROGUNAVAIL = syscall.Errno(0x4a)
+ EPROTO = syscall.Errno(0x5f)
+ EPROTONOSUPPORT = syscall.Errno(0x2b)
+ EPROTOTYPE = syscall.Errno(0x29)
+ ERANGE = syscall.Errno(0x22)
+ EREMOTE = syscall.Errno(0x47)
+ EROFS = syscall.Errno(0x1e)
+ ERPCMISMATCH = syscall.Errno(0x49)
+ ESHUTDOWN = syscall.Errno(0x3a)
+ ESOCKTNOSUPPORT = syscall.Errno(0x2c)
+ ESPIPE = syscall.Errno(0x1d)
+ ESRCH = syscall.Errno(0x3)
+ ESTALE = syscall.Errno(0x46)
+ ETIMEDOUT = syscall.Errno(0x3c)
+ ETOOMANYREFS = syscall.Errno(0x3b)
+ ETXTBSY = syscall.Errno(0x1a)
+ EUSERS = syscall.Errno(0x44)
+ EWOULDBLOCK = syscall.Errno(0x23)
+ EXDEV = syscall.Errno(0x12)
+)
+
+// Signals
+const (
+ SIGABRT = syscall.Signal(0x6)
+ SIGALRM = syscall.Signal(0xe)
+ SIGBUS = syscall.Signal(0xa)
+ SIGCHLD = syscall.Signal(0x14)
+ SIGCONT = syscall.Signal(0x13)
+ SIGEMT = syscall.Signal(0x7)
+ SIGFPE = syscall.Signal(0x8)
+ SIGHUP = syscall.Signal(0x1)
+ SIGILL = syscall.Signal(0x4)
+ SIGINFO = syscall.Signal(0x1d)
+ SIGINT = syscall.Signal(0x2)
+ SIGIO = syscall.Signal(0x17)
+ SIGIOT = syscall.Signal(0x6)
+ SIGKILL = syscall.Signal(0x9)
+ SIGPIPE = syscall.Signal(0xd)
+ SIGPROF = syscall.Signal(0x1b)
+ SIGQUIT = syscall.Signal(0x3)
+ SIGSEGV = syscall.Signal(0xb)
+ SIGSTOP = syscall.Signal(0x11)
+ SIGSYS = syscall.Signal(0xc)
+ SIGTERM = syscall.Signal(0xf)
+ SIGTHR = syscall.Signal(0x20)
+ SIGTRAP = syscall.Signal(0x5)
+ SIGTSTP = syscall.Signal(0x12)
+ SIGTTIN = syscall.Signal(0x15)
+ SIGTTOU = syscall.Signal(0x16)
+ SIGURG = syscall.Signal(0x10)
+ SIGUSR1 = syscall.Signal(0x1e)
+ SIGUSR2 = syscall.Signal(0x1f)
+ SIGVTALRM = syscall.Signal(0x1a)
+ SIGWINCH = syscall.Signal(0x1c)
+ SIGXCPU = syscall.Signal(0x18)
+ SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+ num syscall.Errno
+ name string
+ desc string
+}{
+ {1, "EPERM", "operation not permitted"},
+ {2, "ENOENT", "no such file or directory"},
+ {3, "ESRCH", "no such process"},
+ {4, "EINTR", "interrupted system call"},
+ {5, "EIO", "input/output error"},
+ {6, "ENXIO", "device not configured"},
+ {7, "E2BIG", "argument list too long"},
+ {8, "ENOEXEC", "exec format error"},
+ {9, "EBADF", "bad file descriptor"},
+ {10, "ECHILD", "no child processes"},
+ {11, "EDEADLK", "resource deadlock avoided"},
+ {12, "ENOMEM", "cannot allocate memory"},
+ {13, "EACCES", "permission denied"},
+ {14, "EFAULT", "bad address"},
+ {15, "ENOTBLK", "block device required"},
+ {16, "EBUSY", "device busy"},
+ {17, "EEXIST", "file exists"},
+ {18, "EXDEV", "cross-device link"},
+ {19, "ENODEV", "operation not supported by device"},
+ {20, "ENOTDIR", "not a directory"},
+ {21, "EISDIR", "is a directory"},
+ {22, "EINVAL", "invalid argument"},
+ {23, "ENFILE", "too many open files in system"},
+ {24, "EMFILE", "too many open files"},
+ {25, "ENOTTY", "inappropriate ioctl for device"},
+ {26, "ETXTBSY", "text file busy"},
+ {27, "EFBIG", "file too large"},
+ {28, "ENOSPC", "no space left on device"},
+ {29, "ESPIPE", "illegal seek"},
+ {30, "EROFS", "read-only file system"},
+ {31, "EMLINK", "too many links"},
+ {32, "EPIPE", "broken pipe"},
+ {33, "EDOM", "numerical argument out of domain"},
+ {34, "ERANGE", "result too large"},
+ {35, "EAGAIN", "resource temporarily unavailable"},
+ {36, "EINPROGRESS", "operation now in progress"},
+ {37, "EALREADY", "operation already in progress"},
+ {38, "ENOTSOCK", "socket operation on non-socket"},
+ {39, "EDESTADDRREQ", "destination address required"},
+ {40, "EMSGSIZE", "message too long"},
+ {41, "EPROTOTYPE", "protocol wrong type for socket"},
+ {42, "ENOPROTOOPT", "protocol not available"},
+ {43, "EPROTONOSUPPORT", "protocol not supported"},
+ {44, "ESOCKTNOSUPPORT", "socket type not supported"},
+ {45, "EOPNOTSUPP", "operation not supported"},
+ {46, "EPFNOSUPPORT", "protocol family not supported"},
+ {47, "EAFNOSUPPORT", "address family not supported by protocol family"},
+ {48, "EADDRINUSE", "address already in use"},
+ {49, "EADDRNOTAVAIL", "can't assign requested address"},
+ {50, "ENETDOWN", "network is down"},
+ {51, "ENETUNREACH", "network is unreachable"},
+ {52, "ENETRESET", "network dropped connection on reset"},
+ {53, "ECONNABORTED", "software caused connection abort"},
+ {54, "ECONNRESET", "connection reset by peer"},
+ {55, "ENOBUFS", "no buffer space available"},
+ {56, "EISCONN", "socket is already connected"},
+ {57, "ENOTCONN", "socket is not connected"},
+ {58, "ESHUTDOWN", "can't send after socket shutdown"},
+ {59, "ETOOMANYREFS", "too many references: can't splice"},
+ {60, "ETIMEDOUT", "operation timed out"},
+ {61, "ECONNREFUSED", "connection refused"},
+ {62, "ELOOP", "too many levels of symbolic links"},
+ {63, "ENAMETOOLONG", "file name too long"},
+ {64, "EHOSTDOWN", "host is down"},
+ {65, "EHOSTUNREACH", "no route to host"},
+ {66, "ENOTEMPTY", "directory not empty"},
+ {67, "EPROCLIM", "too many processes"},
+ {68, "EUSERS", "too many users"},
+ {69, "EDQUOT", "disk quota exceeded"},
+ {70, "ESTALE", "stale NFS file handle"},
+ {71, "EREMOTE", "too many levels of remote in path"},
+ {72, "EBADRPC", "RPC struct is bad"},
+ {73, "ERPCMISMATCH", "RPC version wrong"},
+ {74, "EPROGUNAVAIL", "RPC program not available"},
+ {75, "EPROGMISMATCH", "program version wrong"},
+ {76, "EPROCUNAVAIL", "bad procedure for program"},
+ {77, "ENOLCK", "no locks available"},
+ {78, "ENOSYS", "function not implemented"},
+ {79, "EFTYPE", "inappropriate file type or format"},
+ {80, "EAUTH", "authentication error"},
+ {81, "ENEEDAUTH", "need authenticator"},
+ {82, "EIPSEC", "IPsec processing failure"},
+ {83, "ENOATTR", "attribute not found"},
+ {84, "EILSEQ", "illegal byte sequence"},
+ {85, "ENOMEDIUM", "no medium found"},
+ {86, "EMEDIUMTYPE", "wrong medium type"},
+ {87, "EOVERFLOW", "value too large to be stored in data type"},
+ {88, "ECANCELED", "operation canceled"},
+ {89, "EIDRM", "identifier removed"},
+ {90, "ENOMSG", "no message of desired type"},
+ {91, "ENOTSUP", "not supported"},
+ {92, "EBADMSG", "bad message"},
+ {93, "ENOTRECOVERABLE", "state not recoverable"},
+ {94, "EOWNERDEAD", "previous owner died"},
+ {95, "ELAST", "protocol error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+ num syscall.Signal
+ name string
+ desc string
+}{
+ {1, "SIGHUP", "hangup"},
+ {2, "SIGINT", "interrupt"},
+ {3, "SIGQUIT", "quit"},
+ {4, "SIGILL", "illegal instruction"},
+ {5, "SIGTRAP", "trace/BPT trap"},
+ {6, "SIGABRT", "abort trap"},
+ {7, "SIGEMT", "EMT trap"},
+ {8, "SIGFPE", "floating point exception"},
+ {9, "SIGKILL", "killed"},
+ {10, "SIGBUS", "bus error"},
+ {11, "SIGSEGV", "segmentation fault"},
+ {12, "SIGSYS", "bad system call"},
+ {13, "SIGPIPE", "broken pipe"},
+ {14, "SIGALRM", "alarm clock"},
+ {15, "SIGTERM", "terminated"},
+ {16, "SIGURG", "urgent I/O condition"},
+ {17, "SIGSTOP", "suspended (signal)"},
+ {18, "SIGTSTP", "suspended"},
+ {19, "SIGCONT", "continued"},
+ {20, "SIGCHLD", "child exited"},
+ {21, "SIGTTIN", "stopped (tty input)"},
+ {22, "SIGTTOU", "stopped (tty output)"},
+ {23, "SIGIO", "I/O possible"},
+ {24, "SIGXCPU", "cputime limit exceeded"},
+ {25, "SIGXFSZ", "filesize limit exceeded"},
+ {26, "SIGVTALRM", "virtual timer expired"},
+ {27, "SIGPROF", "profiling timer expired"},
+ {28, "SIGWINCH", "window size changes"},
+ {29, "SIGINFO", "information request"},
+ {30, "SIGUSR1", "user defined signal 1"},
+ {31, "SIGUSR2", "user defined signal 2"},
+ {32, "SIGTHR", "thread AST"},
+}
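
Note: in the Errors block of this new openbsd/arm64 file, several names alias the same number (for example EAGAIN and EWOULDBLOCK are both 0x23), so comparing a returned error against either constant behaves identically. A minimal sketch, assuming an openbsd/arm64 build so that these generated values are the ones in effect:

    //go:build openbsd && arm64

    package main

    import (
    	"fmt"
    	"syscall"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// A would-block error from a non-blocking descriptor compares equal
    	// to both aliases, since they are the same syscall.Errno value (0x23).
    	err := error(syscall.Errno(0x23))
    	fmt.Println(err == unix.EAGAIN, err == unix.EWOULDBLOCK) // true true
    }
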
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
index 79f6e0566..ed657ff1b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
@@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t);
int pause();
int pread64(int, uintptr_t, size_t, long long);
int pwrite64(int, uintptr_t, size_t, long long);
+#define c_select select
+int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
int setregid(int, int);
int setreuid(int, int);
@@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t);
int getsockname(int, uintptr_t, uintptr_t);
int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t);
int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t);
-int recvmsg(int, uintptr_t, int);
-int sendmsg(int, uintptr_t, int);
+int nrecvmsg(int, uintptr_t, int);
+int nsendmsg(int, uintptr_t, int);
int munmap(uintptr_t, uintptr_t);
int madvise(uintptr_t, size_t, int);
int mprotect(uintptr_t, size_t, int);
@@ -118,6 +120,8 @@ int poll(uintptr_t, int, int);
int gettimeofday(uintptr_t, uintptr_t);
int time(uintptr_t);
int utime(uintptr_t, uintptr_t);
+unsigned long long getsystemcfg(int);
+int umount(uintptr_t);
int getrlimit64(int, uintptr_t);
int setrlimit64(int, uintptr_t);
long long lseek64(int, long long, int);
@@ -855,7 +859,7 @@ func Fchown(fd int, uid int, gid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
+func fstat(fd int, stat *Stat_t) (err error) {
r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
if r0 == -1 && er != nil {
err = er
@@ -865,7 +869,7 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
+func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags))
if r0 == -1 && er != nil {
@@ -949,7 +953,7 @@ func Listen(s int, n int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Lstat(path string, stat *Stat_t) (err error) {
+func lstat(path string, stat *Stat_t) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
if r0 == -1 && er != nil {
@@ -1004,6 +1008,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, er := C.c_select(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))))
+ n = int(r0)
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, er := C.pselect(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask))))
n = int(r0)
@@ -1056,9 +1071,9 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Stat(path string, stat *Stat_t) (err error) {
+func stat(path string, statptr *Stat_t) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
- r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
+ r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(statptr))))
if r0 == -1 && er != nil {
err = er
}
@@ -1225,7 +1240,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags))
+ r0, er := C.nrecvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags))
n = int(r0)
if r0 == -1 && er != nil {
err = er
@@ -1236,7 +1251,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags))
+ r0, er := C.nsendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags))
n = int(r0)
if r0 == -1 && er != nil {
err = er
@@ -1409,6 +1424,25 @@ func Utime(path string, buf *Utimbuf) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsystemcfg(label int) (n uint64) {
+ r0, _ := C.getsystemcfg(C.int(label))
+ n = uint64(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func umount(target string) (err error) {
+ _p0 := uintptr(unsafe.Pointer(C.CString(target)))
+ r0, er := C.umount(C.uintptr_t(_p0))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getrlimit(resource int, rlim *Rlimit) (err error) {
r0, er := C.getrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim))))
if r0 == -1 && er != nil {
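
Note: the hunks above add a Select wrapper for AIX, routed through a c_select alias so the C prototype does not clash with pselect. One dependency-free way to exercise it is the classic select-as-sleep pattern with empty fd sets; a small sketch, assuming an aix build of golang.org/x/sys/unix that includes this wrapper:

    //go:build aix

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// With nil fd sets, Select simply blocks for the given timeout
    	// (here 250ms) and then returns 0.
    	tv := unix.Timeval{Sec: 0, Usec: 250000}
    	n, err := unix.Select(0, nil, nil, nil, &tv)
    	fmt.Println(n, err)
    }
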
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
index 3e929e520..664b293b4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
@@ -1,4 +1,4 @@
-// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
+// go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build aix,ppc64
@@ -803,7 +803,7 @@ func Fchown(fd int, uid int, gid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
+func fstat(fd int, stat *Stat_t) (err error) {
_, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat)))
if e1 != 0 {
err = errnoErr(e1)
@@ -813,7 +813,7 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
+func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -905,7 +905,7 @@ func Listen(s int, n int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Lstat(path string, stat *Stat_t) (err error) {
+func lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@@ -960,6 +960,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, e1 := callselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, e1 := callpselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
n = int(r0)
@@ -1012,13 +1023,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Stat(path string, stat *Stat_t) (err error) {
+func stat(path string, statptr *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
- _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)))
+ _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statptr)))
if e1 != 0 {
err = errnoErr(e1)
}
@@ -1189,7 +1200,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, e1 := callrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags)
+ r0, e1 := callnrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1200,7 +1211,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, e1 := callsendmsg(s, uintptr(unsafe.Pointer(msg)), flags)
+ r0, e1 := callnsendmsg(s, uintptr(unsafe.Pointer(msg)), flags)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1367,6 +1378,29 @@ func Utime(path string, buf *Utimbuf) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsystemcfg(label int) (n uint64) {
+ r0, _ := callgetsystemcfg(label)
+ n = uint64(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func umount(target string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ _, e1 := callumount(uintptr(unsafe.Pointer(_p0)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getrlimit(resource int, rlim *Rlimit) (err error) {
_, e1 := callgetrlimit(resource, uintptr(unsafe.Pointer(rlim)))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
index a185ee842..4b3a8ad7b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
@@ -1,4 +1,4 @@
-// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
+// go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build aix,ppc64
@@ -85,6 +85,7 @@ import (
//go:cgo_import_dynamic libc_pause pause "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_pread64 pread64 "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_pwrite64 pwrite64 "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_select select "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_pselect pselect "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_setregid setregid "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_setreuid setreuid "libc.a/shr_64.o"
@@ -105,8 +106,8 @@ import (
//go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o"
-//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o"
-//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_nrecvmsg nrecvmsg "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_nsendmsg nsendmsg "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_mprotect mprotect "libc.a/shr_64.o"
@@ -120,6 +121,8 @@ import (
//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_time time "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_utime utime "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o"
//go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o"
@@ -200,6 +203,7 @@ import (
//go:linkname libc_pause libc_pause
//go:linkname libc_pread64 libc_pread64
//go:linkname libc_pwrite64 libc_pwrite64
+//go:linkname libc_select libc_select
//go:linkname libc_pselect libc_pselect
//go:linkname libc_setregid libc_setregid
//go:linkname libc_setreuid libc_setreuid
@@ -220,8 +224,8 @@ import (
//go:linkname libc_getsockname libc_getsockname
//go:linkname libc_recvfrom libc_recvfrom
//go:linkname libc_sendto libc_sendto
-//go:linkname libc_recvmsg libc_recvmsg
-//go:linkname libc_sendmsg libc_sendmsg
+//go:linkname libc_nrecvmsg libc_nrecvmsg
+//go:linkname libc_nsendmsg libc_nsendmsg
//go:linkname libc_munmap libc_munmap
//go:linkname libc_madvise libc_madvise
//go:linkname libc_mprotect libc_mprotect
@@ -235,6 +239,8 @@ import (
//go:linkname libc_gettimeofday libc_gettimeofday
//go:linkname libc_time libc_time
//go:linkname libc_utime libc_utime
+//go:linkname libc_getsystemcfg libc_getsystemcfg
+//go:linkname libc_umount libc_umount
//go:linkname libc_getrlimit libc_getrlimit
//go:linkname libc_setrlimit libc_setrlimit
//go:linkname libc_lseek libc_lseek
@@ -318,6 +324,7 @@ var (
libc_pause,
libc_pread64,
libc_pwrite64,
+ libc_select,
libc_pselect,
libc_setregid,
libc_setreuid,
@@ -338,8 +345,8 @@ var (
libc_getsockname,
libc_recvfrom,
libc_sendto,
- libc_recvmsg,
- libc_sendmsg,
+ libc_nrecvmsg,
+ libc_nsendmsg,
libc_munmap,
libc_madvise,
libc_mprotect,
@@ -353,6 +360,8 @@ var (
libc_gettimeofday,
libc_time,
libc_utime,
+ libc_getsystemcfg,
+ libc_umount,
libc_getrlimit,
libc_setrlimit,
libc_lseek,
@@ -890,6 +899,13 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_select)), 5, uintptr(nfd), r, w, e, timeout, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pselect)), 6, uintptr(nfd), r, w, e, timeout, sigmask)
return
@@ -925,8 +941,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0)
+func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, statptr, 0, 0, 0, 0)
return
}
@@ -1030,15 +1046,15 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0)
+func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nrecvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0)
+func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nsendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0)
return
}
@@ -1135,6 +1151,20 @@ func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) {
+ r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_umount)), 1, _p0, 0, 0, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) {
r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
index aef7c0e78..cde4dbc5f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
@@ -1,4 +1,4 @@
-// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
+// go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build aix,ppc64
@@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t);
int pause();
int pread64(int, uintptr_t, size_t, long long);
int pwrite64(int, uintptr_t, size_t, long long);
+#define c_select select
+int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
int setregid(int, int);
int setreuid(int, int);
@@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t);
int getsockname(int, uintptr_t, uintptr_t);
int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t);
int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t);
-int recvmsg(int, uintptr_t, int);
-int sendmsg(int, uintptr_t, int);
+int nrecvmsg(int, uintptr_t, int);
+int nsendmsg(int, uintptr_t, int);
int munmap(uintptr_t, uintptr_t);
int madvise(uintptr_t, size_t, int);
int mprotect(uintptr_t, size_t, int);
@@ -118,6 +120,8 @@ int poll(uintptr_t, int, int);
int gettimeofday(uintptr_t, uintptr_t);
int time(uintptr_t);
int utime(uintptr_t, uintptr_t);
+unsigned long long getsystemcfg(int);
+int umount(uintptr_t);
int getrlimit(int, uintptr_t);
int setrlimit(int, uintptr_t);
long long lseek(int, long long, int);
@@ -731,6 +735,14 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.c_select(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout)))
+ e1 = syscall.GetErrno()
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.pselect(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout), C.uintptr_t(sigmask)))
e1 = syscall.GetErrno()
@@ -771,8 +783,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat)))
+func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(statptr)))
e1 = syscall.GetErrno()
return
}
@@ -891,16 +903,16 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.recvmsg(C.int(s), C.uintptr_t(msg), C.int(flags)))
+func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.nrecvmsg(C.int(s), C.uintptr_t(msg), C.int(flags)))
e1 = syscall.GetErrno()
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.sendmsg(C.int(s), C.uintptr_t(msg), C.int(flags)))
+func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.nsendmsg(C.int(s), C.uintptr_t(msg), C.int(flags)))
e1 = syscall.GetErrno()
return
}
@@ -1011,6 +1023,22 @@ func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.getsystemcfg(C.int(label)))
+ e1 = syscall.GetErrno()
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) {
+ r1 = uintptr(C.umount(C.uintptr_t(_p0)))
+ e1 = syscall.GetErrno()
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.getrlimit(C.int(resource), C.uintptr_t(rlim)))
e1 = syscall.GetErrno()
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index b50178d67..c142e33e9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -943,6 +943,21 @@ func libc_chroot_trampoline()
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_clock_gettime_trampoline()
+
+//go:linkname libc_clock_gettime libc_clock_gettime
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index da9b900a8..1a3915197 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -108,6 +108,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
+TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0
+ JMP libc_clock_gettime(SB)
TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
JMP libc_close(SB)
TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index da9986dd2..cdfe9318b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -749,6 +749,23 @@ func Ftruncate(fd int, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
@@ -1194,6 +1211,26 @@ func Rename(from string, to string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(fromfd int, from string, tofd int, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Revoke(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index 80903e47b..a783306b2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ptrace(request int, pid int, addr uintptr, data int) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
@@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index cd250ff0e..f995520d3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ptrace(request int, pid int, addr uintptr, data int) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
@@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 290a9c2cb..d681acd43 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -387,6 +387,16 @@ func pipe2(p *[2]_C_int, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ptrace(request int, pid int, addr uintptr, data int) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
@@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
index c6df9d2e8..5049b2ede 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
@@ -404,6 +404,16 @@ func Getcwd(buf []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ptrace(request int, pid int, addr uintptr, data int) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
@@ -1019,7 +1029,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 5356a5175..c5e46e4cf 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
@@ -1901,6 +1969,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
written = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 0f6d265d8..da8819e48 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1906,6 +1974,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 012261ad5..6ad9be6dd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,42 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]_C_int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
@@ -2016,6 +2094,26 @@ func Pause() (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
written = int(r0)
@@ -2292,3 +2390,18 @@ func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(cmdline)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index b890cb03c..f88331782 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
var _p0 unsafe.Pointer
if len(events) > 0 {
@@ -1829,6 +1897,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
@@ -2189,3 +2277,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(cmdline)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index cc17b43d3..8eebc6c77 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1820,6 +1888,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 25026415d..ecf62a677 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1850,6 +1918,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
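
The hunks above (mirrored in each per-arch Linux file that follows) add generated stubs for fanotify_init, capget/capset, clock_nanosleep, signalfd4 and the file-handle syscalls, and relocate the generated Renameat stub later in each file with its signature unchanged. A minimal, illustrative call site for a few of the exported stubs, assuming the usual golang.org/x/sys/unix constants (FAN_CLASS_NOTIF, FAN_CLOEXEC, CLOCK_MONOTONIC, AT_FDCWD) on a Linux target; this is a sketch, not part of the vendored code:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// FanotifyInit is one of the newly generated exported stubs; the flag
	// constants are the usual ones from zerrors_linux*.go (assumed here).
	fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF|unix.FAN_CLOEXEC, unix.O_RDONLY)
	if err != nil {
		log.Fatalf("fanotify_init: %v", err)
	}
	defer unix.Close(fd)
	fmt.Println("fanotify fd:", fd)

	// ClockNanosleep uses the signature shown in the hunks above:
	// (clockid int32, flags int, request, remain *Timespec) error.
	ts := unix.Timespec{Sec: 1}
	if err := unix.ClockNanosleep(unix.CLOCK_MONOTONIC, 0, &ts, nil); err != nil {
		log.Fatalf("clock_nanosleep: %v", err)
	}

	// Renameat keeps its existing exported signature; only its position in
	// the generated file changes.
	if err := unix.Renameat(unix.AT_FDCWD, "old.txt", unix.AT_FDCWD, "new.txt"); err != nil {
		log.Printf("renameat: %v", err)
	}
}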
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index 83d8bb8af..1ba0f7b6f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1850,6 +1918,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index b16b3e102..20012b2f0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1820,6 +1888,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 27b6a6bf0..2b520deaa 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1921,6 +1989,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index f7ecc9afd..d9f044c95 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1921,6 +1989,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
index e3cd4e53f..9feed65eb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
var _p0 unsafe.Pointer
if len(events) > 0 {
@@ -2189,3 +2257,18 @@ func pipe2(p *[2]_C_int, flags int) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(cmdline)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index 3001d3798..0a6515088 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {
@@ -1891,6 +1959,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index aafe3660f..e27f66930 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -14,6 +14,27 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func fchmodat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -387,6 +408,26 @@ func Adjtimex(buf *Timex) (state int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Capget(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Capset(hdr *CapUserHeader, data *CapUserData) (err error) {
+ _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -437,6 +478,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
+ _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1195,26 +1246,6 @@ func Removexattr(path string, attr string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
@@ -1370,6 +1401,17 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) {
+ r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0)
+ newfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1661,6 +1703,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
var _p0 unsafe.Pointer
if len(events) > 0 {
@@ -1890,6 +1958,26 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
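
Each Linux file above also gains unexported nameToHandleAt and openByHandleAt stubs. They are normally reached through exported NameToHandleAt/OpenByHandleAt helpers defined elsewhere in the package (not part of this diff); the sketch below assumes those helpers and their usual signatures, so treat it as illustrative only:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Resolve a path to an opaque file handle plus the ID of the mount it
	// lives on (assumed exported wrapper around name_to_handle_at).
	handle, mountID, err := unix.NameToHandleAt(unix.AT_FDCWD, "/etc/hostname", 0)
	if err != nil {
		log.Fatalf("name_to_handle_at: %v", err)
	}
	fmt.Println("mount id:", mountID)

	// Re-opening by handle needs a descriptor on the containing mount and
	// usually CAP_DAC_READ_SEARCH.
	mountFD, err := unix.Open("/etc", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(mountFD)

	fd, err := unix.OpenByHandleAt(mountFD, handle, unix.O_RDONLY)
	if err != nil {
		log.Fatalf("open_by_handle_at: %v", err)
	}
	defer unix.Close(fd)
	fmt.Println("reopened fd:", fd)
}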
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index 642db7670..7e0582664 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index 59585fee3..d94d076aa 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index 6ec31434b..cf5bf3d05 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
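
The three NetBSD hunks above only rename the generated getdents stub to the exported Getdents, keeping the (fd int, buf []byte) (n int, err error) signature; the new zsyscall_netbsd_arm64.go added below carries the same exported form. A small illustrative caller, assuming an ordinary readable directory:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a directory and drain one buffer of raw dirent records through the
	// newly exported Getdents (previously the unexported getdents).
	dirfd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(dirfd)

	buf := make([]byte, 8192)
	n, err := unix.Getdents(dirfd, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes of directory entries\n", n)
}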
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
new file mode 100644
index 000000000..243a9317c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
@@ -0,0 +1,1826 @@
+// go run mksyscall.go -netbsd -tags netbsd,arm64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm64.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build netbsd,arm64
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+ var _p0 unsafe.Pointer
+ if len(mib) > 0 {
+ _p0 = unsafe.Pointer(&mib[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, timeval *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimes(fd int, timeval *[2]Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, behav int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Msync(b []byte, flags int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe() (fd1 int, fd2 int, err error) {
+ r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
+ fd1 = int(r0)
+ fd2 = int(r1)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Access(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chflags(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
+ nfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(from int, to int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+ Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
+ r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(file)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(file)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(file)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(file)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attrname)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
+ ret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+ _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchflags(fd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgrp() (pgrp int) {
+ r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
+ pgrp = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Issetugid() (tainted bool) {
+ r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
+ tainted = bool(r0 != 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, signum syscall.Signal) (err error) {
+ _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kqueue() (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifo(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pathconf(path string, name int) (val int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(fromfd int, from string, tofd int, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Revoke(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0)
+ newoffset = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
+ _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setuid(uid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() (err error) {
+ _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+ r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+ r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0)
+ ret = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 6a489fac0..a9532d078 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 30cba4347..0cb9f0177 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index fa1beda33..6fc99b549 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getdents(fd int, buf []byte) (n int, err error) {
+func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
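The three hunks above rename getdents to Getdents in the existing OpenBSD files, exporting the raw directory-entry wrapper. A hedged sketch of how a caller could combine the exported Getdents with unix.ParseDirent (an existing helper in x/sys/unix; its use here is illustrative and not part of this diff) follows; the directory path is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a directory read-only and pull raw dirent records with Getdents.
	fd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	buf := make([]byte, 8192)
	n, err := unix.Getdents(fd, buf)
	if err != nil {
		fmt.Println("getdents:", err)
		return
	}
	// ParseDirent decodes the kernel's dirent buffer into file names;
	// a negative max means "decode everything in the buffer".
	_, _, names := unix.ParseDirent(buf[:n], -1, nil)
	fmt.Println(names)
}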
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
new file mode 100644
index 000000000..27878a72b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -0,0 +1,1692 @@
+// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build openbsd,arm64
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+ var _p0 unsafe.Pointer
+ if len(mib) > 0 {
+ _p0 = unsafe.Pointer(&mib[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, timeval *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimes(fd int, timeval *[2]Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, behav int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Msync(b []byte, flags int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]_C_int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Access(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chflags(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
+ nfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(from int, to int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+ Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchflags(fd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgrp() (pgrp int) {
+ r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
+ pgrp = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrtable() (rtable int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
+ rtable = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Issetugid() (tainted bool) {
+ r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
+ tainted = bool(r0 != 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, signum syscall.Signal) (err error) {
+ _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kqueue() (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifo(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pathconf(path string, name int) (val int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(fromfd int, from string, tofd int, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Revoke(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0)
+ newoffset = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
+ _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setlogin(name string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrtable(rtable int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tp *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setuid(uid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() (err error) {
+ _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+ r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+ r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0)
+ ret = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 97b22a499..5f614760c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -1,4 +1,4 @@
-// mksyscall_solaris.pl -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go
+// go run mksyscall_solaris.go -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build solaris,amd64
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
index b005031ab..37dcc74c2 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
@@ -1,6 +1,8 @@
// mksysctl_openbsd.pl
// Code generated by the command above; DO NOT EDIT.
+// +build 386,openbsd
+
package unix
type mibentry struct {
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
index d014451c9..fe6caa6eb 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
@@ -1,4 +1,4 @@
-// mksysctl_openbsd.pl
+// go run mksysctl_openbsd.go
// Code generated by the command above; DO NOT EDIT.
// +build amd64,openbsd
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
index b005031ab..6eb8c0b08 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
@@ -1,6 +1,8 @@
-// mksysctl_openbsd.pl
+// go run mksysctl_openbsd.go
// Code generated by the command above; DO NOT EDIT.
+// +build arm,openbsd
+
package unix
type mibentry struct {
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
new file mode 100644
index 000000000..ba4304fd2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
@@ -0,0 +1,275 @@
+// go run mksysctl_openbsd.go
+// Code generated by the command above; DO NOT EDIT.
+
+// +build arm64,openbsd
+
+package unix
+
+type mibentry struct {
+ ctlname string
+ ctloid []_C_int
+}
+
+var sysctlMib = []mibentry{
+ {"ddb.console", []_C_int{9, 6}},
+ {"ddb.log", []_C_int{9, 7}},
+ {"ddb.max_line", []_C_int{9, 3}},
+ {"ddb.max_width", []_C_int{9, 2}},
+ {"ddb.panic", []_C_int{9, 5}},
+ {"ddb.profile", []_C_int{9, 9}},
+ {"ddb.radix", []_C_int{9, 1}},
+ {"ddb.tab_stop_width", []_C_int{9, 4}},
+ {"ddb.trigger", []_C_int{9, 8}},
+ {"fs.posix.setuid", []_C_int{3, 1, 1}},
+ {"hw.allowpowerdown", []_C_int{6, 22}},
+ {"hw.byteorder", []_C_int{6, 4}},
+ {"hw.cpuspeed", []_C_int{6, 12}},
+ {"hw.diskcount", []_C_int{6, 10}},
+ {"hw.disknames", []_C_int{6, 8}},
+ {"hw.diskstats", []_C_int{6, 9}},
+ {"hw.machine", []_C_int{6, 1}},
+ {"hw.model", []_C_int{6, 2}},
+ {"hw.ncpu", []_C_int{6, 3}},
+ {"hw.ncpufound", []_C_int{6, 21}},
+ {"hw.ncpuonline", []_C_int{6, 25}},
+ {"hw.pagesize", []_C_int{6, 7}},
+ {"hw.perfpolicy", []_C_int{6, 23}},
+ {"hw.physmem", []_C_int{6, 19}},
+ {"hw.product", []_C_int{6, 15}},
+ {"hw.serialno", []_C_int{6, 17}},
+ {"hw.setperf", []_C_int{6, 13}},
+ {"hw.smt", []_C_int{6, 24}},
+ {"hw.usermem", []_C_int{6, 20}},
+ {"hw.uuid", []_C_int{6, 18}},
+ {"hw.vendor", []_C_int{6, 14}},
+ {"hw.version", []_C_int{6, 16}},
+ {"kern.allowkmem", []_C_int{1, 52}},
+ {"kern.argmax", []_C_int{1, 8}},
+ {"kern.audio", []_C_int{1, 84}},
+ {"kern.boottime", []_C_int{1, 21}},
+ {"kern.bufcachepercent", []_C_int{1, 72}},
+ {"kern.ccpu", []_C_int{1, 45}},
+ {"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consdev", []_C_int{1, 75}},
+ {"kern.cp_time", []_C_int{1, 40}},
+ {"kern.cp_time2", []_C_int{1, 71}},
+ {"kern.cpustats", []_C_int{1, 85}},
+ {"kern.domainname", []_C_int{1, 22}},
+ {"kern.file", []_C_int{1, 73}},
+ {"kern.forkstat", []_C_int{1, 42}},
+ {"kern.fscale", []_C_int{1, 46}},
+ {"kern.fsync", []_C_int{1, 33}},
+ {"kern.global_ptrace", []_C_int{1, 81}},
+ {"kern.hostid", []_C_int{1, 11}},
+ {"kern.hostname", []_C_int{1, 10}},
+ {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}},
+ {"kern.job_control", []_C_int{1, 19}},
+ {"kern.malloc.buckets", []_C_int{1, 39, 1}},
+ {"kern.malloc.kmemnames", []_C_int{1, 39, 3}},
+ {"kern.maxclusters", []_C_int{1, 67}},
+ {"kern.maxfiles", []_C_int{1, 7}},
+ {"kern.maxlocksperuid", []_C_int{1, 70}},
+ {"kern.maxpartitions", []_C_int{1, 23}},
+ {"kern.maxproc", []_C_int{1, 6}},
+ {"kern.maxthread", []_C_int{1, 25}},
+ {"kern.maxvnodes", []_C_int{1, 5}},
+ {"kern.mbstat", []_C_int{1, 59}},
+ {"kern.msgbuf", []_C_int{1, 48}},
+ {"kern.msgbufsize", []_C_int{1, 38}},
+ {"kern.nchstats", []_C_int{1, 41}},
+ {"kern.netlivelocks", []_C_int{1, 76}},
+ {"kern.nfiles", []_C_int{1, 56}},
+ {"kern.ngroups", []_C_int{1, 18}},
+ {"kern.nosuidcoredump", []_C_int{1, 32}},
+ {"kern.nprocs", []_C_int{1, 47}},
+ {"kern.nselcoll", []_C_int{1, 43}},
+ {"kern.nthreads", []_C_int{1, 26}},
+ {"kern.numvnodes", []_C_int{1, 58}},
+ {"kern.osrelease", []_C_int{1, 2}},
+ {"kern.osrevision", []_C_int{1, 3}},
+ {"kern.ostype", []_C_int{1, 1}},
+ {"kern.osversion", []_C_int{1, 27}},
+ {"kern.pool_debug", []_C_int{1, 77}},
+ {"kern.posix1version", []_C_int{1, 17}},
+ {"kern.proc", []_C_int{1, 66}},
+ {"kern.rawpartition", []_C_int{1, 24}},
+ {"kern.saved_ids", []_C_int{1, 20}},
+ {"kern.securelevel", []_C_int{1, 9}},
+ {"kern.seminfo", []_C_int{1, 61}},
+ {"kern.shminfo", []_C_int{1, 62}},
+ {"kern.somaxconn", []_C_int{1, 28}},
+ {"kern.sominconn", []_C_int{1, 29}},
+ {"kern.splassert", []_C_int{1, 54}},
+ {"kern.stackgap_random", []_C_int{1, 50}},
+ {"kern.sysvipc_info", []_C_int{1, 51}},
+ {"kern.sysvmsg", []_C_int{1, 34}},
+ {"kern.sysvsem", []_C_int{1, 35}},
+ {"kern.sysvshm", []_C_int{1, 36}},
+ {"kern.timecounter.choice", []_C_int{1, 69, 4}},
+ {"kern.timecounter.hardware", []_C_int{1, 69, 3}},
+ {"kern.timecounter.tick", []_C_int{1, 69, 1}},
+ {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
+ {"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
+ {"kern.tty.tk_nin", []_C_int{1, 44, 1}},
+ {"kern.tty.tk_nout", []_C_int{1, 44, 2}},
+ {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
+ {"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
+ {"kern.ttycount", []_C_int{1, 57}},
+ {"kern.version", []_C_int{1, 4}},
+ {"kern.watchdog.auto", []_C_int{1, 64, 2}},
+ {"kern.watchdog.period", []_C_int{1, 64, 1}},
+ {"kern.witnesswatch", []_C_int{1, 53}},
+ {"kern.wxabort", []_C_int{1, 74}},
+ {"net.bpf.bufsize", []_C_int{4, 31, 1}},
+ {"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
+ {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}},
+ {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}},
+ {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}},
+ {"net.inet.carp.log", []_C_int{4, 2, 112, 3}},
+ {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}},
+ {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}},
+ {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}},
+ {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}},
+ {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}},
+ {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}},
+ {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}},
+ {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}},
+ {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}},
+ {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}},
+ {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}},
+ {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}},
+ {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}},
+ {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}},
+ {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}},
+ {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}},
+ {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}},
+ {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}},
+ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}},
+ {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}},
+ {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}},
+ {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}},
+ {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}},
+ {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}},
+ {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}},
+ {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}},
+ {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}},
+ {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}},
+ {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}},
+ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}},
+ {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}},
+ {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}},
+ {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}},
+ {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}},
+ {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}},
+ {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}},
+ {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}},
+ {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}},
+ {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}},
+ {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}},
+ {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}},
+ {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}},
+ {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}},
+ {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}},
+ {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}},
+ {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}},
+ {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}},
+ {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}},
+ {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}},
+ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
+ {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
+ {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
+ {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
+ {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
+ {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
+ {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
+ {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}},
+ {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}},
+ {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}},
+ {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}},
+ {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}},
+ {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}},
+ {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}},
+ {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}},
+ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}},
+ {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}},
+ {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}},
+ {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}},
+ {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}},
+ {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}},
+ {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}},
+ {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}},
+ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}},
+ {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}},
+ {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}},
+ {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}},
+ {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}},
+ {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}},
+ {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}},
+ {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}},
+ {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}},
+ {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}},
+ {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}},
+ {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}},
+ {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}},
+ {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}},
+ {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}},
+ {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}},
+ {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}},
+ {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}},
+ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}},
+ {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}},
+ {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}},
+ {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}},
+ {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}},
+ {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}},
+ {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}},
+ {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}},
+ {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}},
+ {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}},
+ {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}},
+ {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}},
+ {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}},
+ {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}},
+ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}},
+ {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}},
+ {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}},
+ {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}},
+ {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}},
+ {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}},
+ {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}},
+ {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}},
+ {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}},
+ {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}},
+ {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}},
+ {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}},
+ {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}},
+ {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}},
+ {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}},
+ {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}},
+ {"net.key.sadb_dump", []_C_int{4, 30, 1}},
+ {"net.key.spd_dump", []_C_int{4, 30, 2}},
+ {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}},
+ {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}},
+ {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}},
+ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
+ {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
+ {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
+ {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
+ {"net.mpls.ttl", []_C_int{4, 33, 2}},
+ {"net.pflow.stats", []_C_int{4, 34, 1}},
+ {"net.pipex.enable", []_C_int{4, 35, 1}},
+ {"vm.anonmin", []_C_int{2, 7}},
+ {"vm.loadavg", []_C_int{2, 2}},
+ {"vm.malloc_conf", []_C_int{2, 12}},
+ {"vm.maxslp", []_C_int{2, 10}},
+ {"vm.nkmempages", []_C_int{2, 6}},
+ {"vm.psstrings", []_C_int{2, 3}},
+ {"vm.swapencrypt.enable", []_C_int{2, 5, 0}},
+ {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}},
+ {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}},
+ {"vm.uspace", []_C_int{2, 11}},
+ {"vm.uvmexp", []_C_int{2, 4}},
+ {"vm.vmmeter", []_C_int{2, 1}},
+ {"vm.vnodemin", []_C_int{2, 9}},
+ {"vm.vtextmin", []_C_int{2, 8}},
+}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
index 9e2837e0e..654dd3da3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h
+// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,darwin
@@ -431,6 +431,8 @@ const (
SYS_NTP_ADJTIME = 527
SYS_NTP_GETTIME = 528
SYS_OS_FAULT_WITH_PAYLOAD = 529
- SYS_MAXSYSCALL = 530
+ SYS_KQUEUE_WORKLOOP_CTL = 530
+ SYS___MACH_BRIDGE_REMOTE_TIME = 531
+ SYS_MAXSYSCALL = 532
SYS_INVALID = 63
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
index ff3976edb..464c9a983 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master
+// go run mksysnum.go https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,dragonfly
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
index b1e81b717..9474974b6 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master
+// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build 386,freebsd
@@ -118,8 +118,6 @@ const (
SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
- SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); }
- SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); }
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
@@ -133,10 +131,6 @@ const (
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
- SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); }
- SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); }
- SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); }
- SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
@@ -164,6 +158,7 @@ const (
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
@@ -197,13 +192,10 @@ const (
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
- SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
- SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); }
- SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); }
- SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
@@ -236,7 +228,7 @@ const (
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
@@ -258,7 +250,7 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); }
+ SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
@@ -293,8 +285,6 @@ const (
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
- SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); }
- SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
@@ -400,4 +390,7 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
+ SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
+ SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
+ SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
index 73e277fe7..48a7beae7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master
+// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,freebsd
@@ -118,8 +118,6 @@ const (
SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
- SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); }
- SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); }
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
@@ -133,10 +131,6 @@ const (
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
- SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); }
- SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); }
- SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); }
- SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
@@ -164,6 +158,7 @@ const (
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
@@ -197,13 +192,10 @@ const (
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
- SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
- SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); }
- SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); }
- SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
@@ -236,7 +228,7 @@ const (
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
@@ -258,7 +250,7 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); }
+ SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
@@ -293,8 +285,6 @@ const (
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
- SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); }
- SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
@@ -400,4 +390,7 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
+ SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
+ SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
+ SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
index e12b46919..4a6dfd4a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master
+// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm,freebsd
@@ -118,8 +118,6 @@ const (
SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
- SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); }
- SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); }
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
@@ -133,10 +131,6 @@ const (
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
- SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); }
- SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); }
- SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); }
- SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
@@ -164,6 +158,7 @@ const (
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
@@ -197,13 +192,10 @@ const (
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
- SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
- SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); }
- SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); }
- SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
@@ -236,7 +228,7 @@ const (
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
@@ -258,7 +250,7 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); }
+ SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
@@ -293,8 +285,6 @@ const (
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
- SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); }
- SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
@@ -400,4 +390,7 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
+ SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
+ SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
+ SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
index 8c1e16ca5..3e51af8ed 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
@@ -1,4 +1,4 @@
-// mksysnum_freebsd.pl
+// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm64,freebsd
@@ -7,13 +7,13 @@ package unix
const (
// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
- SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
+ SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void
SYS_FORK = 2 // { int fork(void); }
- SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
- SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
SYS_CLOSE = 6 // { int close(int fd); }
- SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); }
SYS_LINK = 9 // { int link(char *path, char *link); }
SYS_UNLINK = 10 // { int unlink(char *path); }
SYS_CHDIR = 12 // { int chdir(char *path); }
@@ -21,20 +21,20 @@ const (
SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
- SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
+ SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
SYS_GETPID = 20 // { pid_t getpid(void); }
- SYS_MOUNT = 21 // { int mount(char *type, char *path, \
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
SYS_SETUID = 23 // { int setuid(uid_t uid); }
SYS_GETUID = 24 // { uid_t getuid(void); }
SYS_GETEUID = 25 // { uid_t geteuid(void); }
- SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
- SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
- SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
- SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
- SYS_ACCEPT = 30 // { int accept(int s, \
- SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
- SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); }
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); }
+ SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); }
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
SYS_ACCESS = 33 // { int access(char *path, int amode); }
SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
@@ -42,56 +42,57 @@ const (
SYS_KILL = 37 // { int kill(int pid, int signum); }
SYS_GETPPID = 39 // { pid_t getppid(void); }
SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_PIPE = 42 // { int pipe(void); }
SYS_GETEGID = 43 // { gid_t getegid(void); }
- SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
- SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
SYS_GETGID = 47 // { gid_t getgid(void); }
- SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
SYS_ACCT = 51 // { int acct(char *path); }
- SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \
- SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
SYS_REBOOT = 55 // { int reboot(int opt); }
SYS_REVOKE = 56 // { int revoke(char *path); }
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
- SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \
- SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \
- SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \
+ SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
+ SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
SYS_CHROOT = 61 // { int chroot(char *path); }
- SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
SYS_VFORK = 66 // { int vfork(void); }
SYS_SBRK = 69 // { int sbrk(int incr); }
SYS_SSTK = 70 // { int sstk(int incr); }
- SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \
+ SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
- SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \
- SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \
- SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
- SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \
- SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \
+ SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
SYS_GETPGRP = 81 // { int getpgrp(void); }
SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
- SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); }
SYS_SWAPON = 85 // { int swapon(char *name); }
- SYS_GETITIMER = 86 // { int getitimer(u_int which, \
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
- SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
SYS_FSYNC = 95 // { int fsync(int fd); }
- SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \
- SYS_SOCKET = 97 // { int socket(int domain, int type, \
- SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
+ SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
- SYS_BIND = 104 // { int bind(int s, caddr_t name, \
- SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); }
SYS_LISTEN = 106 // { int listen(int s, int backlog); }
- SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
- SYS_GETRUSAGE = 117 // { int getrusage(int who, \
- SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
- SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \
- SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
- SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); }
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); }
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); }
SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
@@ -99,24 +100,24 @@ const (
SYS_RENAME = 128 // { int rename(char *from, char *to); }
SYS_FLOCK = 131 // { int flock(int fd, int how); }
SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
- SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); }
SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
- SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); }
SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
SYS_RMDIR = 137 // { int rmdir(char *path); }
- SYS_UTIMES = 138 // { int utimes(char *path, \
- SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
+ SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); }
SYS_SETSID = 147 // { int setsid(void); }
- SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); }
SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
- SYS_LGETFH = 160 // { int lgetfh(char *fname, \
- SYS_GETFH = 161 // { int getfh(char *fname, \
+ SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); }
+ SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
- SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
- SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \
- SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \
- SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); }
+ SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
+ SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
+ SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
@@ -127,269 +128,269 @@ const (
SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
- SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
- SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
- SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \
- SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
+ SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
SYS_UNDELETE = 205 // { int undelete(char *path); }
SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
- SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
- SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \
- SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
- SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \
- SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \
- SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \
+ SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
- SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \
- SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
- SYS_CLOCK_SETTIME = 233 // { int clock_settime( \
- SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
- SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \
+ SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
- SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \
- SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \
+ SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); }
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
- SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
- SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \
- SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \
- SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, \
- SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
- SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
- SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \
+ SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
- SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \
- SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
+ SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
- SYS_LUTIMES = 276 // { int lutimes(char *path, \
+ SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
- SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \
- SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \
- SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \
- SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \
+ SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
+ SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
+ SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
- SYS_MODSTAT = 301 // { int modstat(int modid, \
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
- SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
- SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \
- SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
- SYS_AIO_SUSPEND = 315 // { int aio_suspend( \
- SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
- SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \
- SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \
- SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
- SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
- SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
SYS_JAIL = 338 // { int jail(struct jail *jail); }
- SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); }
SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
- SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \
- SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \
- SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
- SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
- SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \
- SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \
- SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
- SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \
- SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
- SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \
- SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
- SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \
- SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \
- SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
- SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( \
- SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \
- SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); }
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); }
+ SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); }
+ SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
+ SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
- SYS_KEVENT = 363 // { int kevent(int fd, \
- SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \
- SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \
- SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \
+ SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
SYS___SETUGID = 374 // { int __setugid(int flag); }
SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
- SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \
+ SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); }
SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
- SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \
- SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \
- SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \
- SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \
- SYS_KENV = 390 // { int kenv(int what, const char *name, \
- SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \
- SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \
- SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \
- SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \
- SYS_STATFS = 396 // { int statfs(char *path, \
+ SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); }
+ SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); }
+ SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); }
+ SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); }
+ SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); }
+ SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); }
+ SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
+ SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
+ SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
+ SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
- SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \
+ SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); }
- SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \
- SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \
+ SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); }
+ SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); }
SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); }
SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); }
SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); }
- SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \
- SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \
- SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \
- SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \
- SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \
- SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \
- SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \
- SYS_SIGACTION = 416 // { int sigaction(int sig, \
- SYS_SIGRETURN = 417 // { int sigreturn( \
+ SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
+ SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
+ SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
+ SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
+ SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
+ SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
- SYS_SETCONTEXT = 422 // { int setcontext( \
- SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \
+ SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
+ SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
- SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \
- SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \
- SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \
- SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \
- SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \
- SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \
+ SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); }
+ SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); }
+ SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); }
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
- SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \
- SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \
- SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \
- SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \
- SYS_THR_SUSPEND = 442 // { int thr_suspend( \
+ SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
- SYS_AUDIT = 445 // { int audit(const void *record, \
- SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \
+ SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
+ SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); }
SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
- SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \
- SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
- SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \
- SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \
+ SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
+ SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
- SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \
- SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \
- SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \
- SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \
- SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \
+ SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); }
- SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \
+ SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); }
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
- SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
- SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
- SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
- SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \
- SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \
- SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \
- SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \
+ SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
+ SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
+ SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
+ SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
+ SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); }
SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
- SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \
+ SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); }
SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
- SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \
- SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \
- SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \
- SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \
- SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \
- SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \
- SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \
- SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \
- SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \
- SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \
- SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \
+ SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); }
+ SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); }
+ SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); }
+ SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); }
+ SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); }
+ SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
+ SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
+ SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
+ SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
- SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \
- SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \
- SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \
- SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \
- SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \
+ SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
+ SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
+ SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); }
- SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \
- SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \
+ SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); }
+ SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); }
SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
- SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \
- SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \
- SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \
+ SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); }
+ SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); }
SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
- SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \
+ SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); }
SYS_CAP_ENTER = 516 // { int cap_enter(void); }
SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
- SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \
- SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \
+ SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); }
+ SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); }
SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
- SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \
- SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \
- SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \
- SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \
- SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \
- SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \
- SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \
- SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \
- SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \
- SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \
- SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \
- SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \
- SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \
- SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \
- SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \
- SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \
- SYS_ACCEPT4 = 541 // { int accept4(int s, \
+ SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); }
+ SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); }
+ SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); }
+ SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); }
+ SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); }
+ SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); }
+ SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); }
+ SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); }
+ SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); }
+ SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); }
+ SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); }
+ SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); }
SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); }
- SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \
- SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \
- SYS_FUTIMENS = 546 // { int futimens(int fd, \
- SYS_UTIMENSAT = 547 // { int utimensat(int fd, \
- SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, \
- SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, \
+ SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); }
+ SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
+ SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
+ SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
+ SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
+ SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 8d17873de..33b6e4d1a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -6,387 +6,421 @@
package unix
const (
- SYS_RESTART_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_WAITPID = 7
- SYS_CREAT = 8
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_EXECVE = 11
- SYS_CHDIR = 12
- SYS_TIME = 13
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_LCHOWN = 16
- SYS_BREAK = 17
- SYS_OLDSTAT = 18
- SYS_LSEEK = 19
- SYS_GETPID = 20
- SYS_MOUNT = 21
- SYS_UMOUNT = 22
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_STIME = 25
- SYS_PTRACE = 26
- SYS_ALARM = 27
- SYS_OLDFSTAT = 28
- SYS_PAUSE = 29
- SYS_UTIME = 30
- SYS_STTY = 31
- SYS_GTTY = 32
- SYS_ACCESS = 33
- SYS_NICE = 34
- SYS_FTIME = 35
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_RENAME = 38
- SYS_MKDIR = 39
- SYS_RMDIR = 40
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_TIMES = 43
- SYS_PROF = 44
- SYS_BRK = 45
- SYS_SETGID = 46
- SYS_GETGID = 47
- SYS_SIGNAL = 48
- SYS_GETEUID = 49
- SYS_GETEGID = 50
- SYS_ACCT = 51
- SYS_UMOUNT2 = 52
- SYS_LOCK = 53
- SYS_IOCTL = 54
- SYS_FCNTL = 55
- SYS_MPX = 56
- SYS_SETPGID = 57
- SYS_ULIMIT = 58
- SYS_OLDOLDUNAME = 59
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_USTAT = 62
- SYS_DUP2 = 63
- SYS_GETPPID = 64
- SYS_GETPGRP = 65
- SYS_SETSID = 66
- SYS_SIGACTION = 67
- SYS_SGETMASK = 68
- SYS_SSETMASK = 69
- SYS_SETREUID = 70
- SYS_SETREGID = 71
- SYS_SIGSUSPEND = 72
- SYS_SIGPENDING = 73
- SYS_SETHOSTNAME = 74
- SYS_SETRLIMIT = 75
- SYS_GETRLIMIT = 76
- SYS_GETRUSAGE = 77
- SYS_GETTIMEOFDAY = 78
- SYS_SETTIMEOFDAY = 79
- SYS_GETGROUPS = 80
- SYS_SETGROUPS = 81
- SYS_SELECT = 82
- SYS_SYMLINK = 83
- SYS_OLDLSTAT = 84
- SYS_READLINK = 85
- SYS_USELIB = 86
- SYS_SWAPON = 87
- SYS_REBOOT = 88
- SYS_READDIR = 89
- SYS_MMAP = 90
- SYS_MUNMAP = 91
- SYS_TRUNCATE = 92
- SYS_FTRUNCATE = 93
- SYS_FCHMOD = 94
- SYS_FCHOWN = 95
- SYS_GETPRIORITY = 96
- SYS_SETPRIORITY = 97
- SYS_PROFIL = 98
- SYS_STATFS = 99
- SYS_FSTATFS = 100
- SYS_IOPERM = 101
- SYS_SOCKETCALL = 102
- SYS_SYSLOG = 103
- SYS_SETITIMER = 104
- SYS_GETITIMER = 105
- SYS_STAT = 106
- SYS_LSTAT = 107
- SYS_FSTAT = 108
- SYS_OLDUNAME = 109
- SYS_IOPL = 110
- SYS_VHANGUP = 111
- SYS_IDLE = 112
- SYS_VM86OLD = 113
- SYS_WAIT4 = 114
- SYS_SWAPOFF = 115
- SYS_SYSINFO = 116
- SYS_IPC = 117
- SYS_FSYNC = 118
- SYS_SIGRETURN = 119
- SYS_CLONE = 120
- SYS_SETDOMAINNAME = 121
- SYS_UNAME = 122
- SYS_MODIFY_LDT = 123
- SYS_ADJTIMEX = 124
- SYS_MPROTECT = 125
- SYS_SIGPROCMASK = 126
- SYS_CREATE_MODULE = 127
- SYS_INIT_MODULE = 128
- SYS_DELETE_MODULE = 129
- SYS_GET_KERNEL_SYMS = 130
- SYS_QUOTACTL = 131
- SYS_GETPGID = 132
- SYS_FCHDIR = 133
- SYS_BDFLUSH = 134
- SYS_SYSFS = 135
- SYS_PERSONALITY = 136
- SYS_AFS_SYSCALL = 137
- SYS_SETFSUID = 138
- SYS_SETFSGID = 139
- SYS__LLSEEK = 140
- SYS_GETDENTS = 141
- SYS__NEWSELECT = 142
- SYS_FLOCK = 143
- SYS_MSYNC = 144
- SYS_READV = 145
- SYS_WRITEV = 146
- SYS_GETSID = 147
- SYS_FDATASYNC = 148
- SYS__SYSCTL = 149
- SYS_MLOCK = 150
- SYS_MUNLOCK = 151
- SYS_MLOCKALL = 152
- SYS_MUNLOCKALL = 153
- SYS_SCHED_SETPARAM = 154
- SYS_SCHED_GETPARAM = 155
- SYS_SCHED_SETSCHEDULER = 156
- SYS_SCHED_GETSCHEDULER = 157
- SYS_SCHED_YIELD = 158
- SYS_SCHED_GET_PRIORITY_MAX = 159
- SYS_SCHED_GET_PRIORITY_MIN = 160
- SYS_SCHED_RR_GET_INTERVAL = 161
- SYS_NANOSLEEP = 162
- SYS_MREMAP = 163
- SYS_SETRESUID = 164
- SYS_GETRESUID = 165
- SYS_VM86 = 166
- SYS_QUERY_MODULE = 167
- SYS_POLL = 168
- SYS_NFSSERVCTL = 169
- SYS_SETRESGID = 170
- SYS_GETRESGID = 171
- SYS_PRCTL = 172
- SYS_RT_SIGRETURN = 173
- SYS_RT_SIGACTION = 174
- SYS_RT_SIGPROCMASK = 175
- SYS_RT_SIGPENDING = 176
- SYS_RT_SIGTIMEDWAIT = 177
- SYS_RT_SIGQUEUEINFO = 178
- SYS_RT_SIGSUSPEND = 179
- SYS_PREAD64 = 180
- SYS_PWRITE64 = 181
- SYS_CHOWN = 182
- SYS_GETCWD = 183
- SYS_CAPGET = 184
- SYS_CAPSET = 185
- SYS_SIGALTSTACK = 186
- SYS_SENDFILE = 187
- SYS_GETPMSG = 188
- SYS_PUTPMSG = 189
- SYS_VFORK = 190
- SYS_UGETRLIMIT = 191
- SYS_MMAP2 = 192
- SYS_TRUNCATE64 = 193
- SYS_FTRUNCATE64 = 194
- SYS_STAT64 = 195
- SYS_LSTAT64 = 196
- SYS_FSTAT64 = 197
- SYS_LCHOWN32 = 198
- SYS_GETUID32 = 199
- SYS_GETGID32 = 200
- SYS_GETEUID32 = 201
- SYS_GETEGID32 = 202
- SYS_SETREUID32 = 203
- SYS_SETREGID32 = 204
- SYS_GETGROUPS32 = 205
- SYS_SETGROUPS32 = 206
- SYS_FCHOWN32 = 207
- SYS_SETRESUID32 = 208
- SYS_GETRESUID32 = 209
- SYS_SETRESGID32 = 210
- SYS_GETRESGID32 = 211
- SYS_CHOWN32 = 212
- SYS_SETUID32 = 213
- SYS_SETGID32 = 214
- SYS_SETFSUID32 = 215
- SYS_SETFSGID32 = 216
- SYS_PIVOT_ROOT = 217
- SYS_MINCORE = 218
- SYS_MADVISE = 219
- SYS_GETDENTS64 = 220
- SYS_FCNTL64 = 221
- SYS_GETTID = 224
- SYS_READAHEAD = 225
- SYS_SETXATTR = 226
- SYS_LSETXATTR = 227
- SYS_FSETXATTR = 228
- SYS_GETXATTR = 229
- SYS_LGETXATTR = 230
- SYS_FGETXATTR = 231
- SYS_LISTXATTR = 232
- SYS_LLISTXATTR = 233
- SYS_FLISTXATTR = 234
- SYS_REMOVEXATTR = 235
- SYS_LREMOVEXATTR = 236
- SYS_FREMOVEXATTR = 237
- SYS_TKILL = 238
- SYS_SENDFILE64 = 239
- SYS_FUTEX = 240
- SYS_SCHED_SETAFFINITY = 241
- SYS_SCHED_GETAFFINITY = 242
- SYS_SET_THREAD_AREA = 243
- SYS_GET_THREAD_AREA = 244
- SYS_IO_SETUP = 245
- SYS_IO_DESTROY = 246
- SYS_IO_GETEVENTS = 247
- SYS_IO_SUBMIT = 248
- SYS_IO_CANCEL = 249
- SYS_FADVISE64 = 250
- SYS_EXIT_GROUP = 252
- SYS_LOOKUP_DCOOKIE = 253
- SYS_EPOLL_CREATE = 254
- SYS_EPOLL_CTL = 255
- SYS_EPOLL_WAIT = 256
- SYS_REMAP_FILE_PAGES = 257
- SYS_SET_TID_ADDRESS = 258
- SYS_TIMER_CREATE = 259
- SYS_TIMER_SETTIME = 260
- SYS_TIMER_GETTIME = 261
- SYS_TIMER_GETOVERRUN = 262
- SYS_TIMER_DELETE = 263
- SYS_CLOCK_SETTIME = 264
- SYS_CLOCK_GETTIME = 265
- SYS_CLOCK_GETRES = 266
- SYS_CLOCK_NANOSLEEP = 267
- SYS_STATFS64 = 268
- SYS_FSTATFS64 = 269
- SYS_TGKILL = 270
- SYS_UTIMES = 271
- SYS_FADVISE64_64 = 272
- SYS_VSERVER = 273
- SYS_MBIND = 274
- SYS_GET_MEMPOLICY = 275
- SYS_SET_MEMPOLICY = 276
- SYS_MQ_OPEN = 277
- SYS_MQ_UNLINK = 278
- SYS_MQ_TIMEDSEND = 279
- SYS_MQ_TIMEDRECEIVE = 280
- SYS_MQ_NOTIFY = 281
- SYS_MQ_GETSETATTR = 282
- SYS_KEXEC_LOAD = 283
- SYS_WAITID = 284
- SYS_ADD_KEY = 286
- SYS_REQUEST_KEY = 287
- SYS_KEYCTL = 288
- SYS_IOPRIO_SET = 289
- SYS_IOPRIO_GET = 290
- SYS_INOTIFY_INIT = 291
- SYS_INOTIFY_ADD_WATCH = 292
- SYS_INOTIFY_RM_WATCH = 293
- SYS_MIGRATE_PAGES = 294
- SYS_OPENAT = 295
- SYS_MKDIRAT = 296
- SYS_MKNODAT = 297
- SYS_FCHOWNAT = 298
- SYS_FUTIMESAT = 299
- SYS_FSTATAT64 = 300
- SYS_UNLINKAT = 301
- SYS_RENAMEAT = 302
- SYS_LINKAT = 303
- SYS_SYMLINKAT = 304
- SYS_READLINKAT = 305
- SYS_FCHMODAT = 306
- SYS_FACCESSAT = 307
- SYS_PSELECT6 = 308
- SYS_PPOLL = 309
- SYS_UNSHARE = 310
- SYS_SET_ROBUST_LIST = 311
- SYS_GET_ROBUST_LIST = 312
- SYS_SPLICE = 313
- SYS_SYNC_FILE_RANGE = 314
- SYS_TEE = 315
- SYS_VMSPLICE = 316
- SYS_MOVE_PAGES = 317
- SYS_GETCPU = 318
- SYS_EPOLL_PWAIT = 319
- SYS_UTIMENSAT = 320
- SYS_SIGNALFD = 321
- SYS_TIMERFD_CREATE = 322
- SYS_EVENTFD = 323
- SYS_FALLOCATE = 324
- SYS_TIMERFD_SETTIME = 325
- SYS_TIMERFD_GETTIME = 326
- SYS_SIGNALFD4 = 327
- SYS_EVENTFD2 = 328
- SYS_EPOLL_CREATE1 = 329
- SYS_DUP3 = 330
- SYS_PIPE2 = 331
- SYS_INOTIFY_INIT1 = 332
- SYS_PREADV = 333
- SYS_PWRITEV = 334
- SYS_RT_TGSIGQUEUEINFO = 335
- SYS_PERF_EVENT_OPEN = 336
- SYS_RECVMMSG = 337
- SYS_FANOTIFY_INIT = 338
- SYS_FANOTIFY_MARK = 339
- SYS_PRLIMIT64 = 340
- SYS_NAME_TO_HANDLE_AT = 341
- SYS_OPEN_BY_HANDLE_AT = 342
- SYS_CLOCK_ADJTIME = 343
- SYS_SYNCFS = 344
- SYS_SENDMMSG = 345
- SYS_SETNS = 346
- SYS_PROCESS_VM_READV = 347
- SYS_PROCESS_VM_WRITEV = 348
- SYS_KCMP = 349
- SYS_FINIT_MODULE = 350
- SYS_SCHED_SETATTR = 351
- SYS_SCHED_GETATTR = 352
- SYS_RENAMEAT2 = 353
- SYS_SECCOMP = 354
- SYS_GETRANDOM = 355
- SYS_MEMFD_CREATE = 356
- SYS_BPF = 357
- SYS_EXECVEAT = 358
- SYS_SOCKET = 359
- SYS_SOCKETPAIR = 360
- SYS_BIND = 361
- SYS_CONNECT = 362
- SYS_LISTEN = 363
- SYS_ACCEPT4 = 364
- SYS_GETSOCKOPT = 365
- SYS_SETSOCKOPT = 366
- SYS_GETSOCKNAME = 367
- SYS_GETPEERNAME = 368
- SYS_SENDTO = 369
- SYS_SENDMSG = 370
- SYS_RECVFROM = 371
- SYS_RECVMSG = 372
- SYS_SHUTDOWN = 373
- SYS_USERFAULTFD = 374
- SYS_MEMBARRIER = 375
- SYS_MLOCK2 = 376
- SYS_COPY_FILE_RANGE = 377
- SYS_PREADV2 = 378
- SYS_PWRITEV2 = 379
- SYS_PKEY_MPROTECT = 380
- SYS_PKEY_ALLOC = 381
- SYS_PKEY_FREE = 382
- SYS_STATX = 383
- SYS_ARCH_PRCTL = 384
- SYS_IO_PGETEVENTS = 385
- SYS_RSEQ = 386
+ SYS_RESTART_SYSCALL = 0
+ SYS_EXIT = 1
+ SYS_FORK = 2
+ SYS_READ = 3
+ SYS_WRITE = 4
+ SYS_OPEN = 5
+ SYS_CLOSE = 6
+ SYS_WAITPID = 7
+ SYS_CREAT = 8
+ SYS_LINK = 9
+ SYS_UNLINK = 10
+ SYS_EXECVE = 11
+ SYS_CHDIR = 12
+ SYS_TIME = 13
+ SYS_MKNOD = 14
+ SYS_CHMOD = 15
+ SYS_LCHOWN = 16
+ SYS_BREAK = 17
+ SYS_OLDSTAT = 18
+ SYS_LSEEK = 19
+ SYS_GETPID = 20
+ SYS_MOUNT = 21
+ SYS_UMOUNT = 22
+ SYS_SETUID = 23
+ SYS_GETUID = 24
+ SYS_STIME = 25
+ SYS_PTRACE = 26
+ SYS_ALARM = 27
+ SYS_OLDFSTAT = 28
+ SYS_PAUSE = 29
+ SYS_UTIME = 30
+ SYS_STTY = 31
+ SYS_GTTY = 32
+ SYS_ACCESS = 33
+ SYS_NICE = 34
+ SYS_FTIME = 35
+ SYS_SYNC = 36
+ SYS_KILL = 37
+ SYS_RENAME = 38
+ SYS_MKDIR = 39
+ SYS_RMDIR = 40
+ SYS_DUP = 41
+ SYS_PIPE = 42
+ SYS_TIMES = 43
+ SYS_PROF = 44
+ SYS_BRK = 45
+ SYS_SETGID = 46
+ SYS_GETGID = 47
+ SYS_SIGNAL = 48
+ SYS_GETEUID = 49
+ SYS_GETEGID = 50
+ SYS_ACCT = 51
+ SYS_UMOUNT2 = 52
+ SYS_LOCK = 53
+ SYS_IOCTL = 54
+ SYS_FCNTL = 55
+ SYS_MPX = 56
+ SYS_SETPGID = 57
+ SYS_ULIMIT = 58
+ SYS_OLDOLDUNAME = 59
+ SYS_UMASK = 60
+ SYS_CHROOT = 61
+ SYS_USTAT = 62
+ SYS_DUP2 = 63
+ SYS_GETPPID = 64
+ SYS_GETPGRP = 65
+ SYS_SETSID = 66
+ SYS_SIGACTION = 67
+ SYS_SGETMASK = 68
+ SYS_SSETMASK = 69
+ SYS_SETREUID = 70
+ SYS_SETREGID = 71
+ SYS_SIGSUSPEND = 72
+ SYS_SIGPENDING = 73
+ SYS_SETHOSTNAME = 74
+ SYS_SETRLIMIT = 75
+ SYS_GETRLIMIT = 76
+ SYS_GETRUSAGE = 77
+ SYS_GETTIMEOFDAY = 78
+ SYS_SETTIMEOFDAY = 79
+ SYS_GETGROUPS = 80
+ SYS_SETGROUPS = 81
+ SYS_SELECT = 82
+ SYS_SYMLINK = 83
+ SYS_OLDLSTAT = 84
+ SYS_READLINK = 85
+ SYS_USELIB = 86
+ SYS_SWAPON = 87
+ SYS_REBOOT = 88
+ SYS_READDIR = 89
+ SYS_MMAP = 90
+ SYS_MUNMAP = 91
+ SYS_TRUNCATE = 92
+ SYS_FTRUNCATE = 93
+ SYS_FCHMOD = 94
+ SYS_FCHOWN = 95
+ SYS_GETPRIORITY = 96
+ SYS_SETPRIORITY = 97
+ SYS_PROFIL = 98
+ SYS_STATFS = 99
+ SYS_FSTATFS = 100
+ SYS_IOPERM = 101
+ SYS_SOCKETCALL = 102
+ SYS_SYSLOG = 103
+ SYS_SETITIMER = 104
+ SYS_GETITIMER = 105
+ SYS_STAT = 106
+ SYS_LSTAT = 107
+ SYS_FSTAT = 108
+ SYS_OLDUNAME = 109
+ SYS_IOPL = 110
+ SYS_VHANGUP = 111
+ SYS_IDLE = 112
+ SYS_VM86OLD = 113
+ SYS_WAIT4 = 114
+ SYS_SWAPOFF = 115
+ SYS_SYSINFO = 116
+ SYS_IPC = 117
+ SYS_FSYNC = 118
+ SYS_SIGRETURN = 119
+ SYS_CLONE = 120
+ SYS_SETDOMAINNAME = 121
+ SYS_UNAME = 122
+ SYS_MODIFY_LDT = 123
+ SYS_ADJTIMEX = 124
+ SYS_MPROTECT = 125
+ SYS_SIGPROCMASK = 126
+ SYS_CREATE_MODULE = 127
+ SYS_INIT_MODULE = 128
+ SYS_DELETE_MODULE = 129
+ SYS_GET_KERNEL_SYMS = 130
+ SYS_QUOTACTL = 131
+ SYS_GETPGID = 132
+ SYS_FCHDIR = 133
+ SYS_BDFLUSH = 134
+ SYS_SYSFS = 135
+ SYS_PERSONALITY = 136
+ SYS_AFS_SYSCALL = 137
+ SYS_SETFSUID = 138
+ SYS_SETFSGID = 139
+ SYS__LLSEEK = 140
+ SYS_GETDENTS = 141
+ SYS__NEWSELECT = 142
+ SYS_FLOCK = 143
+ SYS_MSYNC = 144
+ SYS_READV = 145
+ SYS_WRITEV = 146
+ SYS_GETSID = 147
+ SYS_FDATASYNC = 148
+ SYS__SYSCTL = 149
+ SYS_MLOCK = 150
+ SYS_MUNLOCK = 151
+ SYS_MLOCKALL = 152
+ SYS_MUNLOCKALL = 153
+ SYS_SCHED_SETPARAM = 154
+ SYS_SCHED_GETPARAM = 155
+ SYS_SCHED_SETSCHEDULER = 156
+ SYS_SCHED_GETSCHEDULER = 157
+ SYS_SCHED_YIELD = 158
+ SYS_SCHED_GET_PRIORITY_MAX = 159
+ SYS_SCHED_GET_PRIORITY_MIN = 160
+ SYS_SCHED_RR_GET_INTERVAL = 161
+ SYS_NANOSLEEP = 162
+ SYS_MREMAP = 163
+ SYS_SETRESUID = 164
+ SYS_GETRESUID = 165
+ SYS_VM86 = 166
+ SYS_QUERY_MODULE = 167
+ SYS_POLL = 168
+ SYS_NFSSERVCTL = 169
+ SYS_SETRESGID = 170
+ SYS_GETRESGID = 171
+ SYS_PRCTL = 172
+ SYS_RT_SIGRETURN = 173
+ SYS_RT_SIGACTION = 174
+ SYS_RT_SIGPROCMASK = 175
+ SYS_RT_SIGPENDING = 176
+ SYS_RT_SIGTIMEDWAIT = 177
+ SYS_RT_SIGQUEUEINFO = 178
+ SYS_RT_SIGSUSPEND = 179
+ SYS_PREAD64 = 180
+ SYS_PWRITE64 = 181
+ SYS_CHOWN = 182
+ SYS_GETCWD = 183
+ SYS_CAPGET = 184
+ SYS_CAPSET = 185
+ SYS_SIGALTSTACK = 186
+ SYS_SENDFILE = 187
+ SYS_GETPMSG = 188
+ SYS_PUTPMSG = 189
+ SYS_VFORK = 190
+ SYS_UGETRLIMIT = 191
+ SYS_MMAP2 = 192
+ SYS_TRUNCATE64 = 193
+ SYS_FTRUNCATE64 = 194
+ SYS_STAT64 = 195
+ SYS_LSTAT64 = 196
+ SYS_FSTAT64 = 197
+ SYS_LCHOWN32 = 198
+ SYS_GETUID32 = 199
+ SYS_GETGID32 = 200
+ SYS_GETEUID32 = 201
+ SYS_GETEGID32 = 202
+ SYS_SETREUID32 = 203
+ SYS_SETREGID32 = 204
+ SYS_GETGROUPS32 = 205
+ SYS_SETGROUPS32 = 206
+ SYS_FCHOWN32 = 207
+ SYS_SETRESUID32 = 208
+ SYS_GETRESUID32 = 209
+ SYS_SETRESGID32 = 210
+ SYS_GETRESGID32 = 211
+ SYS_CHOWN32 = 212
+ SYS_SETUID32 = 213
+ SYS_SETGID32 = 214
+ SYS_SETFSUID32 = 215
+ SYS_SETFSGID32 = 216
+ SYS_PIVOT_ROOT = 217
+ SYS_MINCORE = 218
+ SYS_MADVISE = 219
+ SYS_GETDENTS64 = 220
+ SYS_FCNTL64 = 221
+ SYS_GETTID = 224
+ SYS_READAHEAD = 225
+ SYS_SETXATTR = 226
+ SYS_LSETXATTR = 227
+ SYS_FSETXATTR = 228
+ SYS_GETXATTR = 229
+ SYS_LGETXATTR = 230
+ SYS_FGETXATTR = 231
+ SYS_LISTXATTR = 232
+ SYS_LLISTXATTR = 233
+ SYS_FLISTXATTR = 234
+ SYS_REMOVEXATTR = 235
+ SYS_LREMOVEXATTR = 236
+ SYS_FREMOVEXATTR = 237
+ SYS_TKILL = 238
+ SYS_SENDFILE64 = 239
+ SYS_FUTEX = 240
+ SYS_SCHED_SETAFFINITY = 241
+ SYS_SCHED_GETAFFINITY = 242
+ SYS_SET_THREAD_AREA = 243
+ SYS_GET_THREAD_AREA = 244
+ SYS_IO_SETUP = 245
+ SYS_IO_DESTROY = 246
+ SYS_IO_GETEVENTS = 247
+ SYS_IO_SUBMIT = 248
+ SYS_IO_CANCEL = 249
+ SYS_FADVISE64 = 250
+ SYS_EXIT_GROUP = 252
+ SYS_LOOKUP_DCOOKIE = 253
+ SYS_EPOLL_CREATE = 254
+ SYS_EPOLL_CTL = 255
+ SYS_EPOLL_WAIT = 256
+ SYS_REMAP_FILE_PAGES = 257
+ SYS_SET_TID_ADDRESS = 258
+ SYS_TIMER_CREATE = 259
+ SYS_TIMER_SETTIME = 260
+ SYS_TIMER_GETTIME = 261
+ SYS_TIMER_GETOVERRUN = 262
+ SYS_TIMER_DELETE = 263
+ SYS_CLOCK_SETTIME = 264
+ SYS_CLOCK_GETTIME = 265
+ SYS_CLOCK_GETRES = 266
+ SYS_CLOCK_NANOSLEEP = 267
+ SYS_STATFS64 = 268
+ SYS_FSTATFS64 = 269
+ SYS_TGKILL = 270
+ SYS_UTIMES = 271
+ SYS_FADVISE64_64 = 272
+ SYS_VSERVER = 273
+ SYS_MBIND = 274
+ SYS_GET_MEMPOLICY = 275
+ SYS_SET_MEMPOLICY = 276
+ SYS_MQ_OPEN = 277
+ SYS_MQ_UNLINK = 278
+ SYS_MQ_TIMEDSEND = 279
+ SYS_MQ_TIMEDRECEIVE = 280
+ SYS_MQ_NOTIFY = 281
+ SYS_MQ_GETSETATTR = 282
+ SYS_KEXEC_LOAD = 283
+ SYS_WAITID = 284
+ SYS_ADD_KEY = 286
+ SYS_REQUEST_KEY = 287
+ SYS_KEYCTL = 288
+ SYS_IOPRIO_SET = 289
+ SYS_IOPRIO_GET = 290
+ SYS_INOTIFY_INIT = 291
+ SYS_INOTIFY_ADD_WATCH = 292
+ SYS_INOTIFY_RM_WATCH = 293
+ SYS_MIGRATE_PAGES = 294
+ SYS_OPENAT = 295
+ SYS_MKDIRAT = 296
+ SYS_MKNODAT = 297
+ SYS_FCHOWNAT = 298
+ SYS_FUTIMESAT = 299
+ SYS_FSTATAT64 = 300
+ SYS_UNLINKAT = 301
+ SYS_RENAMEAT = 302
+ SYS_LINKAT = 303
+ SYS_SYMLINKAT = 304
+ SYS_READLINKAT = 305
+ SYS_FCHMODAT = 306
+ SYS_FACCESSAT = 307
+ SYS_PSELECT6 = 308
+ SYS_PPOLL = 309
+ SYS_UNSHARE = 310
+ SYS_SET_ROBUST_LIST = 311
+ SYS_GET_ROBUST_LIST = 312
+ SYS_SPLICE = 313
+ SYS_SYNC_FILE_RANGE = 314
+ SYS_TEE = 315
+ SYS_VMSPLICE = 316
+ SYS_MOVE_PAGES = 317
+ SYS_GETCPU = 318
+ SYS_EPOLL_PWAIT = 319
+ SYS_UTIMENSAT = 320
+ SYS_SIGNALFD = 321
+ SYS_TIMERFD_CREATE = 322
+ SYS_EVENTFD = 323
+ SYS_FALLOCATE = 324
+ SYS_TIMERFD_SETTIME = 325
+ SYS_TIMERFD_GETTIME = 326
+ SYS_SIGNALFD4 = 327
+ SYS_EVENTFD2 = 328
+ SYS_EPOLL_CREATE1 = 329
+ SYS_DUP3 = 330
+ SYS_PIPE2 = 331
+ SYS_INOTIFY_INIT1 = 332
+ SYS_PREADV = 333
+ SYS_PWRITEV = 334
+ SYS_RT_TGSIGQUEUEINFO = 335
+ SYS_PERF_EVENT_OPEN = 336
+ SYS_RECVMMSG = 337
+ SYS_FANOTIFY_INIT = 338
+ SYS_FANOTIFY_MARK = 339
+ SYS_PRLIMIT64 = 340
+ SYS_NAME_TO_HANDLE_AT = 341
+ SYS_OPEN_BY_HANDLE_AT = 342
+ SYS_CLOCK_ADJTIME = 343
+ SYS_SYNCFS = 344
+ SYS_SENDMMSG = 345
+ SYS_SETNS = 346
+ SYS_PROCESS_VM_READV = 347
+ SYS_PROCESS_VM_WRITEV = 348
+ SYS_KCMP = 349
+ SYS_FINIT_MODULE = 350
+ SYS_SCHED_SETATTR = 351
+ SYS_SCHED_GETATTR = 352
+ SYS_RENAMEAT2 = 353
+ SYS_SECCOMP = 354
+ SYS_GETRANDOM = 355
+ SYS_MEMFD_CREATE = 356
+ SYS_BPF = 357
+ SYS_EXECVEAT = 358
+ SYS_SOCKET = 359
+ SYS_SOCKETPAIR = 360
+ SYS_BIND = 361
+ SYS_CONNECT = 362
+ SYS_LISTEN = 363
+ SYS_ACCEPT4 = 364
+ SYS_GETSOCKOPT = 365
+ SYS_SETSOCKOPT = 366
+ SYS_GETSOCKNAME = 367
+ SYS_GETPEERNAME = 368
+ SYS_SENDTO = 369
+ SYS_SENDMSG = 370
+ SYS_RECVFROM = 371
+ SYS_RECVMSG = 372
+ SYS_SHUTDOWN = 373
+ SYS_USERFAULTFD = 374
+ SYS_MEMBARRIER = 375
+ SYS_MLOCK2 = 376
+ SYS_COPY_FILE_RANGE = 377
+ SYS_PREADV2 = 378
+ SYS_PWRITEV2 = 379
+ SYS_PKEY_MPROTECT = 380
+ SYS_PKEY_ALLOC = 381
+ SYS_PKEY_FREE = 382
+ SYS_STATX = 383
+ SYS_ARCH_PRCTL = 384
+ SYS_IO_PGETEVENTS = 385
+ SYS_RSEQ = 386
+ SYS_SEMGET = 393
+ SYS_SEMCTL = 394
+ SYS_SHMGET = 395
+ SYS_SHMCTL = 396
+ SYS_SHMAT = 397
+ SYS_SHMDT = 398
+ SYS_MSGGET = 399
+ SYS_MSGSND = 400
+ SYS_MSGRCV = 401
+ SYS_MSGCTL = 402
+ SYS_CLOCK_GETTIME64 = 403
+ SYS_CLOCK_SETTIME64 = 404
+ SYS_CLOCK_ADJTIME64 = 405
+ SYS_CLOCK_GETRES_TIME64 = 406
+ SYS_CLOCK_NANOSLEEP_TIME64 = 407
+ SYS_TIMER_GETTIME64 = 408
+ SYS_TIMER_SETTIME64 = 409
+ SYS_TIMERFD_GETTIME64 = 410
+ SYS_TIMERFD_SETTIME64 = 411
+ SYS_UTIMENSAT_TIME64 = 412
+ SYS_PSELECT6_TIME64 = 413
+ SYS_PPOLL_TIME64 = 414
+ SYS_IO_PGETEVENTS_TIME64 = 416
+ SYS_RECVMMSG_TIME64 = 417
+ SYS_MQ_TIMEDSEND_TIME64 = 418
+ SYS_MQ_TIMEDRECEIVE_TIME64 = 419
+ SYS_SEMTIMEDOP_TIME64 = 420
+ SYS_RT_SIGTIMEDWAIT_TIME64 = 421
+ SYS_FUTEX_TIME64 = 422
+ SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index b3d8ad79d..9ba207847 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -341,4 +341,8 @@ const (
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index e092822fb..94f68f101 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -6,359 +6,385 @@
package unix
const (
- SYS_RESTART_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_CREAT = 8
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_EXECVE = 11
- SYS_CHDIR = 12
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_LCHOWN = 16
- SYS_LSEEK = 19
- SYS_GETPID = 20
- SYS_MOUNT = 21
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_PTRACE = 26
- SYS_PAUSE = 29
- SYS_ACCESS = 33
- SYS_NICE = 34
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_RENAME = 38
- SYS_MKDIR = 39
- SYS_RMDIR = 40
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_TIMES = 43
- SYS_BRK = 45
- SYS_SETGID = 46
- SYS_GETGID = 47
- SYS_GETEUID = 49
- SYS_GETEGID = 50
- SYS_ACCT = 51
- SYS_UMOUNT2 = 52
- SYS_IOCTL = 54
- SYS_FCNTL = 55
- SYS_SETPGID = 57
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_USTAT = 62
- SYS_DUP2 = 63
- SYS_GETPPID = 64
- SYS_GETPGRP = 65
- SYS_SETSID = 66
- SYS_SIGACTION = 67
- SYS_SETREUID = 70
- SYS_SETREGID = 71
- SYS_SIGSUSPEND = 72
- SYS_SIGPENDING = 73
- SYS_SETHOSTNAME = 74
- SYS_SETRLIMIT = 75
- SYS_GETRUSAGE = 77
- SYS_GETTIMEOFDAY = 78
- SYS_SETTIMEOFDAY = 79
- SYS_GETGROUPS = 80
- SYS_SETGROUPS = 81
- SYS_SYMLINK = 83
- SYS_READLINK = 85
- SYS_USELIB = 86
- SYS_SWAPON = 87
- SYS_REBOOT = 88
- SYS_MUNMAP = 91
- SYS_TRUNCATE = 92
- SYS_FTRUNCATE = 93
- SYS_FCHMOD = 94
- SYS_FCHOWN = 95
- SYS_GETPRIORITY = 96
- SYS_SETPRIORITY = 97
- SYS_STATFS = 99
- SYS_FSTATFS = 100
- SYS_SYSLOG = 103
- SYS_SETITIMER = 104
- SYS_GETITIMER = 105
- SYS_STAT = 106
- SYS_LSTAT = 107
- SYS_FSTAT = 108
- SYS_VHANGUP = 111
- SYS_WAIT4 = 114
- SYS_SWAPOFF = 115
- SYS_SYSINFO = 116
- SYS_FSYNC = 118
- SYS_SIGRETURN = 119
- SYS_CLONE = 120
- SYS_SETDOMAINNAME = 121
- SYS_UNAME = 122
- SYS_ADJTIMEX = 124
- SYS_MPROTECT = 125
- SYS_SIGPROCMASK = 126
- SYS_INIT_MODULE = 128
- SYS_DELETE_MODULE = 129
- SYS_QUOTACTL = 131
- SYS_GETPGID = 132
- SYS_FCHDIR = 133
- SYS_BDFLUSH = 134
- SYS_SYSFS = 135
- SYS_PERSONALITY = 136
- SYS_SETFSUID = 138
- SYS_SETFSGID = 139
- SYS__LLSEEK = 140
- SYS_GETDENTS = 141
- SYS__NEWSELECT = 142
- SYS_FLOCK = 143
- SYS_MSYNC = 144
- SYS_READV = 145
- SYS_WRITEV = 146
- SYS_GETSID = 147
- SYS_FDATASYNC = 148
- SYS__SYSCTL = 149
- SYS_MLOCK = 150
- SYS_MUNLOCK = 151
- SYS_MLOCKALL = 152
- SYS_MUNLOCKALL = 153
- SYS_SCHED_SETPARAM = 154
- SYS_SCHED_GETPARAM = 155
- SYS_SCHED_SETSCHEDULER = 156
- SYS_SCHED_GETSCHEDULER = 157
- SYS_SCHED_YIELD = 158
- SYS_SCHED_GET_PRIORITY_MAX = 159
- SYS_SCHED_GET_PRIORITY_MIN = 160
- SYS_SCHED_RR_GET_INTERVAL = 161
- SYS_NANOSLEEP = 162
- SYS_MREMAP = 163
- SYS_SETRESUID = 164
- SYS_GETRESUID = 165
- SYS_POLL = 168
- SYS_NFSSERVCTL = 169
- SYS_SETRESGID = 170
- SYS_GETRESGID = 171
- SYS_PRCTL = 172
- SYS_RT_SIGRETURN = 173
- SYS_RT_SIGACTION = 174
- SYS_RT_SIGPROCMASK = 175
- SYS_RT_SIGPENDING = 176
- SYS_RT_SIGTIMEDWAIT = 177
- SYS_RT_SIGQUEUEINFO = 178
- SYS_RT_SIGSUSPEND = 179
- SYS_PREAD64 = 180
- SYS_PWRITE64 = 181
- SYS_CHOWN = 182
- SYS_GETCWD = 183
- SYS_CAPGET = 184
- SYS_CAPSET = 185
- SYS_SIGALTSTACK = 186
- SYS_SENDFILE = 187
- SYS_VFORK = 190
- SYS_UGETRLIMIT = 191
- SYS_MMAP2 = 192
- SYS_TRUNCATE64 = 193
- SYS_FTRUNCATE64 = 194
- SYS_STAT64 = 195
- SYS_LSTAT64 = 196
- SYS_FSTAT64 = 197
- SYS_LCHOWN32 = 198
- SYS_GETUID32 = 199
- SYS_GETGID32 = 200
- SYS_GETEUID32 = 201
- SYS_GETEGID32 = 202
- SYS_SETREUID32 = 203
- SYS_SETREGID32 = 204
- SYS_GETGROUPS32 = 205
- SYS_SETGROUPS32 = 206
- SYS_FCHOWN32 = 207
- SYS_SETRESUID32 = 208
- SYS_GETRESUID32 = 209
- SYS_SETRESGID32 = 210
- SYS_GETRESGID32 = 211
- SYS_CHOWN32 = 212
- SYS_SETUID32 = 213
- SYS_SETGID32 = 214
- SYS_SETFSUID32 = 215
- SYS_SETFSGID32 = 216
- SYS_GETDENTS64 = 217
- SYS_PIVOT_ROOT = 218
- SYS_MINCORE = 219
- SYS_MADVISE = 220
- SYS_FCNTL64 = 221
- SYS_GETTID = 224
- SYS_READAHEAD = 225
- SYS_SETXATTR = 226
- SYS_LSETXATTR = 227
- SYS_FSETXATTR = 228
- SYS_GETXATTR = 229
- SYS_LGETXATTR = 230
- SYS_FGETXATTR = 231
- SYS_LISTXATTR = 232
- SYS_LLISTXATTR = 233
- SYS_FLISTXATTR = 234
- SYS_REMOVEXATTR = 235
- SYS_LREMOVEXATTR = 236
- SYS_FREMOVEXATTR = 237
- SYS_TKILL = 238
- SYS_SENDFILE64 = 239
- SYS_FUTEX = 240
- SYS_SCHED_SETAFFINITY = 241
- SYS_SCHED_GETAFFINITY = 242
- SYS_IO_SETUP = 243
- SYS_IO_DESTROY = 244
- SYS_IO_GETEVENTS = 245
- SYS_IO_SUBMIT = 246
- SYS_IO_CANCEL = 247
- SYS_EXIT_GROUP = 248
- SYS_LOOKUP_DCOOKIE = 249
- SYS_EPOLL_CREATE = 250
- SYS_EPOLL_CTL = 251
- SYS_EPOLL_WAIT = 252
- SYS_REMAP_FILE_PAGES = 253
- SYS_SET_TID_ADDRESS = 256
- SYS_TIMER_CREATE = 257
- SYS_TIMER_SETTIME = 258
- SYS_TIMER_GETTIME = 259
- SYS_TIMER_GETOVERRUN = 260
- SYS_TIMER_DELETE = 261
- SYS_CLOCK_SETTIME = 262
- SYS_CLOCK_GETTIME = 263
- SYS_CLOCK_GETRES = 264
- SYS_CLOCK_NANOSLEEP = 265
- SYS_STATFS64 = 266
- SYS_FSTATFS64 = 267
- SYS_TGKILL = 268
- SYS_UTIMES = 269
- SYS_ARM_FADVISE64_64 = 270
- SYS_PCICONFIG_IOBASE = 271
- SYS_PCICONFIG_READ = 272
- SYS_PCICONFIG_WRITE = 273
- SYS_MQ_OPEN = 274
- SYS_MQ_UNLINK = 275
- SYS_MQ_TIMEDSEND = 276
- SYS_MQ_TIMEDRECEIVE = 277
- SYS_MQ_NOTIFY = 278
- SYS_MQ_GETSETATTR = 279
- SYS_WAITID = 280
- SYS_SOCKET = 281
- SYS_BIND = 282
- SYS_CONNECT = 283
- SYS_LISTEN = 284
- SYS_ACCEPT = 285
- SYS_GETSOCKNAME = 286
- SYS_GETPEERNAME = 287
- SYS_SOCKETPAIR = 288
- SYS_SEND = 289
- SYS_SENDTO = 290
- SYS_RECV = 291
- SYS_RECVFROM = 292
- SYS_SHUTDOWN = 293
- SYS_SETSOCKOPT = 294
- SYS_GETSOCKOPT = 295
- SYS_SENDMSG = 296
- SYS_RECVMSG = 297
- SYS_SEMOP = 298
- SYS_SEMGET = 299
- SYS_SEMCTL = 300
- SYS_MSGSND = 301
- SYS_MSGRCV = 302
- SYS_MSGGET = 303
- SYS_MSGCTL = 304
- SYS_SHMAT = 305
- SYS_SHMDT = 306
- SYS_SHMGET = 307
- SYS_SHMCTL = 308
- SYS_ADD_KEY = 309
- SYS_REQUEST_KEY = 310
- SYS_KEYCTL = 311
- SYS_SEMTIMEDOP = 312
- SYS_VSERVER = 313
- SYS_IOPRIO_SET = 314
- SYS_IOPRIO_GET = 315
- SYS_INOTIFY_INIT = 316
- SYS_INOTIFY_ADD_WATCH = 317
- SYS_INOTIFY_RM_WATCH = 318
- SYS_MBIND = 319
- SYS_GET_MEMPOLICY = 320
- SYS_SET_MEMPOLICY = 321
- SYS_OPENAT = 322
- SYS_MKDIRAT = 323
- SYS_MKNODAT = 324
- SYS_FCHOWNAT = 325
- SYS_FUTIMESAT = 326
- SYS_FSTATAT64 = 327
- SYS_UNLINKAT = 328
- SYS_RENAMEAT = 329
- SYS_LINKAT = 330
- SYS_SYMLINKAT = 331
- SYS_READLINKAT = 332
- SYS_FCHMODAT = 333
- SYS_FACCESSAT = 334
- SYS_PSELECT6 = 335
- SYS_PPOLL = 336
- SYS_UNSHARE = 337
- SYS_SET_ROBUST_LIST = 338
- SYS_GET_ROBUST_LIST = 339
- SYS_SPLICE = 340
- SYS_ARM_SYNC_FILE_RANGE = 341
- SYS_TEE = 342
- SYS_VMSPLICE = 343
- SYS_MOVE_PAGES = 344
- SYS_GETCPU = 345
- SYS_EPOLL_PWAIT = 346
- SYS_KEXEC_LOAD = 347
- SYS_UTIMENSAT = 348
- SYS_SIGNALFD = 349
- SYS_TIMERFD_CREATE = 350
- SYS_EVENTFD = 351
- SYS_FALLOCATE = 352
- SYS_TIMERFD_SETTIME = 353
- SYS_TIMERFD_GETTIME = 354
- SYS_SIGNALFD4 = 355
- SYS_EVENTFD2 = 356
- SYS_EPOLL_CREATE1 = 357
- SYS_DUP3 = 358
- SYS_PIPE2 = 359
- SYS_INOTIFY_INIT1 = 360
- SYS_PREADV = 361
- SYS_PWRITEV = 362
- SYS_RT_TGSIGQUEUEINFO = 363
- SYS_PERF_EVENT_OPEN = 364
- SYS_RECVMMSG = 365
- SYS_ACCEPT4 = 366
- SYS_FANOTIFY_INIT = 367
- SYS_FANOTIFY_MARK = 368
- SYS_PRLIMIT64 = 369
- SYS_NAME_TO_HANDLE_AT = 370
- SYS_OPEN_BY_HANDLE_AT = 371
- SYS_CLOCK_ADJTIME = 372
- SYS_SYNCFS = 373
- SYS_SENDMMSG = 374
- SYS_SETNS = 375
- SYS_PROCESS_VM_READV = 376
- SYS_PROCESS_VM_WRITEV = 377
- SYS_KCMP = 378
- SYS_FINIT_MODULE = 379
- SYS_SCHED_SETATTR = 380
- SYS_SCHED_GETATTR = 381
- SYS_RENAMEAT2 = 382
- SYS_SECCOMP = 383
- SYS_GETRANDOM = 384
- SYS_MEMFD_CREATE = 385
- SYS_BPF = 386
- SYS_EXECVEAT = 387
- SYS_USERFAULTFD = 388
- SYS_MEMBARRIER = 389
- SYS_MLOCK2 = 390
- SYS_COPY_FILE_RANGE = 391
- SYS_PREADV2 = 392
- SYS_PWRITEV2 = 393
- SYS_PKEY_MPROTECT = 394
- SYS_PKEY_ALLOC = 395
- SYS_PKEY_FREE = 396
- SYS_STATX = 397
- SYS_RSEQ = 398
- SYS_IO_PGETEVENTS = 399
+ SYS_RESTART_SYSCALL = 0
+ SYS_EXIT = 1
+ SYS_FORK = 2
+ SYS_READ = 3
+ SYS_WRITE = 4
+ SYS_OPEN = 5
+ SYS_CLOSE = 6
+ SYS_CREAT = 8
+ SYS_LINK = 9
+ SYS_UNLINK = 10
+ SYS_EXECVE = 11
+ SYS_CHDIR = 12
+ SYS_MKNOD = 14
+ SYS_CHMOD = 15
+ SYS_LCHOWN = 16
+ SYS_LSEEK = 19
+ SYS_GETPID = 20
+ SYS_MOUNT = 21
+ SYS_SETUID = 23
+ SYS_GETUID = 24
+ SYS_PTRACE = 26
+ SYS_PAUSE = 29
+ SYS_ACCESS = 33
+ SYS_NICE = 34
+ SYS_SYNC = 36
+ SYS_KILL = 37
+ SYS_RENAME = 38
+ SYS_MKDIR = 39
+ SYS_RMDIR = 40
+ SYS_DUP = 41
+ SYS_PIPE = 42
+ SYS_TIMES = 43
+ SYS_BRK = 45
+ SYS_SETGID = 46
+ SYS_GETGID = 47
+ SYS_GETEUID = 49
+ SYS_GETEGID = 50
+ SYS_ACCT = 51
+ SYS_UMOUNT2 = 52
+ SYS_IOCTL = 54
+ SYS_FCNTL = 55
+ SYS_SETPGID = 57
+ SYS_UMASK = 60
+ SYS_CHROOT = 61
+ SYS_USTAT = 62
+ SYS_DUP2 = 63
+ SYS_GETPPID = 64
+ SYS_GETPGRP = 65
+ SYS_SETSID = 66
+ SYS_SIGACTION = 67
+ SYS_SETREUID = 70
+ SYS_SETREGID = 71
+ SYS_SIGSUSPEND = 72
+ SYS_SIGPENDING = 73
+ SYS_SETHOSTNAME = 74
+ SYS_SETRLIMIT = 75
+ SYS_GETRUSAGE = 77
+ SYS_GETTIMEOFDAY = 78
+ SYS_SETTIMEOFDAY = 79
+ SYS_GETGROUPS = 80
+ SYS_SETGROUPS = 81
+ SYS_SYMLINK = 83
+ SYS_READLINK = 85
+ SYS_USELIB = 86
+ SYS_SWAPON = 87
+ SYS_REBOOT = 88
+ SYS_MUNMAP = 91
+ SYS_TRUNCATE = 92
+ SYS_FTRUNCATE = 93
+ SYS_FCHMOD = 94
+ SYS_FCHOWN = 95
+ SYS_GETPRIORITY = 96
+ SYS_SETPRIORITY = 97
+ SYS_STATFS = 99
+ SYS_FSTATFS = 100
+ SYS_SYSLOG = 103
+ SYS_SETITIMER = 104
+ SYS_GETITIMER = 105
+ SYS_STAT = 106
+ SYS_LSTAT = 107
+ SYS_FSTAT = 108
+ SYS_VHANGUP = 111
+ SYS_WAIT4 = 114
+ SYS_SWAPOFF = 115
+ SYS_SYSINFO = 116
+ SYS_FSYNC = 118
+ SYS_SIGRETURN = 119
+ SYS_CLONE = 120
+ SYS_SETDOMAINNAME = 121
+ SYS_UNAME = 122
+ SYS_ADJTIMEX = 124
+ SYS_MPROTECT = 125
+ SYS_SIGPROCMASK = 126
+ SYS_INIT_MODULE = 128
+ SYS_DELETE_MODULE = 129
+ SYS_QUOTACTL = 131
+ SYS_GETPGID = 132
+ SYS_FCHDIR = 133
+ SYS_BDFLUSH = 134
+ SYS_SYSFS = 135
+ SYS_PERSONALITY = 136
+ SYS_SETFSUID = 138
+ SYS_SETFSGID = 139
+ SYS__LLSEEK = 140
+ SYS_GETDENTS = 141
+ SYS__NEWSELECT = 142
+ SYS_FLOCK = 143
+ SYS_MSYNC = 144
+ SYS_READV = 145
+ SYS_WRITEV = 146
+ SYS_GETSID = 147
+ SYS_FDATASYNC = 148
+ SYS__SYSCTL = 149
+ SYS_MLOCK = 150
+ SYS_MUNLOCK = 151
+ SYS_MLOCKALL = 152
+ SYS_MUNLOCKALL = 153
+ SYS_SCHED_SETPARAM = 154
+ SYS_SCHED_GETPARAM = 155
+ SYS_SCHED_SETSCHEDULER = 156
+ SYS_SCHED_GETSCHEDULER = 157
+ SYS_SCHED_YIELD = 158
+ SYS_SCHED_GET_PRIORITY_MAX = 159
+ SYS_SCHED_GET_PRIORITY_MIN = 160
+ SYS_SCHED_RR_GET_INTERVAL = 161
+ SYS_NANOSLEEP = 162
+ SYS_MREMAP = 163
+ SYS_SETRESUID = 164
+ SYS_GETRESUID = 165
+ SYS_POLL = 168
+ SYS_NFSSERVCTL = 169
+ SYS_SETRESGID = 170
+ SYS_GETRESGID = 171
+ SYS_PRCTL = 172
+ SYS_RT_SIGRETURN = 173
+ SYS_RT_SIGACTION = 174
+ SYS_RT_SIGPROCMASK = 175
+ SYS_RT_SIGPENDING = 176
+ SYS_RT_SIGTIMEDWAIT = 177
+ SYS_RT_SIGQUEUEINFO = 178
+ SYS_RT_SIGSUSPEND = 179
+ SYS_PREAD64 = 180
+ SYS_PWRITE64 = 181
+ SYS_CHOWN = 182
+ SYS_GETCWD = 183
+ SYS_CAPGET = 184
+ SYS_CAPSET = 185
+ SYS_SIGALTSTACK = 186
+ SYS_SENDFILE = 187
+ SYS_VFORK = 190
+ SYS_UGETRLIMIT = 191
+ SYS_MMAP2 = 192
+ SYS_TRUNCATE64 = 193
+ SYS_FTRUNCATE64 = 194
+ SYS_STAT64 = 195
+ SYS_LSTAT64 = 196
+ SYS_FSTAT64 = 197
+ SYS_LCHOWN32 = 198
+ SYS_GETUID32 = 199
+ SYS_GETGID32 = 200
+ SYS_GETEUID32 = 201
+ SYS_GETEGID32 = 202
+ SYS_SETREUID32 = 203
+ SYS_SETREGID32 = 204
+ SYS_GETGROUPS32 = 205
+ SYS_SETGROUPS32 = 206
+ SYS_FCHOWN32 = 207
+ SYS_SETRESUID32 = 208
+ SYS_GETRESUID32 = 209
+ SYS_SETRESGID32 = 210
+ SYS_GETRESGID32 = 211
+ SYS_CHOWN32 = 212
+ SYS_SETUID32 = 213
+ SYS_SETGID32 = 214
+ SYS_SETFSUID32 = 215
+ SYS_SETFSGID32 = 216
+ SYS_GETDENTS64 = 217
+ SYS_PIVOT_ROOT = 218
+ SYS_MINCORE = 219
+ SYS_MADVISE = 220
+ SYS_FCNTL64 = 221
+ SYS_GETTID = 224
+ SYS_READAHEAD = 225
+ SYS_SETXATTR = 226
+ SYS_LSETXATTR = 227
+ SYS_FSETXATTR = 228
+ SYS_GETXATTR = 229
+ SYS_LGETXATTR = 230
+ SYS_FGETXATTR = 231
+ SYS_LISTXATTR = 232
+ SYS_LLISTXATTR = 233
+ SYS_FLISTXATTR = 234
+ SYS_REMOVEXATTR = 235
+ SYS_LREMOVEXATTR = 236
+ SYS_FREMOVEXATTR = 237
+ SYS_TKILL = 238
+ SYS_SENDFILE64 = 239
+ SYS_FUTEX = 240
+ SYS_SCHED_SETAFFINITY = 241
+ SYS_SCHED_GETAFFINITY = 242
+ SYS_IO_SETUP = 243
+ SYS_IO_DESTROY = 244
+ SYS_IO_GETEVENTS = 245
+ SYS_IO_SUBMIT = 246
+ SYS_IO_CANCEL = 247
+ SYS_EXIT_GROUP = 248
+ SYS_LOOKUP_DCOOKIE = 249
+ SYS_EPOLL_CREATE = 250
+ SYS_EPOLL_CTL = 251
+ SYS_EPOLL_WAIT = 252
+ SYS_REMAP_FILE_PAGES = 253
+ SYS_SET_TID_ADDRESS = 256
+ SYS_TIMER_CREATE = 257
+ SYS_TIMER_SETTIME = 258
+ SYS_TIMER_GETTIME = 259
+ SYS_TIMER_GETOVERRUN = 260
+ SYS_TIMER_DELETE = 261
+ SYS_CLOCK_SETTIME = 262
+ SYS_CLOCK_GETTIME = 263
+ SYS_CLOCK_GETRES = 264
+ SYS_CLOCK_NANOSLEEP = 265
+ SYS_STATFS64 = 266
+ SYS_FSTATFS64 = 267
+ SYS_TGKILL = 268
+ SYS_UTIMES = 269
+ SYS_ARM_FADVISE64_64 = 270
+ SYS_PCICONFIG_IOBASE = 271
+ SYS_PCICONFIG_READ = 272
+ SYS_PCICONFIG_WRITE = 273
+ SYS_MQ_OPEN = 274
+ SYS_MQ_UNLINK = 275
+ SYS_MQ_TIMEDSEND = 276
+ SYS_MQ_TIMEDRECEIVE = 277
+ SYS_MQ_NOTIFY = 278
+ SYS_MQ_GETSETATTR = 279
+ SYS_WAITID = 280
+ SYS_SOCKET = 281
+ SYS_BIND = 282
+ SYS_CONNECT = 283
+ SYS_LISTEN = 284
+ SYS_ACCEPT = 285
+ SYS_GETSOCKNAME = 286
+ SYS_GETPEERNAME = 287
+ SYS_SOCKETPAIR = 288
+ SYS_SEND = 289
+ SYS_SENDTO = 290
+ SYS_RECV = 291
+ SYS_RECVFROM = 292
+ SYS_SHUTDOWN = 293
+ SYS_SETSOCKOPT = 294
+ SYS_GETSOCKOPT = 295
+ SYS_SENDMSG = 296
+ SYS_RECVMSG = 297
+ SYS_SEMOP = 298
+ SYS_SEMGET = 299
+ SYS_SEMCTL = 300
+ SYS_MSGSND = 301
+ SYS_MSGRCV = 302
+ SYS_MSGGET = 303
+ SYS_MSGCTL = 304
+ SYS_SHMAT = 305
+ SYS_SHMDT = 306
+ SYS_SHMGET = 307
+ SYS_SHMCTL = 308
+ SYS_ADD_KEY = 309
+ SYS_REQUEST_KEY = 310
+ SYS_KEYCTL = 311
+ SYS_SEMTIMEDOP = 312
+ SYS_VSERVER = 313
+ SYS_IOPRIO_SET = 314
+ SYS_IOPRIO_GET = 315
+ SYS_INOTIFY_INIT = 316
+ SYS_INOTIFY_ADD_WATCH = 317
+ SYS_INOTIFY_RM_WATCH = 318
+ SYS_MBIND = 319
+ SYS_GET_MEMPOLICY = 320
+ SYS_SET_MEMPOLICY = 321
+ SYS_OPENAT = 322
+ SYS_MKDIRAT = 323
+ SYS_MKNODAT = 324
+ SYS_FCHOWNAT = 325
+ SYS_FUTIMESAT = 326
+ SYS_FSTATAT64 = 327
+ SYS_UNLINKAT = 328
+ SYS_RENAMEAT = 329
+ SYS_LINKAT = 330
+ SYS_SYMLINKAT = 331
+ SYS_READLINKAT = 332
+ SYS_FCHMODAT = 333
+ SYS_FACCESSAT = 334
+ SYS_PSELECT6 = 335
+ SYS_PPOLL = 336
+ SYS_UNSHARE = 337
+ SYS_SET_ROBUST_LIST = 338
+ SYS_GET_ROBUST_LIST = 339
+ SYS_SPLICE = 340
+ SYS_ARM_SYNC_FILE_RANGE = 341
+ SYS_TEE = 342
+ SYS_VMSPLICE = 343
+ SYS_MOVE_PAGES = 344
+ SYS_GETCPU = 345
+ SYS_EPOLL_PWAIT = 346
+ SYS_KEXEC_LOAD = 347
+ SYS_UTIMENSAT = 348
+ SYS_SIGNALFD = 349
+ SYS_TIMERFD_CREATE = 350
+ SYS_EVENTFD = 351
+ SYS_FALLOCATE = 352
+ SYS_TIMERFD_SETTIME = 353
+ SYS_TIMERFD_GETTIME = 354
+ SYS_SIGNALFD4 = 355
+ SYS_EVENTFD2 = 356
+ SYS_EPOLL_CREATE1 = 357
+ SYS_DUP3 = 358
+ SYS_PIPE2 = 359
+ SYS_INOTIFY_INIT1 = 360
+ SYS_PREADV = 361
+ SYS_PWRITEV = 362
+ SYS_RT_TGSIGQUEUEINFO = 363
+ SYS_PERF_EVENT_OPEN = 364
+ SYS_RECVMMSG = 365
+ SYS_ACCEPT4 = 366
+ SYS_FANOTIFY_INIT = 367
+ SYS_FANOTIFY_MARK = 368
+ SYS_PRLIMIT64 = 369
+ SYS_NAME_TO_HANDLE_AT = 370
+ SYS_OPEN_BY_HANDLE_AT = 371
+ SYS_CLOCK_ADJTIME = 372
+ SYS_SYNCFS = 373
+ SYS_SENDMMSG = 374
+ SYS_SETNS = 375
+ SYS_PROCESS_VM_READV = 376
+ SYS_PROCESS_VM_WRITEV = 377
+ SYS_KCMP = 378
+ SYS_FINIT_MODULE = 379
+ SYS_SCHED_SETATTR = 380
+ SYS_SCHED_GETATTR = 381
+ SYS_RENAMEAT2 = 382
+ SYS_SECCOMP = 383
+ SYS_GETRANDOM = 384
+ SYS_MEMFD_CREATE = 385
+ SYS_BPF = 386
+ SYS_EXECVEAT = 387
+ SYS_USERFAULTFD = 388
+ SYS_MEMBARRIER = 389
+ SYS_MLOCK2 = 390
+ SYS_COPY_FILE_RANGE = 391
+ SYS_PREADV2 = 392
+ SYS_PWRITEV2 = 393
+ SYS_PKEY_MPROTECT = 394
+ SYS_PKEY_ALLOC = 395
+ SYS_PKEY_FREE = 396
+ SYS_STATX = 397
+ SYS_RSEQ = 398
+ SYS_IO_PGETEVENTS = 399
+ SYS_MIGRATE_PAGES = 400
+ SYS_KEXEC_FILE_LOAD = 401
+ SYS_CLOCK_GETTIME64 = 403
+ SYS_CLOCK_SETTIME64 = 404
+ SYS_CLOCK_ADJTIME64 = 405
+ SYS_CLOCK_GETRES_TIME64 = 406
+ SYS_CLOCK_NANOSLEEP_TIME64 = 407
+ SYS_TIMER_GETTIME64 = 408
+ SYS_TIMER_SETTIME64 = 409
+ SYS_TIMERFD_GETTIME64 = 410
+ SYS_TIMERFD_SETTIME64 = 411
+ SYS_UTIMENSAT_TIME64 = 412
+ SYS_PSELECT6_TIME64 = 413
+ SYS_PPOLL_TIME64 = 414
+ SYS_IO_PGETEVENTS_TIME64 = 416
+ SYS_RECVMMSG_TIME64 = 417
+ SYS_MQ_TIMEDSEND_TIME64 = 418
+ SYS_MQ_TIMEDRECEIVE_TIME64 = 419
+ SYS_SEMTIMEDOP_TIME64 = 420
+ SYS_RT_SIGTIMEDWAIT_TIME64 = 421
+ SYS_FUTEX_TIME64 = 422
+ SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 320696789..15c413516 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -285,4 +285,9 @@ const (
SYS_STATX = 291
SYS_IO_PGETEVENTS = 292
SYS_RSEQ = 293
+ SYS_KEXEC_FILE_LOAD = 294
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 6893a5bd0..638465b14 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -6,372 +6,406 @@
package unix
const (
- SYS_SYSCALL = 4000
- SYS_EXIT = 4001
- SYS_FORK = 4002
- SYS_READ = 4003
- SYS_WRITE = 4004
- SYS_OPEN = 4005
- SYS_CLOSE = 4006
- SYS_WAITPID = 4007
- SYS_CREAT = 4008
- SYS_LINK = 4009
- SYS_UNLINK = 4010
- SYS_EXECVE = 4011
- SYS_CHDIR = 4012
- SYS_TIME = 4013
- SYS_MKNOD = 4014
- SYS_CHMOD = 4015
- SYS_LCHOWN = 4016
- SYS_BREAK = 4017
- SYS_UNUSED18 = 4018
- SYS_LSEEK = 4019
- SYS_GETPID = 4020
- SYS_MOUNT = 4021
- SYS_UMOUNT = 4022
- SYS_SETUID = 4023
- SYS_GETUID = 4024
- SYS_STIME = 4025
- SYS_PTRACE = 4026
- SYS_ALARM = 4027
- SYS_UNUSED28 = 4028
- SYS_PAUSE = 4029
- SYS_UTIME = 4030
- SYS_STTY = 4031
- SYS_GTTY = 4032
- SYS_ACCESS = 4033
- SYS_NICE = 4034
- SYS_FTIME = 4035
- SYS_SYNC = 4036
- SYS_KILL = 4037
- SYS_RENAME = 4038
- SYS_MKDIR = 4039
- SYS_RMDIR = 4040
- SYS_DUP = 4041
- SYS_PIPE = 4042
- SYS_TIMES = 4043
- SYS_PROF = 4044
- SYS_BRK = 4045
- SYS_SETGID = 4046
- SYS_GETGID = 4047
- SYS_SIGNAL = 4048
- SYS_GETEUID = 4049
- SYS_GETEGID = 4050
- SYS_ACCT = 4051
- SYS_UMOUNT2 = 4052
- SYS_LOCK = 4053
- SYS_IOCTL = 4054
- SYS_FCNTL = 4055
- SYS_MPX = 4056
- SYS_SETPGID = 4057
- SYS_ULIMIT = 4058
- SYS_UNUSED59 = 4059
- SYS_UMASK = 4060
- SYS_CHROOT = 4061
- SYS_USTAT = 4062
- SYS_DUP2 = 4063
- SYS_GETPPID = 4064
- SYS_GETPGRP = 4065
- SYS_SETSID = 4066
- SYS_SIGACTION = 4067
- SYS_SGETMASK = 4068
- SYS_SSETMASK = 4069
- SYS_SETREUID = 4070
- SYS_SETREGID = 4071
- SYS_SIGSUSPEND = 4072
- SYS_SIGPENDING = 4073
- SYS_SETHOSTNAME = 4074
- SYS_SETRLIMIT = 4075
- SYS_GETRLIMIT = 4076
- SYS_GETRUSAGE = 4077
- SYS_GETTIMEOFDAY = 4078
- SYS_SETTIMEOFDAY = 4079
- SYS_GETGROUPS = 4080
- SYS_SETGROUPS = 4081
- SYS_RESERVED82 = 4082
- SYS_SYMLINK = 4083
- SYS_UNUSED84 = 4084
- SYS_READLINK = 4085
- SYS_USELIB = 4086
- SYS_SWAPON = 4087
- SYS_REBOOT = 4088
- SYS_READDIR = 4089
- SYS_MMAP = 4090
- SYS_MUNMAP = 4091
- SYS_TRUNCATE = 4092
- SYS_FTRUNCATE = 4093
- SYS_FCHMOD = 4094
- SYS_FCHOWN = 4095
- SYS_GETPRIORITY = 4096
- SYS_SETPRIORITY = 4097
- SYS_PROFIL = 4098
- SYS_STATFS = 4099
- SYS_FSTATFS = 4100
- SYS_IOPERM = 4101
- SYS_SOCKETCALL = 4102
- SYS_SYSLOG = 4103
- SYS_SETITIMER = 4104
- SYS_GETITIMER = 4105
- SYS_STAT = 4106
- SYS_LSTAT = 4107
- SYS_FSTAT = 4108
- SYS_UNUSED109 = 4109
- SYS_IOPL = 4110
- SYS_VHANGUP = 4111
- SYS_IDLE = 4112
- SYS_VM86 = 4113
- SYS_WAIT4 = 4114
- SYS_SWAPOFF = 4115
- SYS_SYSINFO = 4116
- SYS_IPC = 4117
- SYS_FSYNC = 4118
- SYS_SIGRETURN = 4119
- SYS_CLONE = 4120
- SYS_SETDOMAINNAME = 4121
- SYS_UNAME = 4122
- SYS_MODIFY_LDT = 4123
- SYS_ADJTIMEX = 4124
- SYS_MPROTECT = 4125
- SYS_SIGPROCMASK = 4126
- SYS_CREATE_MODULE = 4127
- SYS_INIT_MODULE = 4128
- SYS_DELETE_MODULE = 4129
- SYS_GET_KERNEL_SYMS = 4130
- SYS_QUOTACTL = 4131
- SYS_GETPGID = 4132
- SYS_FCHDIR = 4133
- SYS_BDFLUSH = 4134
- SYS_SYSFS = 4135
- SYS_PERSONALITY = 4136
- SYS_AFS_SYSCALL = 4137
- SYS_SETFSUID = 4138
- SYS_SETFSGID = 4139
- SYS__LLSEEK = 4140
- SYS_GETDENTS = 4141
- SYS__NEWSELECT = 4142
- SYS_FLOCK = 4143
- SYS_MSYNC = 4144
- SYS_READV = 4145
- SYS_WRITEV = 4146
- SYS_CACHEFLUSH = 4147
- SYS_CACHECTL = 4148
- SYS_SYSMIPS = 4149
- SYS_UNUSED150 = 4150
- SYS_GETSID = 4151
- SYS_FDATASYNC = 4152
- SYS__SYSCTL = 4153
- SYS_MLOCK = 4154
- SYS_MUNLOCK = 4155
- SYS_MLOCKALL = 4156
- SYS_MUNLOCKALL = 4157
- SYS_SCHED_SETPARAM = 4158
- SYS_SCHED_GETPARAM = 4159
- SYS_SCHED_SETSCHEDULER = 4160
- SYS_SCHED_GETSCHEDULER = 4161
- SYS_SCHED_YIELD = 4162
- SYS_SCHED_GET_PRIORITY_MAX = 4163
- SYS_SCHED_GET_PRIORITY_MIN = 4164
- SYS_SCHED_RR_GET_INTERVAL = 4165
- SYS_NANOSLEEP = 4166
- SYS_MREMAP = 4167
- SYS_ACCEPT = 4168
- SYS_BIND = 4169
- SYS_CONNECT = 4170
- SYS_GETPEERNAME = 4171
- SYS_GETSOCKNAME = 4172
- SYS_GETSOCKOPT = 4173
- SYS_LISTEN = 4174
- SYS_RECV = 4175
- SYS_RECVFROM = 4176
- SYS_RECVMSG = 4177
- SYS_SEND = 4178
- SYS_SENDMSG = 4179
- SYS_SENDTO = 4180
- SYS_SETSOCKOPT = 4181
- SYS_SHUTDOWN = 4182
- SYS_SOCKET = 4183
- SYS_SOCKETPAIR = 4184
- SYS_SETRESUID = 4185
- SYS_GETRESUID = 4186
- SYS_QUERY_MODULE = 4187
- SYS_POLL = 4188
- SYS_NFSSERVCTL = 4189
- SYS_SETRESGID = 4190
- SYS_GETRESGID = 4191
- SYS_PRCTL = 4192
- SYS_RT_SIGRETURN = 4193
- SYS_RT_SIGACTION = 4194
- SYS_RT_SIGPROCMASK = 4195
- SYS_RT_SIGPENDING = 4196
- SYS_RT_SIGTIMEDWAIT = 4197
- SYS_RT_SIGQUEUEINFO = 4198
- SYS_RT_SIGSUSPEND = 4199
- SYS_PREAD64 = 4200
- SYS_PWRITE64 = 4201
- SYS_CHOWN = 4202
- SYS_GETCWD = 4203
- SYS_CAPGET = 4204
- SYS_CAPSET = 4205
- SYS_SIGALTSTACK = 4206
- SYS_SENDFILE = 4207
- SYS_GETPMSG = 4208
- SYS_PUTPMSG = 4209
- SYS_MMAP2 = 4210
- SYS_TRUNCATE64 = 4211
- SYS_FTRUNCATE64 = 4212
- SYS_STAT64 = 4213
- SYS_LSTAT64 = 4214
- SYS_FSTAT64 = 4215
- SYS_PIVOT_ROOT = 4216
- SYS_MINCORE = 4217
- SYS_MADVISE = 4218
- SYS_GETDENTS64 = 4219
- SYS_FCNTL64 = 4220
- SYS_RESERVED221 = 4221
- SYS_GETTID = 4222
- SYS_READAHEAD = 4223
- SYS_SETXATTR = 4224
- SYS_LSETXATTR = 4225
- SYS_FSETXATTR = 4226
- SYS_GETXATTR = 4227
- SYS_LGETXATTR = 4228
- SYS_FGETXATTR = 4229
- SYS_LISTXATTR = 4230
- SYS_LLISTXATTR = 4231
- SYS_FLISTXATTR = 4232
- SYS_REMOVEXATTR = 4233
- SYS_LREMOVEXATTR = 4234
- SYS_FREMOVEXATTR = 4235
- SYS_TKILL = 4236
- SYS_SENDFILE64 = 4237
- SYS_FUTEX = 4238
- SYS_SCHED_SETAFFINITY = 4239
- SYS_SCHED_GETAFFINITY = 4240
- SYS_IO_SETUP = 4241
- SYS_IO_DESTROY = 4242
- SYS_IO_GETEVENTS = 4243
- SYS_IO_SUBMIT = 4244
- SYS_IO_CANCEL = 4245
- SYS_EXIT_GROUP = 4246
- SYS_LOOKUP_DCOOKIE = 4247
- SYS_EPOLL_CREATE = 4248
- SYS_EPOLL_CTL = 4249
- SYS_EPOLL_WAIT = 4250
- SYS_REMAP_FILE_PAGES = 4251
- SYS_SET_TID_ADDRESS = 4252
- SYS_RESTART_SYSCALL = 4253
- SYS_FADVISE64 = 4254
- SYS_STATFS64 = 4255
- SYS_FSTATFS64 = 4256
- SYS_TIMER_CREATE = 4257
- SYS_TIMER_SETTIME = 4258
- SYS_TIMER_GETTIME = 4259
- SYS_TIMER_GETOVERRUN = 4260
- SYS_TIMER_DELETE = 4261
- SYS_CLOCK_SETTIME = 4262
- SYS_CLOCK_GETTIME = 4263
- SYS_CLOCK_GETRES = 4264
- SYS_CLOCK_NANOSLEEP = 4265
- SYS_TGKILL = 4266
- SYS_UTIMES = 4267
- SYS_MBIND = 4268
- SYS_GET_MEMPOLICY = 4269
- SYS_SET_MEMPOLICY = 4270
- SYS_MQ_OPEN = 4271
- SYS_MQ_UNLINK = 4272
- SYS_MQ_TIMEDSEND = 4273
- SYS_MQ_TIMEDRECEIVE = 4274
- SYS_MQ_NOTIFY = 4275
- SYS_MQ_GETSETATTR = 4276
- SYS_VSERVER = 4277
- SYS_WAITID = 4278
- SYS_ADD_KEY = 4280
- SYS_REQUEST_KEY = 4281
- SYS_KEYCTL = 4282
- SYS_SET_THREAD_AREA = 4283
- SYS_INOTIFY_INIT = 4284
- SYS_INOTIFY_ADD_WATCH = 4285
- SYS_INOTIFY_RM_WATCH = 4286
- SYS_MIGRATE_PAGES = 4287
- SYS_OPENAT = 4288
- SYS_MKDIRAT = 4289
- SYS_MKNODAT = 4290
- SYS_FCHOWNAT = 4291
- SYS_FUTIMESAT = 4292
- SYS_FSTATAT64 = 4293
- SYS_UNLINKAT = 4294
- SYS_RENAMEAT = 4295
- SYS_LINKAT = 4296
- SYS_SYMLINKAT = 4297
- SYS_READLINKAT = 4298
- SYS_FCHMODAT = 4299
- SYS_FACCESSAT = 4300
- SYS_PSELECT6 = 4301
- SYS_PPOLL = 4302
- SYS_UNSHARE = 4303
- SYS_SPLICE = 4304
- SYS_SYNC_FILE_RANGE = 4305
- SYS_TEE = 4306
- SYS_VMSPLICE = 4307
- SYS_MOVE_PAGES = 4308
- SYS_SET_ROBUST_LIST = 4309
- SYS_GET_ROBUST_LIST = 4310
- SYS_KEXEC_LOAD = 4311
- SYS_GETCPU = 4312
- SYS_EPOLL_PWAIT = 4313
- SYS_IOPRIO_SET = 4314
- SYS_IOPRIO_GET = 4315
- SYS_UTIMENSAT = 4316
- SYS_SIGNALFD = 4317
- SYS_TIMERFD = 4318
- SYS_EVENTFD = 4319
- SYS_FALLOCATE = 4320
- SYS_TIMERFD_CREATE = 4321
- SYS_TIMERFD_GETTIME = 4322
- SYS_TIMERFD_SETTIME = 4323
- SYS_SIGNALFD4 = 4324
- SYS_EVENTFD2 = 4325
- SYS_EPOLL_CREATE1 = 4326
- SYS_DUP3 = 4327
- SYS_PIPE2 = 4328
- SYS_INOTIFY_INIT1 = 4329
- SYS_PREADV = 4330
- SYS_PWRITEV = 4331
- SYS_RT_TGSIGQUEUEINFO = 4332
- SYS_PERF_EVENT_OPEN = 4333
- SYS_ACCEPT4 = 4334
- SYS_RECVMMSG = 4335
- SYS_FANOTIFY_INIT = 4336
- SYS_FANOTIFY_MARK = 4337
- SYS_PRLIMIT64 = 4338
- SYS_NAME_TO_HANDLE_AT = 4339
- SYS_OPEN_BY_HANDLE_AT = 4340
- SYS_CLOCK_ADJTIME = 4341
- SYS_SYNCFS = 4342
- SYS_SENDMMSG = 4343
- SYS_SETNS = 4344
- SYS_PROCESS_VM_READV = 4345
- SYS_PROCESS_VM_WRITEV = 4346
- SYS_KCMP = 4347
- SYS_FINIT_MODULE = 4348
- SYS_SCHED_SETATTR = 4349
- SYS_SCHED_GETATTR = 4350
- SYS_RENAMEAT2 = 4351
- SYS_SECCOMP = 4352
- SYS_GETRANDOM = 4353
- SYS_MEMFD_CREATE = 4354
- SYS_BPF = 4355
- SYS_EXECVEAT = 4356
- SYS_USERFAULTFD = 4357
- SYS_MEMBARRIER = 4358
- SYS_MLOCK2 = 4359
- SYS_COPY_FILE_RANGE = 4360
- SYS_PREADV2 = 4361
- SYS_PWRITEV2 = 4362
- SYS_PKEY_MPROTECT = 4363
- SYS_PKEY_ALLOC = 4364
- SYS_PKEY_FREE = 4365
- SYS_STATX = 4366
- SYS_RSEQ = 4367
- SYS_IO_PGETEVENTS = 4368
+ SYS_SYSCALL = 4000
+ SYS_EXIT = 4001
+ SYS_FORK = 4002
+ SYS_READ = 4003
+ SYS_WRITE = 4004
+ SYS_OPEN = 4005
+ SYS_CLOSE = 4006
+ SYS_WAITPID = 4007
+ SYS_CREAT = 4008
+ SYS_LINK = 4009
+ SYS_UNLINK = 4010
+ SYS_EXECVE = 4011
+ SYS_CHDIR = 4012
+ SYS_TIME = 4013
+ SYS_MKNOD = 4014
+ SYS_CHMOD = 4015
+ SYS_LCHOWN = 4016
+ SYS_BREAK = 4017
+ SYS_UNUSED18 = 4018
+ SYS_LSEEK = 4019
+ SYS_GETPID = 4020
+ SYS_MOUNT = 4021
+ SYS_UMOUNT = 4022
+ SYS_SETUID = 4023
+ SYS_GETUID = 4024
+ SYS_STIME = 4025
+ SYS_PTRACE = 4026
+ SYS_ALARM = 4027
+ SYS_UNUSED28 = 4028
+ SYS_PAUSE = 4029
+ SYS_UTIME = 4030
+ SYS_STTY = 4031
+ SYS_GTTY = 4032
+ SYS_ACCESS = 4033
+ SYS_NICE = 4034
+ SYS_FTIME = 4035
+ SYS_SYNC = 4036
+ SYS_KILL = 4037
+ SYS_RENAME = 4038
+ SYS_MKDIR = 4039
+ SYS_RMDIR = 4040
+ SYS_DUP = 4041
+ SYS_PIPE = 4042
+ SYS_TIMES = 4043
+ SYS_PROF = 4044
+ SYS_BRK = 4045
+ SYS_SETGID = 4046
+ SYS_GETGID = 4047
+ SYS_SIGNAL = 4048
+ SYS_GETEUID = 4049
+ SYS_GETEGID = 4050
+ SYS_ACCT = 4051
+ SYS_UMOUNT2 = 4052
+ SYS_LOCK = 4053
+ SYS_IOCTL = 4054
+ SYS_FCNTL = 4055
+ SYS_MPX = 4056
+ SYS_SETPGID = 4057
+ SYS_ULIMIT = 4058
+ SYS_UNUSED59 = 4059
+ SYS_UMASK = 4060
+ SYS_CHROOT = 4061
+ SYS_USTAT = 4062
+ SYS_DUP2 = 4063
+ SYS_GETPPID = 4064
+ SYS_GETPGRP = 4065
+ SYS_SETSID = 4066
+ SYS_SIGACTION = 4067
+ SYS_SGETMASK = 4068
+ SYS_SSETMASK = 4069
+ SYS_SETREUID = 4070
+ SYS_SETREGID = 4071
+ SYS_SIGSUSPEND = 4072
+ SYS_SIGPENDING = 4073
+ SYS_SETHOSTNAME = 4074
+ SYS_SETRLIMIT = 4075
+ SYS_GETRLIMIT = 4076
+ SYS_GETRUSAGE = 4077
+ SYS_GETTIMEOFDAY = 4078
+ SYS_SETTIMEOFDAY = 4079
+ SYS_GETGROUPS = 4080
+ SYS_SETGROUPS = 4081
+ SYS_RESERVED82 = 4082
+ SYS_SYMLINK = 4083
+ SYS_UNUSED84 = 4084
+ SYS_READLINK = 4085
+ SYS_USELIB = 4086
+ SYS_SWAPON = 4087
+ SYS_REBOOT = 4088
+ SYS_READDIR = 4089
+ SYS_MMAP = 4090
+ SYS_MUNMAP = 4091
+ SYS_TRUNCATE = 4092
+ SYS_FTRUNCATE = 4093
+ SYS_FCHMOD = 4094
+ SYS_FCHOWN = 4095
+ SYS_GETPRIORITY = 4096
+ SYS_SETPRIORITY = 4097
+ SYS_PROFIL = 4098
+ SYS_STATFS = 4099
+ SYS_FSTATFS = 4100
+ SYS_IOPERM = 4101
+ SYS_SOCKETCALL = 4102
+ SYS_SYSLOG = 4103
+ SYS_SETITIMER = 4104
+ SYS_GETITIMER = 4105
+ SYS_STAT = 4106
+ SYS_LSTAT = 4107
+ SYS_FSTAT = 4108
+ SYS_UNUSED109 = 4109
+ SYS_IOPL = 4110
+ SYS_VHANGUP = 4111
+ SYS_IDLE = 4112
+ SYS_VM86 = 4113
+ SYS_WAIT4 = 4114
+ SYS_SWAPOFF = 4115
+ SYS_SYSINFO = 4116
+ SYS_IPC = 4117
+ SYS_FSYNC = 4118
+ SYS_SIGRETURN = 4119
+ SYS_CLONE = 4120
+ SYS_SETDOMAINNAME = 4121
+ SYS_UNAME = 4122
+ SYS_MODIFY_LDT = 4123
+ SYS_ADJTIMEX = 4124
+ SYS_MPROTECT = 4125
+ SYS_SIGPROCMASK = 4126
+ SYS_CREATE_MODULE = 4127
+ SYS_INIT_MODULE = 4128
+ SYS_DELETE_MODULE = 4129
+ SYS_GET_KERNEL_SYMS = 4130
+ SYS_QUOTACTL = 4131
+ SYS_GETPGID = 4132
+ SYS_FCHDIR = 4133
+ SYS_BDFLUSH = 4134
+ SYS_SYSFS = 4135
+ SYS_PERSONALITY = 4136
+ SYS_AFS_SYSCALL = 4137
+ SYS_SETFSUID = 4138
+ SYS_SETFSGID = 4139
+ SYS__LLSEEK = 4140
+ SYS_GETDENTS = 4141
+ SYS__NEWSELECT = 4142
+ SYS_FLOCK = 4143
+ SYS_MSYNC = 4144
+ SYS_READV = 4145
+ SYS_WRITEV = 4146
+ SYS_CACHEFLUSH = 4147
+ SYS_CACHECTL = 4148
+ SYS_SYSMIPS = 4149
+ SYS_UNUSED150 = 4150
+ SYS_GETSID = 4151
+ SYS_FDATASYNC = 4152
+ SYS__SYSCTL = 4153
+ SYS_MLOCK = 4154
+ SYS_MUNLOCK = 4155
+ SYS_MLOCKALL = 4156
+ SYS_MUNLOCKALL = 4157
+ SYS_SCHED_SETPARAM = 4158
+ SYS_SCHED_GETPARAM = 4159
+ SYS_SCHED_SETSCHEDULER = 4160
+ SYS_SCHED_GETSCHEDULER = 4161
+ SYS_SCHED_YIELD = 4162
+ SYS_SCHED_GET_PRIORITY_MAX = 4163
+ SYS_SCHED_GET_PRIORITY_MIN = 4164
+ SYS_SCHED_RR_GET_INTERVAL = 4165
+ SYS_NANOSLEEP = 4166
+ SYS_MREMAP = 4167
+ SYS_ACCEPT = 4168
+ SYS_BIND = 4169
+ SYS_CONNECT = 4170
+ SYS_GETPEERNAME = 4171
+ SYS_GETSOCKNAME = 4172
+ SYS_GETSOCKOPT = 4173
+ SYS_LISTEN = 4174
+ SYS_RECV = 4175
+ SYS_RECVFROM = 4176
+ SYS_RECVMSG = 4177
+ SYS_SEND = 4178
+ SYS_SENDMSG = 4179
+ SYS_SENDTO = 4180
+ SYS_SETSOCKOPT = 4181
+ SYS_SHUTDOWN = 4182
+ SYS_SOCKET = 4183
+ SYS_SOCKETPAIR = 4184
+ SYS_SETRESUID = 4185
+ SYS_GETRESUID = 4186
+ SYS_QUERY_MODULE = 4187
+ SYS_POLL = 4188
+ SYS_NFSSERVCTL = 4189
+ SYS_SETRESGID = 4190
+ SYS_GETRESGID = 4191
+ SYS_PRCTL = 4192
+ SYS_RT_SIGRETURN = 4193
+ SYS_RT_SIGACTION = 4194
+ SYS_RT_SIGPROCMASK = 4195
+ SYS_RT_SIGPENDING = 4196
+ SYS_RT_SIGTIMEDWAIT = 4197
+ SYS_RT_SIGQUEUEINFO = 4198
+ SYS_RT_SIGSUSPEND = 4199
+ SYS_PREAD64 = 4200
+ SYS_PWRITE64 = 4201
+ SYS_CHOWN = 4202
+ SYS_GETCWD = 4203
+ SYS_CAPGET = 4204
+ SYS_CAPSET = 4205
+ SYS_SIGALTSTACK = 4206
+ SYS_SENDFILE = 4207
+ SYS_GETPMSG = 4208
+ SYS_PUTPMSG = 4209
+ SYS_MMAP2 = 4210
+ SYS_TRUNCATE64 = 4211
+ SYS_FTRUNCATE64 = 4212
+ SYS_STAT64 = 4213
+ SYS_LSTAT64 = 4214
+ SYS_FSTAT64 = 4215
+ SYS_PIVOT_ROOT = 4216
+ SYS_MINCORE = 4217
+ SYS_MADVISE = 4218
+ SYS_GETDENTS64 = 4219
+ SYS_FCNTL64 = 4220
+ SYS_RESERVED221 = 4221
+ SYS_GETTID = 4222
+ SYS_READAHEAD = 4223
+ SYS_SETXATTR = 4224
+ SYS_LSETXATTR = 4225
+ SYS_FSETXATTR = 4226
+ SYS_GETXATTR = 4227
+ SYS_LGETXATTR = 4228
+ SYS_FGETXATTR = 4229
+ SYS_LISTXATTR = 4230
+ SYS_LLISTXATTR = 4231
+ SYS_FLISTXATTR = 4232
+ SYS_REMOVEXATTR = 4233
+ SYS_LREMOVEXATTR = 4234
+ SYS_FREMOVEXATTR = 4235
+ SYS_TKILL = 4236
+ SYS_SENDFILE64 = 4237
+ SYS_FUTEX = 4238
+ SYS_SCHED_SETAFFINITY = 4239
+ SYS_SCHED_GETAFFINITY = 4240
+ SYS_IO_SETUP = 4241
+ SYS_IO_DESTROY = 4242
+ SYS_IO_GETEVENTS = 4243
+ SYS_IO_SUBMIT = 4244
+ SYS_IO_CANCEL = 4245
+ SYS_EXIT_GROUP = 4246
+ SYS_LOOKUP_DCOOKIE = 4247
+ SYS_EPOLL_CREATE = 4248
+ SYS_EPOLL_CTL = 4249
+ SYS_EPOLL_WAIT = 4250
+ SYS_REMAP_FILE_PAGES = 4251
+ SYS_SET_TID_ADDRESS = 4252
+ SYS_RESTART_SYSCALL = 4253
+ SYS_FADVISE64 = 4254
+ SYS_STATFS64 = 4255
+ SYS_FSTATFS64 = 4256
+ SYS_TIMER_CREATE = 4257
+ SYS_TIMER_SETTIME = 4258
+ SYS_TIMER_GETTIME = 4259
+ SYS_TIMER_GETOVERRUN = 4260
+ SYS_TIMER_DELETE = 4261
+ SYS_CLOCK_SETTIME = 4262
+ SYS_CLOCK_GETTIME = 4263
+ SYS_CLOCK_GETRES = 4264
+ SYS_CLOCK_NANOSLEEP = 4265
+ SYS_TGKILL = 4266
+ SYS_UTIMES = 4267
+ SYS_MBIND = 4268
+ SYS_GET_MEMPOLICY = 4269
+ SYS_SET_MEMPOLICY = 4270
+ SYS_MQ_OPEN = 4271
+ SYS_MQ_UNLINK = 4272
+ SYS_MQ_TIMEDSEND = 4273
+ SYS_MQ_TIMEDRECEIVE = 4274
+ SYS_MQ_NOTIFY = 4275
+ SYS_MQ_GETSETATTR = 4276
+ SYS_VSERVER = 4277
+ SYS_WAITID = 4278
+ SYS_ADD_KEY = 4280
+ SYS_REQUEST_KEY = 4281
+ SYS_KEYCTL = 4282
+ SYS_SET_THREAD_AREA = 4283
+ SYS_INOTIFY_INIT = 4284
+ SYS_INOTIFY_ADD_WATCH = 4285
+ SYS_INOTIFY_RM_WATCH = 4286
+ SYS_MIGRATE_PAGES = 4287
+ SYS_OPENAT = 4288
+ SYS_MKDIRAT = 4289
+ SYS_MKNODAT = 4290
+ SYS_FCHOWNAT = 4291
+ SYS_FUTIMESAT = 4292
+ SYS_FSTATAT64 = 4293
+ SYS_UNLINKAT = 4294
+ SYS_RENAMEAT = 4295
+ SYS_LINKAT = 4296
+ SYS_SYMLINKAT = 4297
+ SYS_READLINKAT = 4298
+ SYS_FCHMODAT = 4299
+ SYS_FACCESSAT = 4300
+ SYS_PSELECT6 = 4301
+ SYS_PPOLL = 4302
+ SYS_UNSHARE = 4303
+ SYS_SPLICE = 4304
+ SYS_SYNC_FILE_RANGE = 4305
+ SYS_TEE = 4306
+ SYS_VMSPLICE = 4307
+ SYS_MOVE_PAGES = 4308
+ SYS_SET_ROBUST_LIST = 4309
+ SYS_GET_ROBUST_LIST = 4310
+ SYS_KEXEC_LOAD = 4311
+ SYS_GETCPU = 4312
+ SYS_EPOLL_PWAIT = 4313
+ SYS_IOPRIO_SET = 4314
+ SYS_IOPRIO_GET = 4315
+ SYS_UTIMENSAT = 4316
+ SYS_SIGNALFD = 4317
+ SYS_TIMERFD = 4318
+ SYS_EVENTFD = 4319
+ SYS_FALLOCATE = 4320
+ SYS_TIMERFD_CREATE = 4321
+ SYS_TIMERFD_GETTIME = 4322
+ SYS_TIMERFD_SETTIME = 4323
+ SYS_SIGNALFD4 = 4324
+ SYS_EVENTFD2 = 4325
+ SYS_EPOLL_CREATE1 = 4326
+ SYS_DUP3 = 4327
+ SYS_PIPE2 = 4328
+ SYS_INOTIFY_INIT1 = 4329
+ SYS_PREADV = 4330
+ SYS_PWRITEV = 4331
+ SYS_RT_TGSIGQUEUEINFO = 4332
+ SYS_PERF_EVENT_OPEN = 4333
+ SYS_ACCEPT4 = 4334
+ SYS_RECVMMSG = 4335
+ SYS_FANOTIFY_INIT = 4336
+ SYS_FANOTIFY_MARK = 4337
+ SYS_PRLIMIT64 = 4338
+ SYS_NAME_TO_HANDLE_AT = 4339
+ SYS_OPEN_BY_HANDLE_AT = 4340
+ SYS_CLOCK_ADJTIME = 4341
+ SYS_SYNCFS = 4342
+ SYS_SENDMMSG = 4343
+ SYS_SETNS = 4344
+ SYS_PROCESS_VM_READV = 4345
+ SYS_PROCESS_VM_WRITEV = 4346
+ SYS_KCMP = 4347
+ SYS_FINIT_MODULE = 4348
+ SYS_SCHED_SETATTR = 4349
+ SYS_SCHED_GETATTR = 4350
+ SYS_RENAMEAT2 = 4351
+ SYS_SECCOMP = 4352
+ SYS_GETRANDOM = 4353
+ SYS_MEMFD_CREATE = 4354
+ SYS_BPF = 4355
+ SYS_EXECVEAT = 4356
+ SYS_USERFAULTFD = 4357
+ SYS_MEMBARRIER = 4358
+ SYS_MLOCK2 = 4359
+ SYS_COPY_FILE_RANGE = 4360
+ SYS_PREADV2 = 4361
+ SYS_PWRITEV2 = 4362
+ SYS_PKEY_MPROTECT = 4363
+ SYS_PKEY_ALLOC = 4364
+ SYS_PKEY_FREE = 4365
+ SYS_STATX = 4366
+ SYS_RSEQ = 4367
+ SYS_IO_PGETEVENTS = 4368
+ SYS_SEMGET = 4393
+ SYS_SEMCTL = 4394
+ SYS_SHMGET = 4395
+ SYS_SHMCTL = 4396
+ SYS_SHMAT = 4397
+ SYS_SHMDT = 4398
+ SYS_MSGGET = 4399
+ SYS_MSGSND = 4400
+ SYS_MSGRCV = 4401
+ SYS_MSGCTL = 4402
+ SYS_CLOCK_GETTIME64 = 4403
+ SYS_CLOCK_SETTIME64 = 4404
+ SYS_CLOCK_ADJTIME64 = 4405
+ SYS_CLOCK_GETRES_TIME64 = 4406
+ SYS_CLOCK_NANOSLEEP_TIME64 = 4407
+ SYS_TIMER_GETTIME64 = 4408
+ SYS_TIMER_SETTIME64 = 4409
+ SYS_TIMERFD_GETTIME64 = 4410
+ SYS_TIMERFD_SETTIME64 = 4411
+ SYS_UTIMENSAT_TIME64 = 4412
+ SYS_PSELECT6_TIME64 = 4413
+ SYS_PPOLL_TIME64 = 4414
+ SYS_IO_PGETEVENTS_TIME64 = 4416
+ SYS_RECVMMSG_TIME64 = 4417
+ SYS_MQ_TIMEDSEND_TIME64 = 4418
+ SYS_MQ_TIMEDRECEIVE_TIME64 = 4419
+ SYS_SEMTIMEDOP_TIME64 = 4420
+ SYS_RT_SIGTIMEDWAIT_TIME64 = 4421
+ SYS_FUTEX_TIME64 = 4422
+ SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423
+ SYS_PIDFD_SEND_SIGNAL = 4424
+ SYS_IO_URING_SETUP = 4425
+ SYS_IO_URING_ENTER = 4426
+ SYS_IO_URING_REGISTER = 4427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 40164cacd..57ec82aac 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -334,4 +334,8 @@ const (
SYS_STATX = 5326
SYS_RSEQ = 5327
SYS_IO_PGETEVENTS = 5328
+ SYS_PIDFD_SEND_SIGNAL = 5424
+ SYS_IO_URING_SETUP = 5425
+ SYS_IO_URING_ENTER = 5426
+ SYS_IO_URING_REGISTER = 5427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 8a909738b..825a3e3b0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -334,4 +334,8 @@ const (
SYS_STATX = 5326
SYS_RSEQ = 5327
SYS_IO_PGETEVENTS = 5328
+ SYS_PIDFD_SEND_SIGNAL = 5424
+ SYS_IO_URING_SETUP = 5425
+ SYS_IO_URING_ENTER = 5426
+ SYS_IO_URING_REGISTER = 5427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 8d7818422..f152dfdd0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -6,372 +6,406 @@
package unix
const (
- SYS_SYSCALL = 4000
- SYS_EXIT = 4001
- SYS_FORK = 4002
- SYS_READ = 4003
- SYS_WRITE = 4004
- SYS_OPEN = 4005
- SYS_CLOSE = 4006
- SYS_WAITPID = 4007
- SYS_CREAT = 4008
- SYS_LINK = 4009
- SYS_UNLINK = 4010
- SYS_EXECVE = 4011
- SYS_CHDIR = 4012
- SYS_TIME = 4013
- SYS_MKNOD = 4014
- SYS_CHMOD = 4015
- SYS_LCHOWN = 4016
- SYS_BREAK = 4017
- SYS_UNUSED18 = 4018
- SYS_LSEEK = 4019
- SYS_GETPID = 4020
- SYS_MOUNT = 4021
- SYS_UMOUNT = 4022
- SYS_SETUID = 4023
- SYS_GETUID = 4024
- SYS_STIME = 4025
- SYS_PTRACE = 4026
- SYS_ALARM = 4027
- SYS_UNUSED28 = 4028
- SYS_PAUSE = 4029
- SYS_UTIME = 4030
- SYS_STTY = 4031
- SYS_GTTY = 4032
- SYS_ACCESS = 4033
- SYS_NICE = 4034
- SYS_FTIME = 4035
- SYS_SYNC = 4036
- SYS_KILL = 4037
- SYS_RENAME = 4038
- SYS_MKDIR = 4039
- SYS_RMDIR = 4040
- SYS_DUP = 4041
- SYS_PIPE = 4042
- SYS_TIMES = 4043
- SYS_PROF = 4044
- SYS_BRK = 4045
- SYS_SETGID = 4046
- SYS_GETGID = 4047
- SYS_SIGNAL = 4048
- SYS_GETEUID = 4049
- SYS_GETEGID = 4050
- SYS_ACCT = 4051
- SYS_UMOUNT2 = 4052
- SYS_LOCK = 4053
- SYS_IOCTL = 4054
- SYS_FCNTL = 4055
- SYS_MPX = 4056
- SYS_SETPGID = 4057
- SYS_ULIMIT = 4058
- SYS_UNUSED59 = 4059
- SYS_UMASK = 4060
- SYS_CHROOT = 4061
- SYS_USTAT = 4062
- SYS_DUP2 = 4063
- SYS_GETPPID = 4064
- SYS_GETPGRP = 4065
- SYS_SETSID = 4066
- SYS_SIGACTION = 4067
- SYS_SGETMASK = 4068
- SYS_SSETMASK = 4069
- SYS_SETREUID = 4070
- SYS_SETREGID = 4071
- SYS_SIGSUSPEND = 4072
- SYS_SIGPENDING = 4073
- SYS_SETHOSTNAME = 4074
- SYS_SETRLIMIT = 4075
- SYS_GETRLIMIT = 4076
- SYS_GETRUSAGE = 4077
- SYS_GETTIMEOFDAY = 4078
- SYS_SETTIMEOFDAY = 4079
- SYS_GETGROUPS = 4080
- SYS_SETGROUPS = 4081
- SYS_RESERVED82 = 4082
- SYS_SYMLINK = 4083
- SYS_UNUSED84 = 4084
- SYS_READLINK = 4085
- SYS_USELIB = 4086
- SYS_SWAPON = 4087
- SYS_REBOOT = 4088
- SYS_READDIR = 4089
- SYS_MMAP = 4090
- SYS_MUNMAP = 4091
- SYS_TRUNCATE = 4092
- SYS_FTRUNCATE = 4093
- SYS_FCHMOD = 4094
- SYS_FCHOWN = 4095
- SYS_GETPRIORITY = 4096
- SYS_SETPRIORITY = 4097
- SYS_PROFIL = 4098
- SYS_STATFS = 4099
- SYS_FSTATFS = 4100
- SYS_IOPERM = 4101
- SYS_SOCKETCALL = 4102
- SYS_SYSLOG = 4103
- SYS_SETITIMER = 4104
- SYS_GETITIMER = 4105
- SYS_STAT = 4106
- SYS_LSTAT = 4107
- SYS_FSTAT = 4108
- SYS_UNUSED109 = 4109
- SYS_IOPL = 4110
- SYS_VHANGUP = 4111
- SYS_IDLE = 4112
- SYS_VM86 = 4113
- SYS_WAIT4 = 4114
- SYS_SWAPOFF = 4115
- SYS_SYSINFO = 4116
- SYS_IPC = 4117
- SYS_FSYNC = 4118
- SYS_SIGRETURN = 4119
- SYS_CLONE = 4120
- SYS_SETDOMAINNAME = 4121
- SYS_UNAME = 4122
- SYS_MODIFY_LDT = 4123
- SYS_ADJTIMEX = 4124
- SYS_MPROTECT = 4125
- SYS_SIGPROCMASK = 4126
- SYS_CREATE_MODULE = 4127
- SYS_INIT_MODULE = 4128
- SYS_DELETE_MODULE = 4129
- SYS_GET_KERNEL_SYMS = 4130
- SYS_QUOTACTL = 4131
- SYS_GETPGID = 4132
- SYS_FCHDIR = 4133
- SYS_BDFLUSH = 4134
- SYS_SYSFS = 4135
- SYS_PERSONALITY = 4136
- SYS_AFS_SYSCALL = 4137
- SYS_SETFSUID = 4138
- SYS_SETFSGID = 4139
- SYS__LLSEEK = 4140
- SYS_GETDENTS = 4141
- SYS__NEWSELECT = 4142
- SYS_FLOCK = 4143
- SYS_MSYNC = 4144
- SYS_READV = 4145
- SYS_WRITEV = 4146
- SYS_CACHEFLUSH = 4147
- SYS_CACHECTL = 4148
- SYS_SYSMIPS = 4149
- SYS_UNUSED150 = 4150
- SYS_GETSID = 4151
- SYS_FDATASYNC = 4152
- SYS__SYSCTL = 4153
- SYS_MLOCK = 4154
- SYS_MUNLOCK = 4155
- SYS_MLOCKALL = 4156
- SYS_MUNLOCKALL = 4157
- SYS_SCHED_SETPARAM = 4158
- SYS_SCHED_GETPARAM = 4159
- SYS_SCHED_SETSCHEDULER = 4160
- SYS_SCHED_GETSCHEDULER = 4161
- SYS_SCHED_YIELD = 4162
- SYS_SCHED_GET_PRIORITY_MAX = 4163
- SYS_SCHED_GET_PRIORITY_MIN = 4164
- SYS_SCHED_RR_GET_INTERVAL = 4165
- SYS_NANOSLEEP = 4166
- SYS_MREMAP = 4167
- SYS_ACCEPT = 4168
- SYS_BIND = 4169
- SYS_CONNECT = 4170
- SYS_GETPEERNAME = 4171
- SYS_GETSOCKNAME = 4172
- SYS_GETSOCKOPT = 4173
- SYS_LISTEN = 4174
- SYS_RECV = 4175
- SYS_RECVFROM = 4176
- SYS_RECVMSG = 4177
- SYS_SEND = 4178
- SYS_SENDMSG = 4179
- SYS_SENDTO = 4180
- SYS_SETSOCKOPT = 4181
- SYS_SHUTDOWN = 4182
- SYS_SOCKET = 4183
- SYS_SOCKETPAIR = 4184
- SYS_SETRESUID = 4185
- SYS_GETRESUID = 4186
- SYS_QUERY_MODULE = 4187
- SYS_POLL = 4188
- SYS_NFSSERVCTL = 4189
- SYS_SETRESGID = 4190
- SYS_GETRESGID = 4191
- SYS_PRCTL = 4192
- SYS_RT_SIGRETURN = 4193
- SYS_RT_SIGACTION = 4194
- SYS_RT_SIGPROCMASK = 4195
- SYS_RT_SIGPENDING = 4196
- SYS_RT_SIGTIMEDWAIT = 4197
- SYS_RT_SIGQUEUEINFO = 4198
- SYS_RT_SIGSUSPEND = 4199
- SYS_PREAD64 = 4200
- SYS_PWRITE64 = 4201
- SYS_CHOWN = 4202
- SYS_GETCWD = 4203
- SYS_CAPGET = 4204
- SYS_CAPSET = 4205
- SYS_SIGALTSTACK = 4206
- SYS_SENDFILE = 4207
- SYS_GETPMSG = 4208
- SYS_PUTPMSG = 4209
- SYS_MMAP2 = 4210
- SYS_TRUNCATE64 = 4211
- SYS_FTRUNCATE64 = 4212
- SYS_STAT64 = 4213
- SYS_LSTAT64 = 4214
- SYS_FSTAT64 = 4215
- SYS_PIVOT_ROOT = 4216
- SYS_MINCORE = 4217
- SYS_MADVISE = 4218
- SYS_GETDENTS64 = 4219
- SYS_FCNTL64 = 4220
- SYS_RESERVED221 = 4221
- SYS_GETTID = 4222
- SYS_READAHEAD = 4223
- SYS_SETXATTR = 4224
- SYS_LSETXATTR = 4225
- SYS_FSETXATTR = 4226
- SYS_GETXATTR = 4227
- SYS_LGETXATTR = 4228
- SYS_FGETXATTR = 4229
- SYS_LISTXATTR = 4230
- SYS_LLISTXATTR = 4231
- SYS_FLISTXATTR = 4232
- SYS_REMOVEXATTR = 4233
- SYS_LREMOVEXATTR = 4234
- SYS_FREMOVEXATTR = 4235
- SYS_TKILL = 4236
- SYS_SENDFILE64 = 4237
- SYS_FUTEX = 4238
- SYS_SCHED_SETAFFINITY = 4239
- SYS_SCHED_GETAFFINITY = 4240
- SYS_IO_SETUP = 4241
- SYS_IO_DESTROY = 4242
- SYS_IO_GETEVENTS = 4243
- SYS_IO_SUBMIT = 4244
- SYS_IO_CANCEL = 4245
- SYS_EXIT_GROUP = 4246
- SYS_LOOKUP_DCOOKIE = 4247
- SYS_EPOLL_CREATE = 4248
- SYS_EPOLL_CTL = 4249
- SYS_EPOLL_WAIT = 4250
- SYS_REMAP_FILE_PAGES = 4251
- SYS_SET_TID_ADDRESS = 4252
- SYS_RESTART_SYSCALL = 4253
- SYS_FADVISE64 = 4254
- SYS_STATFS64 = 4255
- SYS_FSTATFS64 = 4256
- SYS_TIMER_CREATE = 4257
- SYS_TIMER_SETTIME = 4258
- SYS_TIMER_GETTIME = 4259
- SYS_TIMER_GETOVERRUN = 4260
- SYS_TIMER_DELETE = 4261
- SYS_CLOCK_SETTIME = 4262
- SYS_CLOCK_GETTIME = 4263
- SYS_CLOCK_GETRES = 4264
- SYS_CLOCK_NANOSLEEP = 4265
- SYS_TGKILL = 4266
- SYS_UTIMES = 4267
- SYS_MBIND = 4268
- SYS_GET_MEMPOLICY = 4269
- SYS_SET_MEMPOLICY = 4270
- SYS_MQ_OPEN = 4271
- SYS_MQ_UNLINK = 4272
- SYS_MQ_TIMEDSEND = 4273
- SYS_MQ_TIMEDRECEIVE = 4274
- SYS_MQ_NOTIFY = 4275
- SYS_MQ_GETSETATTR = 4276
- SYS_VSERVER = 4277
- SYS_WAITID = 4278
- SYS_ADD_KEY = 4280
- SYS_REQUEST_KEY = 4281
- SYS_KEYCTL = 4282
- SYS_SET_THREAD_AREA = 4283
- SYS_INOTIFY_INIT = 4284
- SYS_INOTIFY_ADD_WATCH = 4285
- SYS_INOTIFY_RM_WATCH = 4286
- SYS_MIGRATE_PAGES = 4287
- SYS_OPENAT = 4288
- SYS_MKDIRAT = 4289
- SYS_MKNODAT = 4290
- SYS_FCHOWNAT = 4291
- SYS_FUTIMESAT = 4292
- SYS_FSTATAT64 = 4293
- SYS_UNLINKAT = 4294
- SYS_RENAMEAT = 4295
- SYS_LINKAT = 4296
- SYS_SYMLINKAT = 4297
- SYS_READLINKAT = 4298
- SYS_FCHMODAT = 4299
- SYS_FACCESSAT = 4300
- SYS_PSELECT6 = 4301
- SYS_PPOLL = 4302
- SYS_UNSHARE = 4303
- SYS_SPLICE = 4304
- SYS_SYNC_FILE_RANGE = 4305
- SYS_TEE = 4306
- SYS_VMSPLICE = 4307
- SYS_MOVE_PAGES = 4308
- SYS_SET_ROBUST_LIST = 4309
- SYS_GET_ROBUST_LIST = 4310
- SYS_KEXEC_LOAD = 4311
- SYS_GETCPU = 4312
- SYS_EPOLL_PWAIT = 4313
- SYS_IOPRIO_SET = 4314
- SYS_IOPRIO_GET = 4315
- SYS_UTIMENSAT = 4316
- SYS_SIGNALFD = 4317
- SYS_TIMERFD = 4318
- SYS_EVENTFD = 4319
- SYS_FALLOCATE = 4320
- SYS_TIMERFD_CREATE = 4321
- SYS_TIMERFD_GETTIME = 4322
- SYS_TIMERFD_SETTIME = 4323
- SYS_SIGNALFD4 = 4324
- SYS_EVENTFD2 = 4325
- SYS_EPOLL_CREATE1 = 4326
- SYS_DUP3 = 4327
- SYS_PIPE2 = 4328
- SYS_INOTIFY_INIT1 = 4329
- SYS_PREADV = 4330
- SYS_PWRITEV = 4331
- SYS_RT_TGSIGQUEUEINFO = 4332
- SYS_PERF_EVENT_OPEN = 4333
- SYS_ACCEPT4 = 4334
- SYS_RECVMMSG = 4335
- SYS_FANOTIFY_INIT = 4336
- SYS_FANOTIFY_MARK = 4337
- SYS_PRLIMIT64 = 4338
- SYS_NAME_TO_HANDLE_AT = 4339
- SYS_OPEN_BY_HANDLE_AT = 4340
- SYS_CLOCK_ADJTIME = 4341
- SYS_SYNCFS = 4342
- SYS_SENDMMSG = 4343
- SYS_SETNS = 4344
- SYS_PROCESS_VM_READV = 4345
- SYS_PROCESS_VM_WRITEV = 4346
- SYS_KCMP = 4347
- SYS_FINIT_MODULE = 4348
- SYS_SCHED_SETATTR = 4349
- SYS_SCHED_GETATTR = 4350
- SYS_RENAMEAT2 = 4351
- SYS_SECCOMP = 4352
- SYS_GETRANDOM = 4353
- SYS_MEMFD_CREATE = 4354
- SYS_BPF = 4355
- SYS_EXECVEAT = 4356
- SYS_USERFAULTFD = 4357
- SYS_MEMBARRIER = 4358
- SYS_MLOCK2 = 4359
- SYS_COPY_FILE_RANGE = 4360
- SYS_PREADV2 = 4361
- SYS_PWRITEV2 = 4362
- SYS_PKEY_MPROTECT = 4363
- SYS_PKEY_ALLOC = 4364
- SYS_PKEY_FREE = 4365
- SYS_STATX = 4366
- SYS_RSEQ = 4367
- SYS_IO_PGETEVENTS = 4368
+ SYS_SYSCALL = 4000
+ SYS_EXIT = 4001
+ SYS_FORK = 4002
+ SYS_READ = 4003
+ SYS_WRITE = 4004
+ SYS_OPEN = 4005
+ SYS_CLOSE = 4006
+ SYS_WAITPID = 4007
+ SYS_CREAT = 4008
+ SYS_LINK = 4009
+ SYS_UNLINK = 4010
+ SYS_EXECVE = 4011
+ SYS_CHDIR = 4012
+ SYS_TIME = 4013
+ SYS_MKNOD = 4014
+ SYS_CHMOD = 4015
+ SYS_LCHOWN = 4016
+ SYS_BREAK = 4017
+ SYS_UNUSED18 = 4018
+ SYS_LSEEK = 4019
+ SYS_GETPID = 4020
+ SYS_MOUNT = 4021
+ SYS_UMOUNT = 4022
+ SYS_SETUID = 4023
+ SYS_GETUID = 4024
+ SYS_STIME = 4025
+ SYS_PTRACE = 4026
+ SYS_ALARM = 4027
+ SYS_UNUSED28 = 4028
+ SYS_PAUSE = 4029
+ SYS_UTIME = 4030
+ SYS_STTY = 4031
+ SYS_GTTY = 4032
+ SYS_ACCESS = 4033
+ SYS_NICE = 4034
+ SYS_FTIME = 4035
+ SYS_SYNC = 4036
+ SYS_KILL = 4037
+ SYS_RENAME = 4038
+ SYS_MKDIR = 4039
+ SYS_RMDIR = 4040
+ SYS_DUP = 4041
+ SYS_PIPE = 4042
+ SYS_TIMES = 4043
+ SYS_PROF = 4044
+ SYS_BRK = 4045
+ SYS_SETGID = 4046
+ SYS_GETGID = 4047
+ SYS_SIGNAL = 4048
+ SYS_GETEUID = 4049
+ SYS_GETEGID = 4050
+ SYS_ACCT = 4051
+ SYS_UMOUNT2 = 4052
+ SYS_LOCK = 4053
+ SYS_IOCTL = 4054
+ SYS_FCNTL = 4055
+ SYS_MPX = 4056
+ SYS_SETPGID = 4057
+ SYS_ULIMIT = 4058
+ SYS_UNUSED59 = 4059
+ SYS_UMASK = 4060
+ SYS_CHROOT = 4061
+ SYS_USTAT = 4062
+ SYS_DUP2 = 4063
+ SYS_GETPPID = 4064
+ SYS_GETPGRP = 4065
+ SYS_SETSID = 4066
+ SYS_SIGACTION = 4067
+ SYS_SGETMASK = 4068
+ SYS_SSETMASK = 4069
+ SYS_SETREUID = 4070
+ SYS_SETREGID = 4071
+ SYS_SIGSUSPEND = 4072
+ SYS_SIGPENDING = 4073
+ SYS_SETHOSTNAME = 4074
+ SYS_SETRLIMIT = 4075
+ SYS_GETRLIMIT = 4076
+ SYS_GETRUSAGE = 4077
+ SYS_GETTIMEOFDAY = 4078
+ SYS_SETTIMEOFDAY = 4079
+ SYS_GETGROUPS = 4080
+ SYS_SETGROUPS = 4081
+ SYS_RESERVED82 = 4082
+ SYS_SYMLINK = 4083
+ SYS_UNUSED84 = 4084
+ SYS_READLINK = 4085
+ SYS_USELIB = 4086
+ SYS_SWAPON = 4087
+ SYS_REBOOT = 4088
+ SYS_READDIR = 4089
+ SYS_MMAP = 4090
+ SYS_MUNMAP = 4091
+ SYS_TRUNCATE = 4092
+ SYS_FTRUNCATE = 4093
+ SYS_FCHMOD = 4094
+ SYS_FCHOWN = 4095
+ SYS_GETPRIORITY = 4096
+ SYS_SETPRIORITY = 4097
+ SYS_PROFIL = 4098
+ SYS_STATFS = 4099
+ SYS_FSTATFS = 4100
+ SYS_IOPERM = 4101
+ SYS_SOCKETCALL = 4102
+ SYS_SYSLOG = 4103
+ SYS_SETITIMER = 4104
+ SYS_GETITIMER = 4105
+ SYS_STAT = 4106
+ SYS_LSTAT = 4107
+ SYS_FSTAT = 4108
+ SYS_UNUSED109 = 4109
+ SYS_IOPL = 4110
+ SYS_VHANGUP = 4111
+ SYS_IDLE = 4112
+ SYS_VM86 = 4113
+ SYS_WAIT4 = 4114
+ SYS_SWAPOFF = 4115
+ SYS_SYSINFO = 4116
+ SYS_IPC = 4117
+ SYS_FSYNC = 4118
+ SYS_SIGRETURN = 4119
+ SYS_CLONE = 4120
+ SYS_SETDOMAINNAME = 4121
+ SYS_UNAME = 4122
+ SYS_MODIFY_LDT = 4123
+ SYS_ADJTIMEX = 4124
+ SYS_MPROTECT = 4125
+ SYS_SIGPROCMASK = 4126
+ SYS_CREATE_MODULE = 4127
+ SYS_INIT_MODULE = 4128
+ SYS_DELETE_MODULE = 4129
+ SYS_GET_KERNEL_SYMS = 4130
+ SYS_QUOTACTL = 4131
+ SYS_GETPGID = 4132
+ SYS_FCHDIR = 4133
+ SYS_BDFLUSH = 4134
+ SYS_SYSFS = 4135
+ SYS_PERSONALITY = 4136
+ SYS_AFS_SYSCALL = 4137
+ SYS_SETFSUID = 4138
+ SYS_SETFSGID = 4139
+ SYS__LLSEEK = 4140
+ SYS_GETDENTS = 4141
+ SYS__NEWSELECT = 4142
+ SYS_FLOCK = 4143
+ SYS_MSYNC = 4144
+ SYS_READV = 4145
+ SYS_WRITEV = 4146
+ SYS_CACHEFLUSH = 4147
+ SYS_CACHECTL = 4148
+ SYS_SYSMIPS = 4149
+ SYS_UNUSED150 = 4150
+ SYS_GETSID = 4151
+ SYS_FDATASYNC = 4152
+ SYS__SYSCTL = 4153
+ SYS_MLOCK = 4154
+ SYS_MUNLOCK = 4155
+ SYS_MLOCKALL = 4156
+ SYS_MUNLOCKALL = 4157
+ SYS_SCHED_SETPARAM = 4158
+ SYS_SCHED_GETPARAM = 4159
+ SYS_SCHED_SETSCHEDULER = 4160
+ SYS_SCHED_GETSCHEDULER = 4161
+ SYS_SCHED_YIELD = 4162
+ SYS_SCHED_GET_PRIORITY_MAX = 4163
+ SYS_SCHED_GET_PRIORITY_MIN = 4164
+ SYS_SCHED_RR_GET_INTERVAL = 4165
+ SYS_NANOSLEEP = 4166
+ SYS_MREMAP = 4167
+ SYS_ACCEPT = 4168
+ SYS_BIND = 4169
+ SYS_CONNECT = 4170
+ SYS_GETPEERNAME = 4171
+ SYS_GETSOCKNAME = 4172
+ SYS_GETSOCKOPT = 4173
+ SYS_LISTEN = 4174
+ SYS_RECV = 4175
+ SYS_RECVFROM = 4176
+ SYS_RECVMSG = 4177
+ SYS_SEND = 4178
+ SYS_SENDMSG = 4179
+ SYS_SENDTO = 4180
+ SYS_SETSOCKOPT = 4181
+ SYS_SHUTDOWN = 4182
+ SYS_SOCKET = 4183
+ SYS_SOCKETPAIR = 4184
+ SYS_SETRESUID = 4185
+ SYS_GETRESUID = 4186
+ SYS_QUERY_MODULE = 4187
+ SYS_POLL = 4188
+ SYS_NFSSERVCTL = 4189
+ SYS_SETRESGID = 4190
+ SYS_GETRESGID = 4191
+ SYS_PRCTL = 4192
+ SYS_RT_SIGRETURN = 4193
+ SYS_RT_SIGACTION = 4194
+ SYS_RT_SIGPROCMASK = 4195
+ SYS_RT_SIGPENDING = 4196
+ SYS_RT_SIGTIMEDWAIT = 4197
+ SYS_RT_SIGQUEUEINFO = 4198
+ SYS_RT_SIGSUSPEND = 4199
+ SYS_PREAD64 = 4200
+ SYS_PWRITE64 = 4201
+ SYS_CHOWN = 4202
+ SYS_GETCWD = 4203
+ SYS_CAPGET = 4204
+ SYS_CAPSET = 4205
+ SYS_SIGALTSTACK = 4206
+ SYS_SENDFILE = 4207
+ SYS_GETPMSG = 4208
+ SYS_PUTPMSG = 4209
+ SYS_MMAP2 = 4210
+ SYS_TRUNCATE64 = 4211
+ SYS_FTRUNCATE64 = 4212
+ SYS_STAT64 = 4213
+ SYS_LSTAT64 = 4214
+ SYS_FSTAT64 = 4215
+ SYS_PIVOT_ROOT = 4216
+ SYS_MINCORE = 4217
+ SYS_MADVISE = 4218
+ SYS_GETDENTS64 = 4219
+ SYS_FCNTL64 = 4220
+ SYS_RESERVED221 = 4221
+ SYS_GETTID = 4222
+ SYS_READAHEAD = 4223
+ SYS_SETXATTR = 4224
+ SYS_LSETXATTR = 4225
+ SYS_FSETXATTR = 4226
+ SYS_GETXATTR = 4227
+ SYS_LGETXATTR = 4228
+ SYS_FGETXATTR = 4229
+ SYS_LISTXATTR = 4230
+ SYS_LLISTXATTR = 4231
+ SYS_FLISTXATTR = 4232
+ SYS_REMOVEXATTR = 4233
+ SYS_LREMOVEXATTR = 4234
+ SYS_FREMOVEXATTR = 4235
+ SYS_TKILL = 4236
+ SYS_SENDFILE64 = 4237
+ SYS_FUTEX = 4238
+ SYS_SCHED_SETAFFINITY = 4239
+ SYS_SCHED_GETAFFINITY = 4240
+ SYS_IO_SETUP = 4241
+ SYS_IO_DESTROY = 4242
+ SYS_IO_GETEVENTS = 4243
+ SYS_IO_SUBMIT = 4244
+ SYS_IO_CANCEL = 4245
+ SYS_EXIT_GROUP = 4246
+ SYS_LOOKUP_DCOOKIE = 4247
+ SYS_EPOLL_CREATE = 4248
+ SYS_EPOLL_CTL = 4249
+ SYS_EPOLL_WAIT = 4250
+ SYS_REMAP_FILE_PAGES = 4251
+ SYS_SET_TID_ADDRESS = 4252
+ SYS_RESTART_SYSCALL = 4253
+ SYS_FADVISE64 = 4254
+ SYS_STATFS64 = 4255
+ SYS_FSTATFS64 = 4256
+ SYS_TIMER_CREATE = 4257
+ SYS_TIMER_SETTIME = 4258
+ SYS_TIMER_GETTIME = 4259
+ SYS_TIMER_GETOVERRUN = 4260
+ SYS_TIMER_DELETE = 4261
+ SYS_CLOCK_SETTIME = 4262
+ SYS_CLOCK_GETTIME = 4263
+ SYS_CLOCK_GETRES = 4264
+ SYS_CLOCK_NANOSLEEP = 4265
+ SYS_TGKILL = 4266
+ SYS_UTIMES = 4267
+ SYS_MBIND = 4268
+ SYS_GET_MEMPOLICY = 4269
+ SYS_SET_MEMPOLICY = 4270
+ SYS_MQ_OPEN = 4271
+ SYS_MQ_UNLINK = 4272
+ SYS_MQ_TIMEDSEND = 4273
+ SYS_MQ_TIMEDRECEIVE = 4274
+ SYS_MQ_NOTIFY = 4275
+ SYS_MQ_GETSETATTR = 4276
+ SYS_VSERVER = 4277
+ SYS_WAITID = 4278
+ SYS_ADD_KEY = 4280
+ SYS_REQUEST_KEY = 4281
+ SYS_KEYCTL = 4282
+ SYS_SET_THREAD_AREA = 4283
+ SYS_INOTIFY_INIT = 4284
+ SYS_INOTIFY_ADD_WATCH = 4285
+ SYS_INOTIFY_RM_WATCH = 4286
+ SYS_MIGRATE_PAGES = 4287
+ SYS_OPENAT = 4288
+ SYS_MKDIRAT = 4289
+ SYS_MKNODAT = 4290
+ SYS_FCHOWNAT = 4291
+ SYS_FUTIMESAT = 4292
+ SYS_FSTATAT64 = 4293
+ SYS_UNLINKAT = 4294
+ SYS_RENAMEAT = 4295
+ SYS_LINKAT = 4296
+ SYS_SYMLINKAT = 4297
+ SYS_READLINKAT = 4298
+ SYS_FCHMODAT = 4299
+ SYS_FACCESSAT = 4300
+ SYS_PSELECT6 = 4301
+ SYS_PPOLL = 4302
+ SYS_UNSHARE = 4303
+ SYS_SPLICE = 4304
+ SYS_SYNC_FILE_RANGE = 4305
+ SYS_TEE = 4306
+ SYS_VMSPLICE = 4307
+ SYS_MOVE_PAGES = 4308
+ SYS_SET_ROBUST_LIST = 4309
+ SYS_GET_ROBUST_LIST = 4310
+ SYS_KEXEC_LOAD = 4311
+ SYS_GETCPU = 4312
+ SYS_EPOLL_PWAIT = 4313
+ SYS_IOPRIO_SET = 4314
+ SYS_IOPRIO_GET = 4315
+ SYS_UTIMENSAT = 4316
+ SYS_SIGNALFD = 4317
+ SYS_TIMERFD = 4318
+ SYS_EVENTFD = 4319
+ SYS_FALLOCATE = 4320
+ SYS_TIMERFD_CREATE = 4321
+ SYS_TIMERFD_GETTIME = 4322
+ SYS_TIMERFD_SETTIME = 4323
+ SYS_SIGNALFD4 = 4324
+ SYS_EVENTFD2 = 4325
+ SYS_EPOLL_CREATE1 = 4326
+ SYS_DUP3 = 4327
+ SYS_PIPE2 = 4328
+ SYS_INOTIFY_INIT1 = 4329
+ SYS_PREADV = 4330
+ SYS_PWRITEV = 4331
+ SYS_RT_TGSIGQUEUEINFO = 4332
+ SYS_PERF_EVENT_OPEN = 4333
+ SYS_ACCEPT4 = 4334
+ SYS_RECVMMSG = 4335
+ SYS_FANOTIFY_INIT = 4336
+ SYS_FANOTIFY_MARK = 4337
+ SYS_PRLIMIT64 = 4338
+ SYS_NAME_TO_HANDLE_AT = 4339
+ SYS_OPEN_BY_HANDLE_AT = 4340
+ SYS_CLOCK_ADJTIME = 4341
+ SYS_SYNCFS = 4342
+ SYS_SENDMMSG = 4343
+ SYS_SETNS = 4344
+ SYS_PROCESS_VM_READV = 4345
+ SYS_PROCESS_VM_WRITEV = 4346
+ SYS_KCMP = 4347
+ SYS_FINIT_MODULE = 4348
+ SYS_SCHED_SETATTR = 4349
+ SYS_SCHED_GETATTR = 4350
+ SYS_RENAMEAT2 = 4351
+ SYS_SECCOMP = 4352
+ SYS_GETRANDOM = 4353
+ SYS_MEMFD_CREATE = 4354
+ SYS_BPF = 4355
+ SYS_EXECVEAT = 4356
+ SYS_USERFAULTFD = 4357
+ SYS_MEMBARRIER = 4358
+ SYS_MLOCK2 = 4359
+ SYS_COPY_FILE_RANGE = 4360
+ SYS_PREADV2 = 4361
+ SYS_PWRITEV2 = 4362
+ SYS_PKEY_MPROTECT = 4363
+ SYS_PKEY_ALLOC = 4364
+ SYS_PKEY_FREE = 4365
+ SYS_STATX = 4366
+ SYS_RSEQ = 4367
+ SYS_IO_PGETEVENTS = 4368
+ SYS_SEMGET = 4393
+ SYS_SEMCTL = 4394
+ SYS_SHMGET = 4395
+ SYS_SHMCTL = 4396
+ SYS_SHMAT = 4397
+ SYS_SHMDT = 4398
+ SYS_MSGGET = 4399
+ SYS_MSGSND = 4400
+ SYS_MSGRCV = 4401
+ SYS_MSGCTL = 4402
+ SYS_CLOCK_GETTIME64 = 4403
+ SYS_CLOCK_SETTIME64 = 4404
+ SYS_CLOCK_ADJTIME64 = 4405
+ SYS_CLOCK_GETRES_TIME64 = 4406
+ SYS_CLOCK_NANOSLEEP_TIME64 = 4407
+ SYS_TIMER_GETTIME64 = 4408
+ SYS_TIMER_SETTIME64 = 4409
+ SYS_TIMERFD_GETTIME64 = 4410
+ SYS_TIMERFD_SETTIME64 = 4411
+ SYS_UTIMENSAT_TIME64 = 4412
+ SYS_PSELECT6_TIME64 = 4413
+ SYS_PPOLL_TIME64 = 4414
+ SYS_IO_PGETEVENTS_TIME64 = 4416
+ SYS_RECVMMSG_TIME64 = 4417
+ SYS_MQ_TIMEDSEND_TIME64 = 4418
+ SYS_MQ_TIMEDRECEIVE_TIME64 = 4419
+ SYS_SEMTIMEDOP_TIME64 = 4420
+ SYS_RT_SIGTIMEDWAIT_TIME64 = 4421
+ SYS_FUTEX_TIME64 = 4422
+ SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423
+ SYS_PIDFD_SEND_SIGNAL = 4424
+ SYS_IO_URING_SETUP = 4425
+ SYS_IO_URING_ENTER = 4426
+ SYS_IO_URING_REGISTER = 4427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index ec5bde3d5..7cbe78b19 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -372,4 +372,19 @@ const (
SYS_PKEY_MPROTECT = 386
SYS_RSEQ = 387
SYS_IO_PGETEVENTS = 388
+ SYS_SEMTIMEDOP = 392
+ SYS_SEMGET = 393
+ SYS_SEMCTL = 394
+ SYS_SHMGET = 395
+ SYS_SHMCTL = 396
+ SYS_SHMAT = 397
+ SYS_SHMDT = 398
+ SYS_MSGGET = 399
+ SYS_MSGSND = 400
+ SYS_MSGRCV = 401
+ SYS_MSGCTL = 402
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index bdbabdbcd..51a2f1236 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -372,4 +372,19 @@ const (
SYS_PKEY_MPROTECT = 386
SYS_RSEQ = 387
SYS_IO_PGETEVENTS = 388
+ SYS_SEMTIMEDOP = 392
+ SYS_SEMGET = 393
+ SYS_SEMCTL = 394
+ SYS_SHMGET = 395
+ SYS_SHMCTL = 396
+ SYS_SHMAT = 397
+ SYS_SHMDT = 398
+ SYS_MSGGET = 399
+ SYS_MSGSND = 400
+ SYS_MSGRCV = 401
+ SYS_MSGCTL = 402
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 473c74613..323432ae3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -284,4 +284,9 @@ const (
SYS_STATX = 291
SYS_IO_PGETEVENTS = 292
SYS_RSEQ = 293
+ SYS_KEXEC_FILE_LOAD = 294
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 6eb7c257f..9dca97484 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -334,4 +334,22 @@ const (
SYS_KEXEC_FILE_LOAD = 381
SYS_IO_PGETEVENTS = 382
SYS_RSEQ = 383
+ SYS_PKEY_MPROTECT = 384
+ SYS_PKEY_ALLOC = 385
+ SYS_PKEY_FREE = 386
+ SYS_SEMTIMEDOP = 392
+ SYS_SEMGET = 393
+ SYS_SEMCTL = 394
+ SYS_SHMGET = 395
+ SYS_SHMCTL = 396
+ SYS_SHMAT = 397
+ SYS_SHMDT = 398
+ SYS_MSGGET = 399
+ SYS_MSGSND = 400
+ SYS_MSGRCV = 401
+ SYS_MSGCTL = 402
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 93480fcb1..d3da46f0d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -253,6 +253,7 @@ const (
SYS_TIMER_GETOVERRUN = 264
SYS_TIMER_DELETE = 265
SYS_TIMER_CREATE = 266
+ SYS_VSERVER = 267
SYS_IO_SETUP = 268
SYS_IO_DESTROY = 269
SYS_IO_SUBMIT = 270
@@ -347,4 +348,23 @@ const (
SYS_PWRITEV2 = 359
SYS_STATX = 360
SYS_IO_PGETEVENTS = 361
+ SYS_PKEY_MPROTECT = 362
+ SYS_PKEY_ALLOC = 363
+ SYS_PKEY_FREE = 364
+ SYS_RSEQ = 365
+ SYS_SEMTIMEDOP = 392
+ SYS_SEMGET = 393
+ SYS_SEMCTL = 394
+ SYS_SHMGET = 395
+ SYS_SHMCTL = 396
+ SYS_SHMAT = 397
+ SYS_SHMDT = 398
+ SYS_MSGGET = 399
+ SYS_MSGSND = 400
+ SYS_MSGRCV = 401
+ SYS_MSGCTL = 402
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
new file mode 100644
index 000000000..0291c0931
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
@@ -0,0 +1,274 @@
+// go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master
+// Code generated by the command above; DO NOT EDIT.
+
+// +build arm64,netbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void|sys||exit(int rval); }
+ SYS_FORK = 2 // { int|sys||fork(void); }
+ SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); }
+ SYS_CLOSE = 6 // { int|sys||close(int fd); }
+ SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int|sys||unlink(const char *path); }
+ SYS_CHDIR = 12 // { int|sys||chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); }
+ SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); }
+ SYS_BREAK = 17 // { int|sys||obreak(char *nsize); }
+ SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); }
+ SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); }
+ SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); }
+ SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); }
+ SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
+ SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); }
+ SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); }
+ SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { void|sys||sync(void); }
+ SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t|sys||getppid(void); }
+ SYS_DUP = 41 // { int|sys||dup(int fd); }
+ SYS_PIPE = 42 // { int|sys||pipe(void); }
+ SYS_GETEGID = 43 // { gid_t|sys||getegid(void); }
+ SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); }
+ SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); }
+ SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); }
+ SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); }
+ SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int|sys||acct(const char *path); }
+ SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); }
+ SYS_REVOKE = 56 // { int|sys||revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); }
+ SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); }
+ SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int|sys||chroot(const char *path); }
+ SYS_VFORK = 66 // { int|sys||vfork(void); }
+ SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); }
+ SYS_SSTK = 70 // { int|sys||sstk(int incr); }
+ SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); }
+ SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); }
+ SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int|sys||getpgrp(void); }
+ SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); }
+ SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); }
+ SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); }
+ SYS_FSYNC = 95 // { int|sys||fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); }
+ SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); }
+ SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
+ SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); }
+ SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
+ SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
+ SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); }
+ SYS_SETSID = 147 // { int|sys||setsid(void); }
+ SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); }
+ SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); }
+ SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); }
+ SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); }
+ SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); }
+ SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); }
+ SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); }
+ SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); }
+ SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); }
+ SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); }
+ SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); }
+ SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); }
+ SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); }
+ SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); }
+ SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); }
+ SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); }
+ SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); }
+ SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); }
+ SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); }
+ SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); }
+ SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); }
+ SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); }
+ SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); }
+ SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); }
+ SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); }
+ SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); }
+ SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); }
+ SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); }
+ SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); }
+ SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); }
+ SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
+ SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
+ SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); }
+ SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); }
+ SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); }
+ SYS_ISSETUGID = 305 // { int|sys||issetugid(void); }
+ SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); }
+ SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); }
+ SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); }
+ SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); }
+ SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); }
+ SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); }
+ SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); }
+ SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); }
+ SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); }
+ SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); }
+ SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); }
+ SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); }
+ SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); }
+ SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); }
+ SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); }
+ SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); }
+ SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); }
+ SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); }
+ SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); }
+ SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); }
+ SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); }
+ SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); }
+ SYS_KQUEUE = 344 // { int|sys||kqueue(void); }
+ SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); }
+ SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); }
+ SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); }
+ SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); }
+ SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); }
+ SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); }
+ SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); }
+ SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); }
+ SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); }
+ SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); }
+ SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
+ SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); }
+ SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); }
+ SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); }
+ SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); }
+ SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); }
+ SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); }
+ SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); }
+ SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); }
+ SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); }
+ SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); }
+ SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); }
+ SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); }
+ SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); }
+ SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); }
+ SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); }
+ SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); }
+ SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); }
+ SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); }
+ SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); }
+ SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); }
+ SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); }
+ SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); }
+ SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); }
+ SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); }
+ SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); }
+ SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
+ SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); }
+ SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); }
+ SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); }
+ SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); }
+ SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
+ SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
+ SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); }
+ SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); }
+ SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); }
+ SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); }
+ SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); }
+ SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); }
+ SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
+ SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); }
+ SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); }
+ SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); }
+ SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); }
+ SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); }
+ SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); }
+ SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); }
+ SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); }
+ SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); }
+ SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); }
+ SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); }
+ SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); }
+ SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); }
+ SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); }
+ SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); }
+ SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); }
+ SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); }
+ SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); }
+ SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); }
+ SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); }
+ SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); }
+ SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); }
+ SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); }
+ SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); }
+ SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); }
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
index d5bf3c449..b0207d1c9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
+// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build 386,openbsd
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
index cbcfdfb33..f0dec6f0b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
+// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,openbsd
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
index a84cead96..33d1dc540 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
+// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm,openbsd
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
new file mode 100644
index 000000000..fe2b689b6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
@@ -0,0 +1,217 @@
+// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build arm64,openbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void sys_exit(int rval); }
+ SYS_FORK = 2 // { int sys_fork(void); }
+ SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); }
+ SYS_CLOSE = 6 // { int sys_close(int fd); }
+ SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); }
+ SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); }
+ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int sys_unlink(const char *path); }
+ SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
+ SYS_CHDIR = 12 // { int sys_chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int sys_fchdir(int fd); }
+ SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); }
+ SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); }
+ SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break
+ SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); }
+ SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); }
+ SYS_GETPID = 20 // { pid_t sys_getpid(void); }
+ SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); }
+ SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int sys_setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t sys_getuid(void); }
+ SYS_GETEUID = 25 // { uid_t sys_geteuid(void); }
+ SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); }
+ SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
+ SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); }
+ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); }
+ SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); }
+ SYS_SYNC = 36 // { void sys_sync(void); }
+ SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); }
+ SYS_GETPPID = 39 // { pid_t sys_getppid(void); }
+ SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); }
+ SYS_DUP = 41 // { int sys_dup(int fd); }
+ SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); }
+ SYS_GETEGID = 43 // { gid_t sys_getegid(void); }
+ SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); }
+ SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); }
+ SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); }
+ SYS_GETGID = 47 // { gid_t sys_getgid(void); }
+ SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); }
+ SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int sys_acct(const char *path); }
+ SYS_SIGPENDING = 52 // { int sys_sigpending(void); }
+ SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); }
+ SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); }
+ SYS_REBOOT = 55 // { int sys_reboot(int opt); }
+ SYS_REVOKE = 56 // { int sys_revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); }
+ SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); }
+ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int sys_chroot(const char *path); }
+ SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); }
+ SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); }
+ SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); }
+ SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); }
+ SYS_VFORK = 66 // { int sys_vfork(void); }
+ SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); }
+ SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); }
+ SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
+ SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); }
+ SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); }
+ SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); }
+ SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); }
+ SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int sys_getpgrp(void); }
+ SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); }
+ SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); }
+ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); }
+ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); }
+ SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); }
+ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); }
+ SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+ SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); }
+ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); }
+ SYS_FSYNC = 95 // { int sys_fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); }
+ SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); }
+ SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); }
+ SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); }
+ SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); }
+ SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+ SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
+ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); }
+ SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); }
+ SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); }
+ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
+ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
+ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); }
+ SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); }
+ SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); }
+ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
+ SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); }
+ SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_KILL = 122 // { int sys_kill(int pid, int signum); }
+ SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int sys_flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
+ SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int sys_rmdir(const char *path); }
+ SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); }
+ SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); }
+ SYS_SETSID = 147 // { int sys_setsid(void); }
+ SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); }
+ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); }
+ SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+ SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); }
+ SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); }
+ SYS_SETGID = 181 // { int sys_setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); }
+ SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+ SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); }
+ SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); }
+ SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); }
+ SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); }
+ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); }
+ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); }
+ SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); }
+ SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); }
+ SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); }
+ SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+ SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); }
+ SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); }
+ SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_ISSETUGID = 253 // { int sys_issetugid(void); }
+ SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); }
+ SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+ SYS_PIPE = 263 // { int sys_pipe(int *fdp); }
+ SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+ SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); }
+ SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); }
+ SYS_KQUEUE = 269 // { int sys_kqueue(void); }
+ SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); }
+ SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); }
+ SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+ SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+ SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); }
+ SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); }
+ SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); }
+ SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+ SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); }
+ SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); }
+ SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); }
+ SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); }
+ SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); }
+ SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); }
+ SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); }
+ SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); }
+ SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); }
+ SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); }
+ SYS_GETRTABLE = 311 // { int sys_getrtable(void); }
+ SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); }
+ SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); }
+ SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); }
+ SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); }
+ SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); }
+ SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); }
+ SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... mode_t mode); }
+ SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); }
+ SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); }
+ SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); }
+ SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); }
+ SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); }
+ SYS___GET_TCB = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
index cedc9b0f2..2c1f815e6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
@@ -30,11 +30,6 @@ type Timespec struct {
Nsec int32
}
-type StTimespec struct {
- Sec int32
- Nsec int32
-}
-
type Timeval struct {
Sec int32
Usec int32
@@ -101,9 +96,9 @@ type Stat_t struct {
Gid uint32
Rdev uint32
Size int32
- Atim StTimespec
- Mtim StTimespec
- Ctim StTimespec
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
Blksize int32
Blocks int32
Vfstype int32
@@ -148,6 +143,17 @@ type RawSockaddrUnix struct {
Path [1023]uint8
}
+type RawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [120]uint8
+}
+
type RawSockaddr struct {
Len uint8
Family uint8
@@ -207,17 +213,18 @@ type Msghdr struct {
}
const (
- SizeofSockaddrInet4 = 0x10
- SizeofSockaddrInet6 = 0x1c
- SizeofSockaddrAny = 0x404
- SizeofSockaddrUnix = 0x401
- SizeofLinger = 0x8
- SizeofIPMreq = 0x8
- SizeofIPv6Mreq = 0x14
- SizeofIPv6MTUInfo = 0x20
- SizeofMsghdr = 0x1c
- SizeofCmsghdr = 0xc
- SizeofICMPv6Filter = 0x20
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x404
+ SizeofSockaddrUnix = 0x401
+ SizeofSockaddrDatalink = 0x80
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofMsghdr = 0x1c
+ SizeofCmsghdr = 0xc
+ SizeofICMPv6Filter = 0x20
)
const (
diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
index f46482d27..b4a069ecb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
@@ -30,12 +30,6 @@ type Timespec struct {
Nsec int64
}
-type StTimespec struct {
- Sec int64
- Nsec int32
- _ [4]byte
-}
-
type Timeval struct {
Sec int64
Usec int32
@@ -103,10 +97,9 @@ type Stat_t struct {
Gid uint32
Rdev uint64
Ssize int32
- _ [4]byte
- Atim StTimespec
- Mtim StTimespec
- Ctim StTimespec
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
Blksize int64
Blocks int64
Vfstype int32
@@ -154,6 +147,17 @@ type RawSockaddrUnix struct {
Path [1023]uint8
}
+type RawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [120]uint8
+}
+
type RawSockaddr struct {
Len uint8
Family uint8
@@ -205,27 +209,26 @@ type Linger struct {
type Msghdr struct {
Name *byte
Namelen uint32
- _ [4]byte
Iov *Iovec
Iovlen int32
- _ [4]byte
Control *byte
Controllen uint32
Flags int32
}
const (
- SizeofSockaddrInet4 = 0x10
- SizeofSockaddrInet6 = 0x1c
- SizeofSockaddrAny = 0x404
- SizeofSockaddrUnix = 0x401
- SizeofLinger = 0x8
- SizeofIPMreq = 0x8
- SizeofIPv6Mreq = 0x14
- SizeofIPv6MTUInfo = 0x20
- SizeofMsghdr = 0x30
- SizeofCmsghdr = 0xc
- SizeofICMPv6Filter = 0x20
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x404
+ SizeofSockaddrUnix = 0x401
+ SizeofSockaddrDatalink = 0x80
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofICMPv6Filter = 0x20
)
const (
@@ -339,7 +342,6 @@ type Statfs_t struct {
Ffree uint64
Fsid Fsid64_t
Vfstype int32
- _ [4]byte
Fsize uint64
Vfsnumber int32
Vfsoff int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
index 2aeb52a88..9f47b87c5 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
@@ -59,24 +59,24 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev int32
- Mode uint16
- Nlink uint16
- Ino uint64
- Uid uint32
- Gid uint32
- Rdev int32
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Qspare [2]int64
+ Dev int32
+ Mode uint16
+ Nlink uint16
+ Ino uint64
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare [2]int64
}
type Statfs_t struct {
@@ -487,3 +487,13 @@ type Utsname struct {
Version [256]byte
Machine [256]byte
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 0d0d9f2cc..966798a87 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -63,25 +63,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev int32
- Mode uint16
- Nlink uint16
- Ino uint64
- Uid uint32
- Gid uint32
- Rdev int32
- _ [4]byte
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Qspare [2]int64
+ Dev int32
+ Mode uint16
+ Nlink uint16
+ Ino uint64
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ _ [4]byte
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare [2]int64
}
type Statfs_t struct {
@@ -497,3 +497,13 @@ type Utsname struct {
Version [256]byte
Machine [256]byte
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
index 04e344b78..4fe4c9cd7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
@@ -60,24 +60,24 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev int32
- Mode uint16
- Nlink uint16
- Ino uint64
- Uid uint32
- Gid uint32
- Rdev int32
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Qspare [2]int64
+ Dev int32
+ Mode uint16
+ Nlink uint16
+ Ino uint64
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare [2]int64
}
type Statfs_t struct {
@@ -488,3 +488,13 @@ type Utsname struct {
Version [256]byte
Machine [256]byte
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 9fec185c1..21999e4b0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -63,25 +63,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev int32
- Mode uint16
- Nlink uint16
- Ino uint64
- Uid uint32
- Gid uint32
- Rdev int32
- _ [4]byte
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Qspare [2]int64
+ Dev int32
+ Mode uint16
+ Nlink uint16
+ Ino uint64
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ _ [4]byte
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare [2]int64
}
type Statfs_t struct {
@@ -497,3 +497,13 @@ type Utsname struct {
Version [256]byte
Machine [256]byte
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
index 7b34e2e2c..c206f2b05 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
@@ -57,25 +57,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Ino uint64
- Nlink uint32
- Dev uint32
- Mode uint16
- Padding1 uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Lspare int32
- Qspare1 int64
- Qspare2 int64
+ Ino uint64
+ Nlink uint32
+ Dev uint32
+ Mode uint16
+ _1 uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize uint32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Qspare1 int64
+ Qspare2 int64
}
type Statfs_t struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index c146c1ad3..7312e95ff 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -62,50 +62,50 @@ const (
)
type Stat_t struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint16
- _0 int16
- Uid uint32
- Gid uint32
- _1 int32
- Rdev uint64
- Atim_ext int32
- Atim Timespec
- Mtim_ext int32
- Mtim Timespec
- Ctim_ext int32
- Ctim Timespec
- Btim_ext int32
- Birthtim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint64
- Spare [10]uint64
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ _0 int16
+ Uid uint32
+ Gid uint32
+ _1 int32
+ Rdev uint64
+ _ int32
+ Atim Timespec
+ _ int32
+ Mtim Timespec
+ _ int32
+ Ctim Timespec
+ _ int32
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
}
type stat_freebsd11_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtim Timespec
- _ [8]byte
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Btim Timespec
+ _ [8]byte
}
type Statfs_t struct {
@@ -324,11 +324,108 @@ const (
)
const (
- PTRACE_TRACEME = 0x0
- PTRACE_CONT = 0x7
- PTRACE_KILL = 0x8
+ PTRACE_ATTACH = 0xa
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0xb
+ PTRACE_GETFPREGS = 0x23
+ PTRACE_GETFSBASE = 0x47
+ PTRACE_GETLWPLIST = 0xf
+ PTRACE_GETNUMLWPS = 0xe
+ PTRACE_GETREGS = 0x21
+ PTRACE_GETXSTATE = 0x45
+ PTRACE_IO = 0xc
+ PTRACE_KILL = 0x8
+ PTRACE_LWPEVENTS = 0x18
+ PTRACE_LWPINFO = 0xd
+ PTRACE_SETFPREGS = 0x24
+ PTRACE_SETREGS = 0x22
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_TRACEME = 0x0
+)
+
+const (
+ PIOD_READ_D = 0x1
+ PIOD_WRITE_D = 0x2
+ PIOD_READ_I = 0x3
+ PIOD_WRITE_I = 0x4
+)
+
+const (
+ PL_FLAG_BORN = 0x100
+ PL_FLAG_EXITED = 0x200
+ PL_FLAG_SI = 0x20
+)
+
+const (
+ TRAP_BRKPT = 0x1
+ TRAP_TRACE = 0x2
)
+type PtraceLwpInfoStruct struct {
+ Lwpid int32
+ Event int32
+ Flags int32
+ Sigmask Sigset_t
+ Siglist Sigset_t
+ Siginfo __Siginfo
+ Tdname [20]int8
+ Child_pid int32
+ Syscall_code uint32
+ Syscall_narg uint32
+}
+
+type __Siginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr *byte
+ Value [4]byte
+ X_reason [32]byte
+}
+
+type Sigset_t struct {
+ Val [4]uint32
+}
+
+type Reg struct {
+ Fs uint32
+ Es uint32
+ Ds uint32
+ Edi uint32
+ Esi uint32
+ Ebp uint32
+ Isp uint32
+ Ebx uint32
+ Edx uint32
+ Ecx uint32
+ Eax uint32
+ Trapno uint32
+ Err uint32
+ Eip uint32
+ Cs uint32
+ Eflags uint32
+ Esp uint32
+ Ss uint32
+ Gs uint32
+}
+
+type FpReg struct {
+ Env [7]uint32
+ Acc [8][10]uint8
+ Ex_sw uint32
+ Pad [64]uint8
+}
+
+type PtraceIoDesc struct {
+ Op int32
+ Offs *byte
+ Addr *byte
+ Len uint
+}
+
type Kevent_t struct {
Ident uint32
Filter int16
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index ac33a8dd4..29ba2f5bf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -62,45 +62,45 @@ const (
)
type Stat_t struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint16
- _0 int16
- Uid uint32
- Gid uint32
- _1 int32
- Rdev uint64
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Birthtim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint64
- Spare [10]uint64
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ _0 int16
+ Uid uint32
+ Gid uint32
+ _1 int32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
}
type stat_freebsd11_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtim Timespec
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Btim Timespec
}
type Statfs_t struct {
@@ -322,11 +322,115 @@ const (
)
const (
- PTRACE_TRACEME = 0x0
- PTRACE_CONT = 0x7
- PTRACE_KILL = 0x8
+ PTRACE_ATTACH = 0xa
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0xb
+ PTRACE_GETFPREGS = 0x23
+ PTRACE_GETFSBASE = 0x47
+ PTRACE_GETLWPLIST = 0xf
+ PTRACE_GETNUMLWPS = 0xe
+ PTRACE_GETREGS = 0x21
+ PTRACE_GETXSTATE = 0x45
+ PTRACE_IO = 0xc
+ PTRACE_KILL = 0x8
+ PTRACE_LWPEVENTS = 0x18
+ PTRACE_LWPINFO = 0xd
+ PTRACE_SETFPREGS = 0x24
+ PTRACE_SETREGS = 0x22
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_TRACEME = 0x0
)
+const (
+ PIOD_READ_D = 0x1
+ PIOD_WRITE_D = 0x2
+ PIOD_READ_I = 0x3
+ PIOD_WRITE_I = 0x4
+)
+
+const (
+ PL_FLAG_BORN = 0x100
+ PL_FLAG_EXITED = 0x200
+ PL_FLAG_SI = 0x20
+)
+
+const (
+ TRAP_BRKPT = 0x1
+ TRAP_TRACE = 0x2
+)
+
+type PtraceLwpInfoStruct struct {
+ Lwpid int32
+ Event int32
+ Flags int32
+ Sigmask Sigset_t
+ Siglist Sigset_t
+ Siginfo __Siginfo
+ Tdname [20]int8
+ Child_pid int32
+ Syscall_code uint32
+ Syscall_narg uint32
+}
+
+type __Siginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr *byte
+ Value [8]byte
+ _ [40]byte
+}
+
+type Sigset_t struct {
+ Val [4]uint32
+}
+
+type Reg struct {
+ R15 int64
+ R14 int64
+ R13 int64
+ R12 int64
+ R11 int64
+ R10 int64
+ R9 int64
+ R8 int64
+ Rdi int64
+ Rsi int64
+ Rbp int64
+ Rbx int64
+ Rdx int64
+ Rcx int64
+ Rax int64
+ Trapno uint32
+ Fs uint16
+ Gs uint16
+ Err uint32
+ Es uint16
+ Ds uint16
+ Rip int64
+ Cs int64
+ Rflags int64
+ Rsp int64
+ Ss int64
+}
+
+type FpReg struct {
+ Env [4]uint64
+ Acc [8][16]uint8
+ Xacc [16][16]uint8
+ Spare [12]uint64
+}
+
+type PtraceIoDesc struct {
+ Op int32
+ Offs *byte
+ Addr *byte
+ Len uint
+}
+
type Kevent_t struct {
Ident uint64
Filter int16
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index e27511a64..b4090ef31 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -64,45 +64,45 @@ const (
)
type Stat_t struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint16
- _0 int16
- Uid uint32
- Gid uint32
- _1 int32
- Rdev uint64
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Birthtim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint64
- Spare [10]uint64
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ _0 int16
+ Uid uint32
+ Gid uint32
+ _1 int32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
}
type stat_freebsd11_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtim Timespec
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Btim Timespec
}
type Statfs_t struct {
@@ -322,11 +322,92 @@ const (
)
const (
- PTRACE_TRACEME = 0x0
- PTRACE_CONT = 0x7
- PTRACE_KILL = 0x8
+ PTRACE_ATTACH = 0xa
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0xb
+ PTRACE_GETFPREGS = 0x23
+ PTRACE_GETFSBASE = 0x47
+ PTRACE_GETLWPLIST = 0xf
+ PTRACE_GETNUMLWPS = 0xe
+ PTRACE_GETREGS = 0x21
+ PTRACE_GETXSTATE = 0x45
+ PTRACE_IO = 0xc
+ PTRACE_KILL = 0x8
+ PTRACE_LWPEVENTS = 0x18
+ PTRACE_LWPINFO = 0xd
+ PTRACE_SETFPREGS = 0x24
+ PTRACE_SETREGS = 0x22
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_TRACEME = 0x0
+)
+
+const (
+ PIOD_READ_D = 0x1
+ PIOD_WRITE_D = 0x2
+ PIOD_READ_I = 0x3
+ PIOD_WRITE_I = 0x4
+)
+
+const (
+ PL_FLAG_BORN = 0x100
+ PL_FLAG_EXITED = 0x200
+ PL_FLAG_SI = 0x20
+)
+
+const (
+ TRAP_BRKPT = 0x1
+ TRAP_TRACE = 0x2
)
+type PtraceLwpInfoStruct struct {
+ Lwpid int32
+ Event int32
+ Flags int32
+ Sigmask Sigset_t
+ Siglist Sigset_t
+ Siginfo __Siginfo
+ Tdname [20]int8
+ Child_pid int32
+ Syscall_code uint32
+ Syscall_narg uint32
+}
+
+type __Siginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr *byte
+ Value [4]byte
+ X_reason [32]byte
+}
+
+type Sigset_t struct {
+ Val [4]uint32
+}
+
+type Reg struct {
+ R [13]uint32
+ R_sp uint32
+ R_lr uint32
+ R_pc uint32
+ R_cpsr uint32
+}
+
+type FpReg struct {
+ Fpr_fpsr uint32
+ Fpr [8][3]uint32
+}
+
+type PtraceIoDesc struct {
+ Op int32
+ Offs *byte
+ Addr *byte
+ Len uint
+}
+
type Kevent_t struct {
Ident uint32
Filter int16
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 2aadc1a4d..1542a8773 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -62,45 +62,45 @@ const (
)
type Stat_t struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint16
- _0 int16
- Uid uint32
- Gid uint32
- _1 int32
- Rdev uint64
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Birthtim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint64
- Spare [10]uint64
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint16
+ _0 int16
+ Uid uint32
+ Gid uint32
+ _1 int32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint64
+ Spare [10]uint64
}
type stat_freebsd11_t struct {
- Dev uint32
- Ino uint32
- Mode uint16
- Nlink uint16
- Uid uint32
- Gid uint32
- Rdev uint32
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Size int64
- Blocks int64
- Blksize int32
- Flags uint32
- Gen uint32
- Lspare int32
- Birthtim Timespec
+ Dev uint32
+ Ino uint32
+ Mode uint16
+ Nlink uint16
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ Lspare int32
+ Btim Timespec
}
type Statfs_t struct {
@@ -322,11 +322,93 @@ const (
)
const (
- PTRACE_TRACEME = 0x0
- PTRACE_CONT = 0x7
- PTRACE_KILL = 0x8
+ PTRACE_ATTACH = 0xa
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0xb
+ PTRACE_GETFPREGS = 0x23
+ PTRACE_GETFSBASE = 0x47
+ PTRACE_GETLWPLIST = 0xf
+ PTRACE_GETNUMLWPS = 0xe
+ PTRACE_GETREGS = 0x21
+ PTRACE_GETXSTATE = 0x45
+ PTRACE_IO = 0xc
+ PTRACE_KILL = 0x8
+ PTRACE_LWPEVENTS = 0x18
+ PTRACE_LWPINFO = 0xd
+ PTRACE_SETFPREGS = 0x24
+ PTRACE_SETREGS = 0x22
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_TRACEME = 0x0
+)
+
+const (
+ PIOD_READ_D = 0x1
+ PIOD_WRITE_D = 0x2
+ PIOD_READ_I = 0x3
+ PIOD_WRITE_I = 0x4
+)
+
+const (
+ PL_FLAG_BORN = 0x100
+ PL_FLAG_EXITED = 0x200
+ PL_FLAG_SI = 0x20
+)
+
+const (
+ TRAP_BRKPT = 0x1
+ TRAP_TRACE = 0x2
)
+type PtraceLwpInfoStruct struct {
+ Lwpid int32
+ Event int32
+ Flags int32
+ Sigmask Sigset_t
+ Siglist Sigset_t
+ Siginfo __Siginfo
+ Tdname [20]int8
+ Child_pid int32
+ Syscall_code uint32
+ Syscall_narg uint32
+}
+
+type __Siginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr *byte
+ Value [8]byte
+ X_reason [40]byte
+}
+
+type Sigset_t struct {
+ Val [4]uint32
+}
+
+type Reg struct {
+ X [30]uint64
+ Lr uint64
+ Sp uint64
+ Elr uint64
+ Spsr uint32
+}
+
+type FpReg struct {
+ Fp_q [32]uint128
+ Fp_sr uint32
+ Fp_cr uint32
+}
+
+type PtraceIoDesc struct {
+ Op int32
+ Offs *byte
+ Addr *byte
+ Len uint
+}
+
type Kevent_t struct {
Ident uint64
Filter int16
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index ebf10d48d..5492b9666 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -405,6 +405,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -434,141 +439,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -634,6 +683,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x8
@@ -759,7 +829,32 @@ type Sigset_t struct {
Val [32]uint32
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -932,7 +1027,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1035,6 +1131,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1056,21 +1153,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1083,6 +1197,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1339,6 +1454,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1387,6 +1517,9 @@ const (
SizeofTpacketHdr = 0x18
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1964,6 +2097,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1983,4 +2120,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 99a6900d2..caf33b2c5 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -406,6 +406,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -435,141 +440,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -635,6 +684,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -772,7 +842,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -943,7 +1038,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1046,6 +1142,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1067,21 +1164,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1094,6 +1208,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1351,6 +1466,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1399,6 +1529,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1977,6 +2110,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1996,4 +2133,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 5ccc4b541..93aec7e22 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -409,6 +409,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -438,141 +443,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -638,6 +687,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x8
@@ -748,7 +818,32 @@ type Sigset_t struct {
Val [32]uint32
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -921,7 +1016,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1024,6 +1120,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1045,21 +1142,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1072,6 +1186,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1329,6 +1444,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1377,6 +1507,9 @@ const (
SizeofTpacketHdr = 0x18
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1955,6 +2088,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1974,4 +2111,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]uint8
+ Driver_name [64]uint8
+ Module_name [64]uint8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]uint8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]uint8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]uint8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]uint8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportHash struct {
+ Type [64]uint8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]uint8
+}
+
+type CryptoReportRNG struct {
+ Type [64]uint8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]uint8
+}
+
+type CryptoReportKPP struct {
+ Type [64]uint8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]uint8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index be375d9bb..0a038436d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -407,6 +407,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -436,141 +441,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -636,6 +685,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -751,7 +821,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -922,7 +1017,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1025,6 +1121,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1046,21 +1143,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1073,6 +1187,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1330,6 +1445,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1378,6 +1508,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1956,6 +2089,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1975,4 +2112,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 195f7e1f8..2de0e5800 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -408,6 +408,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -437,141 +442,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -637,6 +686,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x8
@@ -753,7 +823,32 @@ type Sigset_t struct {
Val [32]uint32
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x80
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -926,7 +1021,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1029,6 +1125,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1050,21 +1147,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1077,6 +1191,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1335,6 +1450,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1383,6 +1513,9 @@ const (
SizeofTpacketHdr = 0x18
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1961,6 +2094,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1980,4 +2117,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 77acf5698..3735eb42e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -407,6 +407,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -436,141 +441,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -636,6 +685,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -753,7 +823,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x80
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -924,7 +1019,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1027,6 +1123,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1048,21 +1145,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1075,6 +1189,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1332,6 +1447,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1380,6 +1510,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1958,6 +2091,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1977,4 +2114,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 2fb7498da..073c29939 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -407,6 +407,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -436,141 +441,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -636,6 +685,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -753,7 +823,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x80
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -924,7 +1019,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1027,6 +1123,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1048,21 +1145,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1075,6 +1189,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1332,6 +1447,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1380,6 +1510,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1958,6 +2091,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1977,4 +2114,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 41cb14863..58d09f75e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -408,6 +408,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -437,141 +442,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -637,6 +686,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x8
@@ -753,7 +823,32 @@ type Sigset_t struct {
Val [32]uint32
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x80
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -926,7 +1021,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1029,6 +1125,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1050,21 +1147,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1077,6 +1191,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1335,6 +1450,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1383,6 +1513,9 @@ const (
SizeofTpacketHdr = 0x18
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1961,6 +2094,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1980,4 +2117,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 8e6b5fa68..3f1e62e03 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -408,6 +408,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -437,141 +442,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -637,6 +686,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -761,7 +831,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -932,7 +1027,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1035,6 +1131,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1056,21 +1153,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1083,6 +1197,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1340,6 +1455,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1388,6 +1518,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1966,6 +2099,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1985,4 +2122,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]uint8
+ Driver_name [64]uint8
+ Module_name [64]uint8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]uint8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]uint8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]uint8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]uint8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportHash struct {
+ Type [64]uint8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]uint8
+}
+
+type CryptoReportRNG struct {
+ Type [64]uint8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]uint8
+}
+
+type CryptoReportKPP struct {
+ Type [64]uint8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]uint8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
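
The NdMsg type and the NDA_*/NTF_*/NUD_* constants in the hunks above describe rtnetlink neighbor (ARP/NDP) messages. A rough illustration, again assuming the vendored unix package: dump the IPv4 neighbor table and read the fixed NdMsg header that precedes the NDA_* attributes in each RTM_NEWNEIGH reply.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// NetlinkRIB sends a dump request and returns the raw replies.
	buf, err := unix.NetlinkRIB(unix.RTM_GETNEIGH, unix.AF_INET)
	if err != nil {
		fmt.Println("netlink:", err)
		return
	}
	msgs, err := unix.ParseNetlinkMessage(buf)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	for _, m := range msgs {
		if m.Header.Type != unix.RTM_NEWNEIGH || len(m.Data) < unix.SizeofNdMsg {
			continue
		}
		// Fixed-size NdMsg header sits in front of the NDA_* attributes.
		nd := (*unix.NdMsg)(unsafe.Pointer(&m.Data[0]))
		reachable := nd.State&unix.NUD_REACHABLE != 0
		fmt.Printf("ifindex=%d state=%#x reachable=%v\n", nd.Ifindex, nd.State, reachable)
	}
}
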
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 019d2d6a9..e67be11eb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -408,6 +408,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -437,141 +442,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -637,6 +686,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -761,7 +831,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -932,7 +1027,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1035,6 +1131,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1056,21 +1153,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1083,6 +1197,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1340,6 +1455,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1388,6 +1518,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1966,6 +2099,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1985,4 +2122,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]uint8
+ Driver_name [64]uint8
+ Module_name [64]uint8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]uint8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]uint8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]uint8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]uint8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportHash struct {
+ Type [64]uint8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]uint8
+}
+
+type CryptoReportRNG struct {
+ Type [64]uint8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]uint8
+}
+
+type CryptoReportKPP struct {
+ Type [64]uint8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]uint8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
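
The large BPF_* block repeated in each of these files tracks the bpf(2) uapi: command numbers, map and program types, cgroup attach points, and sock_ops callbacks. Below is a hedged sketch of creating a small array map through the raw syscall; the constants come from the files above, while the mapCreateAttr struct and the raw-syscall invocation are hand-written assumptions for illustration, mirroring only the leading fields of the kernel's bpf_attr union.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// mapCreateAttr is an illustrative, hand-written mirror of the leading
// bpf_attr fields consumed by BPF_MAP_CREATE; only these are needed here.
type mapCreateAttr struct {
	MapType    uint32
	KeySize    uint32
	ValueSize  uint32
	MaxEntries uint32
	MapFlags   uint32
}

func main() {
	attr := mapCreateAttr{
		MapType:    unix.BPF_MAP_TYPE_ARRAY,
		KeySize:    4, // array maps index by a 4-byte key
		ValueSize:  8,
		MaxEntries: 16,
	}
	fd, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_MAP_CREATE,
		uintptr(unsafe.Pointer(&attr)), unsafe.Sizeof(attr))
	if errno != 0 {
		fmt.Println("bpf(BPF_MAP_CREATE):", errno)
		return
	}
	fmt.Println("created BPF array map, fd =", fd)
	unix.Close(int(fd))
}
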
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index cf110ce2b..f44f29403 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -212,7 +212,7 @@ type RawSockaddrInet6 struct {
type RawSockaddrUnix struct {
Family uint16
- Path [108]uint8
+ Path [108]int8
}
type RawSockaddrLinklayer struct {
@@ -407,6 +407,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -436,141 +441,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -636,6 +685,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -778,7 +848,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -949,7 +1044,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1052,6 +1148,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1073,21 +1170,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1100,6 +1214,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1357,6 +1472,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1405,6 +1535,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1983,6 +2116,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -2002,4 +2139,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]uint8
+ Driver_name [64]uint8
+ Module_name [64]uint8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]uint8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]uint8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]uint8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]uint8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]uint8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportLarval struct {
+ Type [64]uint8
+}
+
+type CryptoReportHash struct {
+ Type [64]uint8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]uint8
+ Geniv [64]uint8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]uint8
+}
+
+type CryptoReportRNG struct {
+ Type [64]uint8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]uint8
+}
+
+type CryptoReportKPP struct {
+ Type [64]uint8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]uint8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
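
The PerfEventAttr change above (the trailing padding split into Sample_max_stack) and the new PERF_FLAG_FD_CLOEXEC and PERF_RECORD_* values follow the perf_event_open(2) uapi. A minimal sketch of opening a software cpu-clock counter for the calling process, assuming the vendored unix package as before:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	attr := unix.PerfEventAttr{
		Type:   unix.PERF_TYPE_SOFTWARE,
		Config: unix.PERF_COUNT_SW_CPU_CLOCK,
		Size:   uint32(unsafe.Sizeof(unix.PerfEventAttr{})),
	}
	// pid=0, cpu=-1: count this process on any CPU; the new
	// PERF_FLAG_FD_CLOEXEC keeps the fd from leaking across exec.
	fd, err := unix.PerfEventOpen(&attr, 0, -1, -1, unix.PERF_FLAG_FD_CLOEXEC)
	if err != nil {
		fmt.Println("perf_event_open:", err)
		return
	}
	defer unix.Close(fd)

	var count uint64
	if _, err := unix.Read(fd, (*[8]byte)(unsafe.Pointer(&count))[:]); err == nil {
		fmt.Println("cpu-clock count (ns):", count) // host-endian u64 read from the counter
	}
}
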
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index abdc0863d..90bf5dcc7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -406,6 +406,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -435,141 +440,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -635,6 +684,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -774,7 +844,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x80045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -945,7 +1040,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1048,6 +1144,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1069,21 +1166,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1096,6 +1210,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1354,6 +1469,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1402,6 +1532,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1980,6 +2113,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1999,4 +2136,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index b0c9798d7..4f054dcbb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -410,6 +410,11 @@ type TCPInfo struct {
Total_retrans uint32
}
+type CanFilter struct {
+ Id uint32
+ Mask uint32
+}
+
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
@@ -439,141 +444,185 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
+ SizeofCanFilter = 0x8
)
const (
- IFA_UNSPEC = 0x0
- IFA_ADDRESS = 0x1
- IFA_LOCAL = 0x2
- IFA_LABEL = 0x3
- IFA_BROADCAST = 0x4
- IFA_ANYCAST = 0x5
- IFA_CACHEINFO = 0x6
- IFA_MULTICAST = 0x7
- IFLA_UNSPEC = 0x0
- IFLA_ADDRESS = 0x1
- IFLA_BROADCAST = 0x2
- IFLA_IFNAME = 0x3
- IFLA_INFO_KIND = 0x1
- IFLA_MTU = 0x4
- IFLA_LINK = 0x5
- IFLA_QDISC = 0x6
- IFLA_STATS = 0x7
- IFLA_COST = 0x8
- IFLA_PRIORITY = 0x9
- IFLA_MASTER = 0xa
- IFLA_WIRELESS = 0xb
- IFLA_PROTINFO = 0xc
- IFLA_TXQLEN = 0xd
- IFLA_MAP = 0xe
- IFLA_WEIGHT = 0xf
- IFLA_OPERSTATE = 0x10
- IFLA_LINKMODE = 0x11
- IFLA_LINKINFO = 0x12
- IFLA_NET_NS_PID = 0x13
- IFLA_IFALIAS = 0x14
- IFLA_NUM_VF = 0x15
- IFLA_VFINFO_LIST = 0x16
- IFLA_STATS64 = 0x17
- IFLA_VF_PORTS = 0x18
- IFLA_PORT_SELF = 0x19
- IFLA_AF_SPEC = 0x1a
- IFLA_GROUP = 0x1b
- IFLA_NET_NS_FD = 0x1c
- IFLA_EXT_MASK = 0x1d
- IFLA_PROMISCUITY = 0x1e
- IFLA_NUM_TX_QUEUES = 0x1f
- IFLA_NUM_RX_QUEUES = 0x20
- IFLA_CARRIER = 0x21
- IFLA_PHYS_PORT_ID = 0x22
- IFLA_CARRIER_CHANGES = 0x23
- IFLA_PHYS_SWITCH_ID = 0x24
- IFLA_LINK_NETNSID = 0x25
- IFLA_PHYS_PORT_NAME = 0x26
- IFLA_PROTO_DOWN = 0x27
- IFLA_GSO_MAX_SEGS = 0x28
- IFLA_GSO_MAX_SIZE = 0x29
- IFLA_PAD = 0x2a
- IFLA_XDP = 0x2b
- IFLA_EVENT = 0x2c
- IFLA_NEW_NETNSID = 0x2d
- IFLA_IF_NETNSID = 0x2e
- IFLA_MAX = 0x33
- RT_SCOPE_UNIVERSE = 0x0
- RT_SCOPE_SITE = 0xc8
- RT_SCOPE_LINK = 0xfd
- RT_SCOPE_HOST = 0xfe
- RT_SCOPE_NOWHERE = 0xff
- RT_TABLE_UNSPEC = 0x0
- RT_TABLE_COMPAT = 0xfc
- RT_TABLE_DEFAULT = 0xfd
- RT_TABLE_MAIN = 0xfe
- RT_TABLE_LOCAL = 0xff
- RT_TABLE_MAX = 0xffffffff
- RTA_UNSPEC = 0x0
- RTA_DST = 0x1
- RTA_SRC = 0x2
- RTA_IIF = 0x3
- RTA_OIF = 0x4
- RTA_GATEWAY = 0x5
- RTA_PRIORITY = 0x6
- RTA_PREFSRC = 0x7
- RTA_METRICS = 0x8
- RTA_MULTIPATH = 0x9
- RTA_FLOW = 0xb
- RTA_CACHEINFO = 0xc
- RTA_TABLE = 0xf
- RTA_MARK = 0x10
- RTA_MFC_STATS = 0x11
- RTA_VIA = 0x12
- RTA_NEWDST = 0x13
- RTA_PREF = 0x14
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
- RTA_EXPIRES = 0x17
- RTA_PAD = 0x18
- RTA_UID = 0x19
- RTA_TTL_PROPAGATE = 0x1a
- RTA_IP_PROTO = 0x1b
- RTA_SPORT = 0x1c
- RTA_DPORT = 0x1d
- RTN_UNSPEC = 0x0
- RTN_UNICAST = 0x1
- RTN_LOCAL = 0x2
- RTN_BROADCAST = 0x3
- RTN_ANYCAST = 0x4
- RTN_MULTICAST = 0x5
- RTN_BLACKHOLE = 0x6
- RTN_UNREACHABLE = 0x7
- RTN_PROHIBIT = 0x8
- RTN_THROW = 0x9
- RTN_NAT = 0xa
- RTN_XRESOLVE = 0xb
- RTNLGRP_NONE = 0x0
- RTNLGRP_LINK = 0x1
- RTNLGRP_NOTIFY = 0x2
- RTNLGRP_NEIGH = 0x3
- RTNLGRP_TC = 0x4
- RTNLGRP_IPV4_IFADDR = 0x5
- RTNLGRP_IPV4_MROUTE = 0x6
- RTNLGRP_IPV4_ROUTE = 0x7
- RTNLGRP_IPV4_RULE = 0x8
- RTNLGRP_IPV6_IFADDR = 0x9
- RTNLGRP_IPV6_MROUTE = 0xa
- RTNLGRP_IPV6_ROUTE = 0xb
- RTNLGRP_IPV6_IFINFO = 0xc
- RTNLGRP_IPV6_PREFIX = 0x12
- RTNLGRP_IPV6_RULE = 0x13
- RTNLGRP_ND_USEROPT = 0x14
- SizeofNlMsghdr = 0x10
- SizeofNlMsgerr = 0x14
- SizeofRtGenmsg = 0x1
- SizeofNlAttr = 0x4
- SizeofRtAttr = 0x4
- SizeofIfInfomsg = 0x10
- SizeofIfAddrmsg = 0x8
- SizeofRtMsg = 0xc
- SizeofRtNexthop = 0x8
+ NDA_UNSPEC = 0x0
+ NDA_DST = 0x1
+ NDA_LLADDR = 0x2
+ NDA_CACHEINFO = 0x3
+ NDA_PROBES = 0x4
+ NDA_VLAN = 0x5
+ NDA_PORT = 0x6
+ NDA_VNI = 0x7
+ NDA_IFINDEX = 0x8
+ NDA_MASTER = 0x9
+ NDA_LINK_NETNSID = 0xa
+ NDA_SRC_VNI = 0xb
+ NTF_USE = 0x1
+ NTF_SELF = 0x2
+ NTF_MASTER = 0x4
+ NTF_PROXY = 0x8
+ NTF_EXT_LEARNED = 0x10
+ NTF_OFFLOADED = 0x20
+ NTF_ROUTER = 0x80
+ NUD_INCOMPLETE = 0x1
+ NUD_REACHABLE = 0x2
+ NUD_STALE = 0x4
+ NUD_DELAY = 0x8
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+ NUD_NONE = 0x0
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFA_FLAGS = 0x8
+ IFA_RT_PRIORITY = 0x9
+ IFA_TARGET_NETNSID = 0xa
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_NUM_VF = 0x15
+ IFLA_VFINFO_LIST = 0x16
+ IFLA_STATS64 = 0x17
+ IFLA_VF_PORTS = 0x18
+ IFLA_PORT_SELF = 0x19
+ IFLA_AF_SPEC = 0x1a
+ IFLA_GROUP = 0x1b
+ IFLA_NET_NS_FD = 0x1c
+ IFLA_EXT_MASK = 0x1d
+ IFLA_PROMISCUITY = 0x1e
+ IFLA_NUM_TX_QUEUES = 0x1f
+ IFLA_NUM_RX_QUEUES = 0x20
+ IFLA_CARRIER = 0x21
+ IFLA_PHYS_PORT_ID = 0x22
+ IFLA_CARRIER_CHANGES = 0x23
+ IFLA_PHYS_SWITCH_ID = 0x24
+ IFLA_LINK_NETNSID = 0x25
+ IFLA_PHYS_PORT_NAME = 0x26
+ IFLA_PROTO_DOWN = 0x27
+ IFLA_GSO_MAX_SEGS = 0x28
+ IFLA_GSO_MAX_SIZE = 0x29
+ IFLA_PAD = 0x2a
+ IFLA_XDP = 0x2b
+ IFLA_EVENT = 0x2c
+ IFLA_NEW_NETNSID = 0x2d
+ IFLA_IF_NETNSID = 0x2e
+ IFLA_TARGET_NETNSID = 0x2e
+ IFLA_CARRIER_UP_COUNT = 0x2f
+ IFLA_CARRIER_DOWN_COUNT = 0x30
+ IFLA_NEW_IFINDEX = 0x31
+ IFLA_MIN_MTU = 0x32
+ IFLA_MAX_MTU = 0x33
+ IFLA_MAX = 0x33
+ IFLA_INFO_KIND = 0x1
+ IFLA_INFO_DATA = 0x2
+ IFLA_INFO_XSTATS = 0x3
+ IFLA_INFO_SLAVE_KIND = 0x4
+ IFLA_INFO_SLAVE_DATA = 0x5
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTA_MARK = 0x10
+ RTA_MFC_STATS = 0x11
+ RTA_VIA = 0x12
+ RTA_NEWDST = 0x13
+ RTA_PREF = 0x14
+ RTA_ENCAP_TYPE = 0x15
+ RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+ SizeofNdUseroptmsg = 0x10
+ SizeofNdMsg = 0xc
)
type NlMsghdr struct {
@@ -639,6 +688,27 @@ type RtNexthop struct {
Ifindex int32
}
+type NdUseroptmsg struct {
+ Family uint8
+ Pad1 uint8
+ Opts_len uint16
+ Ifindex int32
+ Icmp_type uint8
+ Icmp_code uint8
+ Pad2 uint16
+ Pad3 uint32
+}
+
+type NdMsg struct {
+ Family uint8
+ Pad1 uint8
+ Pad2 uint16
+ Ifindex int32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
@@ -756,7 +826,32 @@ type Sigset_t struct {
Val [16]uint64
}
-const RNDGETENTCNT = 0x40045200
+const _C__NSIG = 0x41
+
+type SignalfdSiginfo struct {
+ Signo uint32
+ Errno int32
+ Code int32
+ Pid uint32
+ Uid uint32
+ Fd int32
+ Tid uint32
+ Band uint32
+ Overrun uint32
+ Trapno uint32
+ Status int32
+ Int int32
+ Ptr uint64
+ Utime uint64
+ Stime uint64
+ Addr uint64
+ Addr_lsb uint16
+ _ uint16
+ Syscall int32
+ Call_addr uint64
+ Arch uint32
+ _ [28]uint8
+}
const PERF_IOC_FLAG_GROUP = 0x1
@@ -927,7 +1022,8 @@ type PerfEventAttr struct {
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
- _ uint32
+ Sample_max_stack uint16
+ _ uint16
}
type PerfEventMmapPage struct {
@@ -1030,6 +1126,7 @@ const (
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7
PERF_COUNT_SW_EMULATION_FAULTS = 0x8
PERF_COUNT_SW_DUMMY = 0x9
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_SAMPLE_IP = 0x1
PERF_SAMPLE_TID = 0x2
@@ -1051,21 +1148,38 @@ const (
PERF_SAMPLE_BRANCH_ANY_CALL = 0x10
PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20
PERF_SAMPLE_BRANCH_IND_CALL = 0x40
+ PERF_SAMPLE_BRANCH_ABORT_TX = 0x80
+ PERF_SAMPLE_BRANCH_IN_TX = 0x100
+ PERF_SAMPLE_BRANCH_NO_TX = 0x200
+ PERF_SAMPLE_BRANCH_COND = 0x400
+ PERF_SAMPLE_BRANCH_CALL_STACK = 0x800
+ PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000
+ PERF_SAMPLE_BRANCH_CALL = 0x2000
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1
PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2
PERF_FORMAT_ID = 0x4
PERF_FORMAT_GROUP = 0x8
- PERF_RECORD_MMAP = 0x1
- PERF_RECORD_LOST = 0x2
- PERF_RECORD_COMM = 0x3
- PERF_RECORD_EXIT = 0x4
- PERF_RECORD_THROTTLE = 0x5
- PERF_RECORD_UNTHROTTLE = 0x6
- PERF_RECORD_FORK = 0x7
- PERF_RECORD_READ = 0x8
- PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP = 0x1
+ PERF_RECORD_LOST = 0x2
+ PERF_RECORD_COMM = 0x3
+ PERF_RECORD_EXIT = 0x4
+ PERF_RECORD_THROTTLE = 0x5
+ PERF_RECORD_UNTHROTTLE = 0x6
+ PERF_RECORD_FORK = 0x7
+ PERF_RECORD_READ = 0x8
+ PERF_RECORD_SAMPLE = 0x9
+ PERF_RECORD_MMAP2 = 0xa
+ PERF_RECORD_AUX = 0xb
+ PERF_RECORD_ITRACE_START = 0xc
+ PERF_RECORD_LOST_SAMPLES = 0xd
+ PERF_RECORD_SWITCH = 0xe
+ PERF_RECORD_SWITCH_CPU_WIDE = 0xf
+ PERF_RECORD_NAMESPACES = 0x10
PERF_CONTEXT_HV = -0x20
PERF_CONTEXT_KERNEL = -0x80
@@ -1078,6 +1192,7 @@ const (
PERF_FLAG_FD_NO_GROUP = 0x1
PERF_FLAG_FD_OUTPUT = 0x2
PERF_FLAG_PID_CGROUP = 0x4
+ PERF_FLAG_FD_CLOEXEC = 0x8
)
const (
@@ -1335,6 +1450,21 @@ type TpacketBlockDesc struct {
Hdr [40]byte
}
+type TpacketBDTS struct {
+ Sec uint32
+ Usec uint32
+}
+
+type TpacketHdrV1 struct {
+ Block_status uint32
+ Num_pkts uint32
+ Offset_to_first_pkt uint32
+ Blk_len uint32
+ Seq_num uint64
+ Ts_first_pkt TpacketBDTS
+ Ts_last_pkt TpacketBDTS
+}
+
type TpacketReq struct {
Block_size uint32
Block_nr uint32
@@ -1383,6 +1513,9 @@ const (
SizeofTpacketHdr = 0x20
SizeofTpacket2Hdr = 0x20
SizeofTpacket3Hdr = 0x30
+
+ SizeofTpacketStats = 0x8
+ SizeofTpacketStatsV3 = 0xc
)
const (
@@ -1961,6 +2094,10 @@ const (
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa
)
+type ScmTimestamping struct {
+ Ts [3]Timespec
+}
+
const (
SOF_TIMESTAMPING_TX_HARDWARE = 0x1
SOF_TIMESTAMPING_TX_SOFTWARE = 0x2
@@ -1980,4 +2117,367 @@ const (
SOF_TIMESTAMPING_LAST = 0x4000
SOF_TIMESTAMPING_MASK = 0x7fff
+
+ SCM_TSTAMP_SND = 0x0
+ SCM_TSTAMP_SCHED = 0x1
+ SCM_TSTAMP_ACK = 0x2
+)
+
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+type FanotifyEventMetadata struct {
+ Event_len uint32
+ Vers uint8
+ Reserved uint8
+ Metadata_len uint16
+ Mask uint64
+ Fd int32
+ Pid int32
+}
+
+type FanotifyResponse struct {
+ Fd int32
+ Response uint32
+}
+
+const (
+ CRYPTO_MSG_BASE = 0x10
+ CRYPTO_MSG_NEWALG = 0x10
+ CRYPTO_MSG_DELALG = 0x11
+ CRYPTO_MSG_UPDATEALG = 0x12
+ CRYPTO_MSG_GETALG = 0x13
+ CRYPTO_MSG_DELRNG = 0x14
+ CRYPTO_MSG_GETSTAT = 0x15
+)
+
+const (
+ CRYPTOCFGA_UNSPEC = 0x0
+ CRYPTOCFGA_PRIORITY_VAL = 0x1
+ CRYPTOCFGA_REPORT_LARVAL = 0x2
+ CRYPTOCFGA_REPORT_HASH = 0x3
+ CRYPTOCFGA_REPORT_BLKCIPHER = 0x4
+ CRYPTOCFGA_REPORT_AEAD = 0x5
+ CRYPTOCFGA_REPORT_COMPRESS = 0x6
+ CRYPTOCFGA_REPORT_RNG = 0x7
+ CRYPTOCFGA_REPORT_CIPHER = 0x8
+ CRYPTOCFGA_REPORT_AKCIPHER = 0x9
+ CRYPTOCFGA_REPORT_KPP = 0xa
+ CRYPTOCFGA_REPORT_ACOMP = 0xb
+ CRYPTOCFGA_STAT_LARVAL = 0xc
+ CRYPTOCFGA_STAT_HASH = 0xd
+ CRYPTOCFGA_STAT_BLKCIPHER = 0xe
+ CRYPTOCFGA_STAT_AEAD = 0xf
+ CRYPTOCFGA_STAT_COMPRESS = 0x10
+ CRYPTOCFGA_STAT_RNG = 0x11
+ CRYPTOCFGA_STAT_CIPHER = 0x12
+ CRYPTOCFGA_STAT_AKCIPHER = 0x13
+ CRYPTOCFGA_STAT_KPP = 0x14
+ CRYPTOCFGA_STAT_ACOMP = 0x15
+)
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+const (
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+)
+
+type CapUserHeader struct {
+ Version uint32
+ Pid int32
+}
+
+type CapUserData struct {
+ Effective uint32
+ Permitted uint32
+ Inheritable uint32
+}
+
+const (
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330
+ LINUX_CAPABILITY_VERSION_2 = 0x20071026
+ LINUX_CAPABILITY_VERSION_3 = 0x20080522
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index 2dae0c17a..86736ab6e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -57,23 +57,23 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev uint64
- Mode uint32
- Ino uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Rdev uint64
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Spare [2]uint32
+ Dev uint64
+ Mode uint32
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize uint32
+ Flags uint32
+ Gen uint32
+ Spare [2]uint32
}
type Statfs_t [0]byte
@@ -411,6 +411,7 @@ type Ptmget struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x200
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index 1f0e76c0c..3427811f9 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -58,26 +58,26 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev uint64
- Mode uint32
- Pad_cgo_0 [4]byte
- Ino uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Pad_cgo_1 [4]byte
- Rdev uint64
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Spare [2]uint32
- Pad_cgo_2 [4]byte
+ Dev uint64
+ Mode uint32
+ _ [4]byte
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ _ [4]byte
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize uint32
+ Flags uint32
+ Gen uint32
+ Spare [2]uint32
+ _ [4]byte
}
type Statfs_t [0]byte
@@ -418,6 +418,7 @@ type Ptmget struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x200
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 53f2159c7..399f37a43 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -59,26 +59,26 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev uint64
- Mode uint32
- Pad_cgo_0 [4]byte
- Ino uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Pad_cgo_1 [4]byte
- Rdev uint64
- Atimespec Timespec
- Mtimespec Timespec
- Ctimespec Timespec
- Birthtimespec Timespec
- Size int64
- Blocks int64
- Blksize uint32
- Flags uint32
- Gen uint32
- Spare [2]uint32
- Pad_cgo_2 [4]byte
+ Dev uint64
+ Mode uint32
+ _ [4]byte
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ _ [4]byte
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize uint32
+ Flags uint32
+ Gen uint32
+ Spare [2]uint32
+ _ [4]byte
}
type Statfs_t [0]byte
@@ -416,6 +416,7 @@ type Ptmget struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x200
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
new file mode 100644
index 000000000..32f0c15d9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
@@ -0,0 +1,473 @@
+// cgo -godefs types_netbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build arm64,netbsd
+
+package unix
+
+const (
+ SizeofPtr = 0x8
+ SizeofShort = 0x2
+ SizeofInt = 0x4
+ SizeofLong = 0x8
+ SizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int32
+ Pad_cgo_0 [4]byte
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Dev uint64
+ Mode uint32
+ _ [4]byte
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ _ [4]byte
+ Rdev uint64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Btim Timespec
+ Size int64
+ Blocks int64
+ Blksize uint32
+ Flags uint32
+ Gen uint32
+ Spare [2]uint32
+ _ [4]byte
+}
+
+type Statfs_t [0]byte
+
+type Flock_t struct {
+ Start int64
+ Len int64
+ Pid int32
+ Type int16
+ Whence int16
+}
+
+type Dirent struct {
+ Fileno uint64
+ Reclen uint16
+ Namlen uint16
+ Type uint8
+ Name [512]int8
+ Pad_cgo_0 [3]byte
+}
+
+type Fsid struct {
+ X__fsid_val [2]int32
+}
+
+const (
+ PathMax = 0x400
+)
+
+const (
+ FADV_NORMAL = 0x0
+ FADV_RANDOM = 0x1
+ FADV_SEQUENTIAL = 0x2
+ FADV_WILLNEED = 0x3
+ FADV_DONTNEED = 0x4
+ FADV_NOREUSE = 0x5
+)
+
+type RawSockaddrInet4 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type RawSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Len uint8
+ Family uint8
+ Path [104]int8
+}
+
+type RawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+}
+
+type RawSockaddr struct {
+ Len uint8
+ Family uint8
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Pad_cgo_0 [4]byte
+ Iov *Iovec
+ Iovlen int32
+ Pad_cgo_1 [4]byte
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Filt [8]uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x6c
+ SizeofSockaddrUnix = 0x6a
+ SizeofSockaddrDatalink = 0x14
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+)
+
+const (
+ PTRACE_TRACEME = 0x0
+ PTRACE_CONT = 0x7
+ PTRACE_KILL = 0x8
+)
+
+type Kevent_t struct {
+ Ident uint64
+ Filter uint32
+ Flags uint32
+ Fflags uint32
+ Pad_cgo_0 [4]byte
+ Data int64
+ Udata int64
+}
+
+type FdSet struct {
+ Bits [8]uint32
+}
+
+const (
+ SizeofIfMsghdr = 0x98
+ SizeofIfData = 0x88
+ SizeofIfaMsghdr = 0x18
+ SizeofIfAnnounceMsghdr = 0x18
+ SizeofRtMsghdr = 0x78
+ SizeofRtMetrics = 0x50
+)
+
+type IfMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Addrs int32
+ Flags int32
+ Index uint16
+ Pad_cgo_0 [2]byte
+ Data IfData
+}
+
+type IfData struct {
+ Type uint8
+ Addrlen uint8
+ Hdrlen uint8
+ Pad_cgo_0 [1]byte
+ Link_state int32
+ Mtu uint64
+ Metric uint64
+ Baudrate uint64
+ Ipackets uint64
+ Ierrors uint64
+ Opackets uint64
+ Oerrors uint64
+ Collisions uint64
+ Ibytes uint64
+ Obytes uint64
+ Imcasts uint64
+ Omcasts uint64
+ Iqdrops uint64
+ Noproto uint64
+ Lastchange Timespec
+}
+
+type IfaMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Addrs int32
+ Flags int32
+ Metric int32
+ Index uint16
+ Pad_cgo_0 [6]byte
+}
+
+type IfAnnounceMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Index uint16
+ Name [16]int8
+ What uint16
+}
+
+type RtMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Index uint16
+ Pad_cgo_0 [2]byte
+ Flags int32
+ Addrs int32
+ Pid int32
+ Seq int32
+ Errno int32
+ Use int32
+ Inits int32
+ Pad_cgo_1 [4]byte
+ Rmx RtMetrics
+}
+
+type RtMetrics struct {
+ Locks uint64
+ Mtu uint64
+ Hopcount uint64
+ Recvpipe uint64
+ Sendpipe uint64
+ Ssthresh uint64
+ Rtt uint64
+ Rttvar uint64
+ Expire int64
+ Pksent int64
+}
+
+type Mclpool [0]byte
+
+const (
+ SizeofBpfVersion = 0x4
+ SizeofBpfStat = 0x80
+ SizeofBpfProgram = 0x10
+ SizeofBpfInsn = 0x8
+ SizeofBpfHdr = 0x20
+)
+
+type BpfVersion struct {
+ Major uint16
+ Minor uint16
+}
+
+type BpfStat struct {
+ Recv uint64
+ Drop uint64
+ Capt uint64
+ Padding [13]uint64
+}
+
+type BpfProgram struct {
+ Len uint32
+ Pad_cgo_0 [4]byte
+ Insns *BpfInsn
+}
+
+type BpfInsn struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type BpfHdr struct {
+ Tstamp BpfTimeval
+ Caplen uint32
+ Datalen uint32
+ Hdrlen uint16
+ Pad_cgo_0 [6]byte
+}
+
+type BpfTimeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+type Ptmget struct {
+ Cfd int32
+ Sfd int32
+ Cn [1024]byte
+ Sn [1024]byte
+}
+
+const (
+ AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x400
+ AT_SYMLINK_NOFOLLOW = 0x200
+)
+
+type PollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
+const (
+ POLLERR = 0x8
+ POLLHUP = 0x10
+ POLLIN = 0x1
+ POLLNVAL = 0x20
+ POLLOUT = 0x4
+ POLLPRI = 0x2
+ POLLRDBAND = 0x80
+ POLLRDNORM = 0x40
+ POLLWRBAND = 0x100
+ POLLWRNORM = 0x4
+)
+
+type Sysctlnode struct {
+ Flags uint32
+ Num int32
+ Name [32]int8
+ Ver uint32
+ X__rsvd uint32
+ Un [16]byte
+ X_sysctl_size [8]byte
+ X_sysctl_func [8]byte
+ X_sysctl_parent [8]byte
+ X_sysctl_desc [8]byte
+}
+
+type Utsname struct {
+ Sysname [256]byte
+ Nodename [256]byte
+ Release [256]byte
+ Version [256]byte
+ Machine [256]byte
+}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index 8b37d8399..61ea0019a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -436,6 +436,7 @@ type Winsize struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x4
AT_SYMLINK_NOFOLLOW = 0x2
)
@@ -558,3 +559,13 @@ type Uvmexp struct {
Fpswtch int32
Kmapent int32
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index 6efea4635..87a493f68 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -436,6 +436,7 @@ type Winsize struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x4
AT_SYMLINK_NOFOLLOW = 0x2
)
@@ -558,3 +559,13 @@ type Uvmexp struct {
Fpswtch int32
Kmapent int32
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index 510efc3ea..d80836efa 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -437,6 +437,7 @@ type Winsize struct {
const (
AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x4
AT_SYMLINK_NOFOLLOW = 0x2
)
@@ -559,3 +560,13 @@ type Uvmexp struct {
Fpswtch int32
Kmapent int32
}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
new file mode 100644
index 000000000..4e158746f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
@@ -0,0 +1,565 @@
+// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build arm64,openbsd
+
+package unix
+
+const (
+ SizeofPtr = 0x8
+ SizeofShort = 0x2
+ SizeofInt = 0x4
+ SizeofLong = 0x8
+ SizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Mode uint32
+ Dev int32
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ _ Timespec
+}
+
+type Statfs_t struct {
+ F_flags uint32
+ F_bsize uint32
+ F_iosize uint32
+ F_blocks uint64
+ F_bfree uint64
+ F_bavail int64
+ F_files uint64
+ F_ffree uint64
+ F_favail int64
+ F_syncwrites uint64
+ F_syncreads uint64
+ F_asyncwrites uint64
+ F_asyncreads uint64
+ F_fsid Fsid
+ F_namemax uint32
+ F_owner uint32
+ F_ctime uint64
+ F_fstypename [16]int8
+ F_mntonname [90]int8
+ F_mntfromname [90]int8
+ F_mntfromspec [90]int8
+ _ [2]byte
+ Mount_info [160]byte
+}
+
+type Flock_t struct {
+ Start int64
+ Len int64
+ Pid int32
+ Type int16
+ Whence int16
+}
+
+type Dirent struct {
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Namlen uint8
+ _ [4]uint8
+ Name [256]int8
+}
+
+type Fsid struct {
+ Val [2]int32
+}
+
+const (
+ PathMax = 0x400
+)
+
+type RawSockaddrInet4 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type RawSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Len uint8
+ Family uint8
+ Path [104]int8
+}
+
+type RawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [24]int8
+}
+
+type RawSockaddr struct {
+ Len uint8
+ Family uint8
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Iov *Iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Filt [8]uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x6c
+ SizeofSockaddrUnix = 0x6a
+ SizeofSockaddrDatalink = 0x20
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+)
+
+const (
+ PTRACE_TRACEME = 0x0
+ PTRACE_CONT = 0x7
+ PTRACE_KILL = 0x8
+)
+
+type Kevent_t struct {
+ Ident uint64
+ Filter int16
+ Flags uint16
+ Fflags uint32
+ Data int64
+ Udata *byte
+}
+
+type FdSet struct {
+ Bits [32]uint32
+}
+
+const (
+ SizeofIfMsghdr = 0xa8
+ SizeofIfData = 0x90
+ SizeofIfaMsghdr = 0x18
+ SizeofIfAnnounceMsghdr = 0x1a
+ SizeofRtMsghdr = 0x60
+ SizeofRtMetrics = 0x38
+)
+
+type IfMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Pad1 uint8
+ Pad2 uint8
+ Addrs int32
+ Flags int32
+ Xflags int32
+ Data IfData
+}
+
+type IfData struct {
+ Type uint8
+ Addrlen uint8
+ Hdrlen uint8
+ Link_state uint8
+ Mtu uint32
+ Metric uint32
+ Rdomain uint32
+ Baudrate uint64
+ Ipackets uint64
+ Ierrors uint64
+ Opackets uint64
+ Oerrors uint64
+ Collisions uint64
+ Ibytes uint64
+ Obytes uint64
+ Imcasts uint64
+ Omcasts uint64
+ Iqdrops uint64
+ Oqdrops uint64
+ Noproto uint64
+ Capabilities uint32
+ Lastchange Timeval
+}
+
+type IfaMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Pad1 uint8
+ Pad2 uint8
+ Addrs int32
+ Flags int32
+ Metric int32
+}
+
+type IfAnnounceMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ What uint16
+ Name [16]int8
+}
+
+type RtMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Priority uint8
+ Mpls uint8
+ Addrs int32
+ Flags int32
+ Fmask int32
+ Pid int32
+ Seq int32
+ Errno int32
+ Inits uint32
+ Rmx RtMetrics
+}
+
+type RtMetrics struct {
+ Pksent uint64
+ Expire int64
+ Locks uint32
+ Mtu uint32
+ Refcnt uint32
+ Hopcount uint32
+ Recvpipe uint32
+ Sendpipe uint32
+ Ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Pad uint32
+}
+
+type Mclpool struct{}
+
+const (
+ SizeofBpfVersion = 0x4
+ SizeofBpfStat = 0x8
+ SizeofBpfProgram = 0x10
+ SizeofBpfInsn = 0x8
+ SizeofBpfHdr = 0x14
+)
+
+type BpfVersion struct {
+ Major uint16
+ Minor uint16
+}
+
+type BpfStat struct {
+ Recv uint32
+ Drop uint32
+}
+
+type BpfProgram struct {
+ Len uint32
+ Insns *BpfInsn
+}
+
+type BpfInsn struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type BpfHdr struct {
+ Tstamp BpfTimeval
+ Caplen uint32
+ Datalen uint32
+ Hdrlen uint16
+ _ [2]byte
+}
+
+type BpfTimeval struct {
+ Sec uint32
+ Usec uint32
+}
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+const (
+ AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x4
+ AT_SYMLINK_NOFOLLOW = 0x2
+)
+
+type PollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
+const (
+ POLLERR = 0x8
+ POLLHUP = 0x10
+ POLLIN = 0x1
+ POLLNVAL = 0x20
+ POLLOUT = 0x4
+ POLLPRI = 0x2
+ POLLRDBAND = 0x80
+ POLLRDNORM = 0x40
+ POLLWRBAND = 0x100
+ POLLWRNORM = 0x4
+)
+
+type Sigset_t uint32
+
+type Utsname struct {
+ Sysname [256]byte
+ Nodename [256]byte
+ Release [256]byte
+ Version [256]byte
+ Machine [256]byte
+}
+
+const SizeofUvmexp = 0x158
+
+type Uvmexp struct {
+ Pagesize int32
+ Pagemask int32
+ Pageshift int32
+ Npages int32
+ Free int32
+ Active int32
+ Inactive int32
+ Paging int32
+ Wired int32
+ Zeropages int32
+ Reserve_pagedaemon int32
+ Reserve_kernel int32
+ Unused01 int32
+ Vnodepages int32
+ Vtextpages int32
+ Freemin int32
+ Freetarg int32
+ Inactarg int32
+ Wiredmax int32
+ Anonmin int32
+ Vtextmin int32
+ Vnodemin int32
+ Anonminpct int32
+ Vtextminpct int32
+ Vnodeminpct int32
+ Nswapdev int32
+ Swpages int32
+ Swpginuse int32
+ Swpgonly int32
+ Nswget int32
+ Nanon int32
+ Unused05 int32
+ Unused06 int32
+ Faults int32
+ Traps int32
+ Intrs int32
+ Swtch int32
+ Softs int32
+ Syscalls int32
+ Pageins int32
+ Unused07 int32
+ Unused08 int32
+ Pgswapin int32
+ Pgswapout int32
+ Forks int32
+ Forks_ppwait int32
+ Forks_sharevm int32
+ Pga_zerohit int32
+ Pga_zeromiss int32
+ Unused09 int32
+ Fltnoram int32
+ Fltnoanon int32
+ Fltnoamap int32
+ Fltpgwait int32
+ Fltpgrele int32
+ Fltrelck int32
+ Fltrelckok int32
+ Fltanget int32
+ Fltanretry int32
+ Fltamcopy int32
+ Fltnamap int32
+ Fltnomap int32
+ Fltlget int32
+ Fltget int32
+ Flt_anon int32
+ Flt_acow int32
+ Flt_obj int32
+ Flt_prcopy int32
+ Flt_przero int32
+ Pdwoke int32
+ Pdrevs int32
+ Pdswout int32
+ Pdfreed int32
+ Pdscans int32
+ Pdanscan int32
+ Pdobscan int32
+ Pdreact int32
+ Pdbusy int32
+ Pdpageouts int32
+ Pdpending int32
+ Pddeact int32
+ Unused11 int32
+ Unused12 int32
+ Unused13 int32
+ Fpswtch int32
+ Kmapent int32
+}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index e92c05b21..ba67658db 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -359,11 +359,11 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
// trying to load "foo.dll" out of the system
// folder, but LoadLibraryEx doesn't support
// that yet on their system, so emulate it.
- windir, _ := Getenv("WINDIR") // old var; apparently works on XP
- if windir == "" {
- return nil, errString("%WINDIR% not defined")
+ systemdir, err := GetSystemDirectory()
+ if err != nil {
+ return nil, err
}
- loadDLL = windir + "\\System32\\" + name
+ loadDLL = systemdir + "\\" + name
}
}
h, err := LoadLibraryEx(loadDLL, 0, flags)
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
index bdc71e241..f482a9fab 100644
--- a/vendor/golang.org/x/sys/windows/env_windows.go
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -6,7 +6,11 @@
package windows
-import "syscall"
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
func Getenv(key string) (value string, found bool) {
return syscall.Getenv(key)
@@ -24,6 +28,34 @@ func Environ() []string {
return syscall.Environ()
}
+// Environ returns a default environment associated with the token, rather than the current
+// process. If inheritExisting is true, then this environment also inherits the
+// environment of the current process.
+func (token Token) Environ(inheritExisting bool) (env []string, err error) {
+ var block *uint16
+ err = CreateEnvironmentBlock(&block, token, inheritExisting)
+ if err != nil {
+ return nil, err
+ }
+ defer DestroyEnvironmentBlock(block)
+ blockp := uintptr(unsafe.Pointer(block))
+ for {
+ entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:]
+ for i, v := range entry {
+ if v == 0 {
+ entry = entry[:i]
+ break
+ }
+ }
+ if len(entry) == 0 {
+ break
+ }
+ env = append(env, string(utf16.Decode(entry)))
+ blockp += 2 * (uintptr(len(entry)) + 1)
+ }
+ return env, nil
+}
+
func Unsetenv(key string) error {
return syscall.Unsetenv(key)
}
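
A minimal usage sketch of the new Token.Environ method added above, assuming the vendored golang.org/x/sys/windows package and a Windows build target (the main package and its error handling are illustrative only):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Open a real handle to the current process token; unlike the
	// GetCurrentProcessToken pseudo token, it must be closed.
	token, err := windows.OpenCurrentProcessToken()
	if err != nil {
		log.Fatal(err)
	}
	defer token.Close()

	// With inheritExisting set to false, only the token's default
	// environment block is returned, decoded from UTF-16 into strings.
	env, err := token.Environ(false)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		fmt.Println(kv)
	}
}
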
diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash
new file mode 100644
index 000000000..2163843a1
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mkerrors.bash
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# Copyright 2019 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+shopt -s nullglob
+
+winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)"
+[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; }
+
+declare -A errors
+
+{
+ echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT."
+ echo
+ echo "package windows"
+ echo "import \"syscall\""
+ echo "const ("
+
+ while read -r line; do
+ unset vtype
+ if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then
+ key="${BASH_REMATCH[1]}"
+ value="${BASH_REMATCH[3]}"
+ elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then
+ key="${BASH_REMATCH[1]}"
+ value="${BASH_REMATCH[3]}"
+ vtype="${BASH_REMATCH[2]}"
+ elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then
+ key="${BASH_REMATCH[1]}"
+ value="${BASH_REMATCH[3]}"
+ vtype="${BASH_REMATCH[2]}"
+ else
+ continue
+ fi
+ [[ -n $key && -n $value ]] || continue
+ [[ -z ${errors["$key"]} ]] || continue
+ errors["$key"]="$value"
+ if [[ -v vtype ]]; then
+ if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then
+ vtype=""
+ elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then
+ vtype="Handle"
+ else
+ vtype="syscall.Errno"
+ fi
+ last_vtype="$vtype"
+ else
+ vtype=""
+ if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then
+ value="S_OK"
+ elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then
+ value="ERROR_SUCCESS"
+ fi
+ fi
+
+ echo "$key $vtype = $value"
+ done < "$winerror"
+
+ echo ")"
+} | gofmt > "zerrors_windows.go"
diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash
new file mode 100644
index 000000000..ab8924e93
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Copyright 2019 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+shopt -s nullglob
+
+knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)"
+[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; }
+
+{
+ echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT."
+ echo
+ echo "package windows"
+ echo "type KNOWNFOLDERID GUID"
+ echo "var ("
+ while read -r line; do
+ [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue
+ printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \
+ "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \
+ $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \
+ $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" ))
+ done < "$knownfolders"
+ echo ")"
+} | gofmt > "zknownfolderids_windows.go"
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go
index fb7db0ef8..627705727 100644
--- a/vendor/golang.org/x/sys/windows/mksyscall.go
+++ b/vendor/golang.org/x/sys/windows/mksyscall.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build generate
+
package windows
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
index 0ac95ffe7..cf843ce2b 100644
--- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build generate
+
package registry
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
index 71d4e15ba..7487e05f8 100644
--- a/vendor/golang.org/x/sys/windows/registry/value.go
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -68,7 +68,7 @@ func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error
return int(l), valtype, nil
}
-func (k Key) getValue(name string, buf []byte) (date []byte, valtype uint32, err error) {
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
p, err := syscall.UTF16PtrFromString(name)
if err != nil {
return nil, 0, err
@@ -241,12 +241,15 @@ func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error
if len(data) != 4 {
return 0, typ, errors.New("DWORD value is not 4 bytes long")
}
- return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil
+ var val32 uint32
+ copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+ return uint64(val32), DWORD, nil
case QWORD:
if len(data) != 8 {
return 0, typ, errors.New("QWORD value is not 8 bytes long")
}
- return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil
+ copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+ return val, QWORD, nil
default:
return 0, typ, ErrUnexpectedType
}
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 4f17a3331..61b49647b 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -149,7 +149,7 @@ const (
DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b
DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c
DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d
- DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e
+ DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e
DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f
DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230
DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231
@@ -169,15 +169,21 @@ const (
//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid
//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid
//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid
+//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid
+//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid
//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid
//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid
+//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority
+//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount
+//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority
+//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid
// The security identifier (SID) structure is a variable-length
// structure used to uniquely identify users or groups.
type SID struct{}
// StringToSid converts a string-format security identifier
-// sid into a valid, functional sid.
+// SID into a valid, functional SID.
func StringToSid(s string) (*SID, error) {
var sid *SID
p, e := UTF16PtrFromString(s)
@@ -192,7 +198,7 @@ func StringToSid(s string) (*SID, error) {
return sid.Copy()
}
-// LookupSID retrieves a security identifier sid for the account
+// LookupSID retrieves a security identifier SID for the account
// and the name of the domain on which the account was found.
// System specify target computer to search.
func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
@@ -229,7 +235,7 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32,
}
}
-// String converts sid to a string format
+// String converts SID to a string format
// suitable for display, storage, or transmission.
func (sid *SID) String() (string, error) {
var s *uint16
@@ -241,12 +247,12 @@ func (sid *SID) String() (string, error) {
return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil
}
-// Len returns the length, in bytes, of a valid security identifier sid.
+// Len returns the length, in bytes, of a valid security identifier SID.
func (sid *SID) Len() int {
return int(GetLengthSid(sid))
}
-// Copy creates a duplicate of security identifier sid.
+// Copy creates a duplicate of security identifier SID.
func (sid *SID) Copy() (*SID, error) {
b := make([]byte, sid.Len())
sid2 := (*SID)(unsafe.Pointer(&b[0]))
@@ -257,8 +263,42 @@ func (sid *SID) Copy() (*SID, error) {
return sid2, nil
}
-// LookupAccount retrieves the name of the account for this sid
-// and the name of the first domain on which this sid is found.
+// IdentifierAuthority returns the identifier authority of the SID.
+func (sid *SID) IdentifierAuthority() SidIdentifierAuthority {
+ return *getSidIdentifierAuthority(sid)
+}
+
+// SubAuthorityCount returns the number of sub-authorities in the SID.
+func (sid *SID) SubAuthorityCount() uint8 {
+ return *getSidSubAuthorityCount(sid)
+}
+
+// SubAuthority returns the sub-authority of the SID as specified by
+// the index, which must be less than sid.SubAuthorityCount().
+func (sid *SID) SubAuthority(idx uint32) uint32 {
+ if idx >= uint32(sid.SubAuthorityCount()) {
+ panic("sub-authority index out of range")
+ }
+ return *getSidSubAuthority(sid, idx)
+}
+
+// IsValid returns whether the SID has a valid revision and length.
+func (sid *SID) IsValid() bool {
+ return isValidSid(sid)
+}
+
+// Equals compares two SIDs for equality.
+func (sid *SID) Equals(sid2 *SID) bool {
+ return EqualSid(sid, sid2)
+}
+
+// IsWellKnown determines whether the SID matches the well-known sidType.
+func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool {
+ return isWellKnownSid(sid, sidType)
+}
+
+// LookupAccount retrieves the name of the account for this SID
+// and the name of the first domain on which this SID is found.
// System specify target computer to search for.
func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) {
var sys *uint16
@@ -286,6 +326,158 @@ func (sid *SID) LookupAccount(system string) (account, domain string, accType ui
}
}
+// WELL_KNOWN_SID_TYPE identifies one of the various types of pre-specified SIDs that can be synthesized and compared at runtime.
+type WELL_KNOWN_SID_TYPE uint32
+
+const (
+ WinNullSid = 0
+ WinWorldSid = 1
+ WinLocalSid = 2
+ WinCreatorOwnerSid = 3
+ WinCreatorGroupSid = 4
+ WinCreatorOwnerServerSid = 5
+ WinCreatorGroupServerSid = 6
+ WinNtAuthoritySid = 7
+ WinDialupSid = 8
+ WinNetworkSid = 9
+ WinBatchSid = 10
+ WinInteractiveSid = 11
+ WinServiceSid = 12
+ WinAnonymousSid = 13
+ WinProxySid = 14
+ WinEnterpriseControllersSid = 15
+ WinSelfSid = 16
+ WinAuthenticatedUserSid = 17
+ WinRestrictedCodeSid = 18
+ WinTerminalServerSid = 19
+ WinRemoteLogonIdSid = 20
+ WinLogonIdsSid = 21
+ WinLocalSystemSid = 22
+ WinLocalServiceSid = 23
+ WinNetworkServiceSid = 24
+ WinBuiltinDomainSid = 25
+ WinBuiltinAdministratorsSid = 26
+ WinBuiltinUsersSid = 27
+ WinBuiltinGuestsSid = 28
+ WinBuiltinPowerUsersSid = 29
+ WinBuiltinAccountOperatorsSid = 30
+ WinBuiltinSystemOperatorsSid = 31
+ WinBuiltinPrintOperatorsSid = 32
+ WinBuiltinBackupOperatorsSid = 33
+ WinBuiltinReplicatorSid = 34
+ WinBuiltinPreWindows2000CompatibleAccessSid = 35
+ WinBuiltinRemoteDesktopUsersSid = 36
+ WinBuiltinNetworkConfigurationOperatorsSid = 37
+ WinAccountAdministratorSid = 38
+ WinAccountGuestSid = 39
+ WinAccountKrbtgtSid = 40
+ WinAccountDomainAdminsSid = 41
+ WinAccountDomainUsersSid = 42
+ WinAccountDomainGuestsSid = 43
+ WinAccountComputersSid = 44
+ WinAccountControllersSid = 45
+ WinAccountCertAdminsSid = 46
+ WinAccountSchemaAdminsSid = 47
+ WinAccountEnterpriseAdminsSid = 48
+ WinAccountPolicyAdminsSid = 49
+ WinAccountRasAndIasServersSid = 50
+ WinNTLMAuthenticationSid = 51
+ WinDigestAuthenticationSid = 52
+ WinSChannelAuthenticationSid = 53
+ WinThisOrganizationSid = 54
+ WinOtherOrganizationSid = 55
+ WinBuiltinIncomingForestTrustBuildersSid = 56
+ WinBuiltinPerfMonitoringUsersSid = 57
+ WinBuiltinPerfLoggingUsersSid = 58
+ WinBuiltinAuthorizationAccessSid = 59
+ WinBuiltinTerminalServerLicenseServersSid = 60
+ WinBuiltinDCOMUsersSid = 61
+ WinBuiltinIUsersSid = 62
+ WinIUserSid = 63
+ WinBuiltinCryptoOperatorsSid = 64
+ WinUntrustedLabelSid = 65
+ WinLowLabelSid = 66
+ WinMediumLabelSid = 67
+ WinHighLabelSid = 68
+ WinSystemLabelSid = 69
+ WinWriteRestrictedCodeSid = 70
+ WinCreatorOwnerRightsSid = 71
+ WinCacheablePrincipalsGroupSid = 72
+ WinNonCacheablePrincipalsGroupSid = 73
+ WinEnterpriseReadonlyControllersSid = 74
+ WinAccountReadonlyControllersSid = 75
+ WinBuiltinEventLogReadersGroup = 76
+ WinNewEnterpriseReadonlyControllersSid = 77
+ WinBuiltinCertSvcDComAccessGroup = 78
+ WinMediumPlusLabelSid = 79
+ WinLocalLogonSid = 80
+ WinConsoleLogonSid = 81
+ WinThisOrganizationCertificateSid = 82
+ WinApplicationPackageAuthoritySid = 83
+ WinBuiltinAnyPackageSid = 84
+ WinCapabilityInternetClientSid = 85
+ WinCapabilityInternetClientServerSid = 86
+ WinCapabilityPrivateNetworkClientServerSid = 87
+ WinCapabilityPicturesLibrarySid = 88
+ WinCapabilityVideosLibrarySid = 89
+ WinCapabilityMusicLibrarySid = 90
+ WinCapabilityDocumentsLibrarySid = 91
+ WinCapabilitySharedUserCertificatesSid = 92
+ WinCapabilityEnterpriseAuthenticationSid = 93
+ WinCapabilityRemovableStorageSid = 94
+ WinBuiltinRDSRemoteAccessServersSid = 95
+ WinBuiltinRDSEndpointServersSid = 96
+ WinBuiltinRDSManagementServersSid = 97
+ WinUserModeDriversSid = 98
+ WinBuiltinHyperVAdminsSid = 99
+ WinAccountCloneableControllersSid = 100
+ WinBuiltinAccessControlAssistanceOperatorsSid = 101
+ WinBuiltinRemoteManagementUsersSid = 102
+ WinAuthenticationAuthorityAssertedSid = 103
+ WinAuthenticationServiceAssertedSid = 104
+ WinLocalAccountSid = 105
+ WinLocalAccountAndAdministratorSid = 106
+ WinAccountProtectedUsersSid = 107
+ WinCapabilityAppointmentsSid = 108
+ WinCapabilityContactsSid = 109
+ WinAccountDefaultSystemManagedSid = 110
+ WinBuiltinDefaultSystemManagedGroupSid = 111
+ WinBuiltinStorageReplicaAdminsSid = 112
+ WinAccountKeyAdminsSid = 113
+ WinAccountEnterpriseKeyAdminsSid = 114
+ WinAuthenticationKeyTrustSid = 115
+ WinAuthenticationKeyPropertyMFASid = 116
+ WinAuthenticationKeyPropertyAttestationSid = 117
+ WinAuthenticationFreshKeyAuthSid = 118
+ WinBuiltinDeviceOwnersSid = 119
+)
+
+// CreateWellKnownSid creates a SID for a well-known predefined alias, generally using the constants of the form
+// Win*Sid, for the local machine.
+func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) {
+ return CreateWellKnownDomainSid(sidType, nil)
+}
+
+// CreateWellKnownDomainSid creates a SID for a well-known predefined alias, generally using the constants of the form
+// Win*Sid, for the domain specified by the domainSid parameter.
+func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) {
+ n := uint32(50)
+ for {
+ b := make([]byte, n)
+ sid := (*SID)(unsafe.Pointer(&b[0]))
+ err := createWellKnownSid(sidType, domainSid, sid, &n)
+ if err == nil {
+ return sid, nil
+ }
+ if err != ERROR_INSUFFICIENT_BUFFER {
+ return nil, err
+ }
+ if n <= uint32(len(b)) {
+ return nil, err
+ }
+ }
+}
+
const (
// do not reorder
TOKEN_ASSIGN_PRIMARY = 1 << iota
@@ -349,6 +541,53 @@ const (
MaxTokenInfoClass
)
+// Group attributes inside of Tokengroups.Groups[i].Attributes
+const (
+ SE_GROUP_MANDATORY = 0x00000001
+ SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002
+ SE_GROUP_ENABLED = 0x00000004
+ SE_GROUP_OWNER = 0x00000008
+ SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010
+ SE_GROUP_INTEGRITY = 0x00000020
+ SE_GROUP_INTEGRITY_ENABLED = 0x00000040
+ SE_GROUP_LOGON_ID = 0xC0000000
+ SE_GROUP_RESOURCE = 0x20000000
+ SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED
+)
+
+// Privilege attributes
+const (
+ SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
+ SE_PRIVILEGE_ENABLED = 0x00000002
+ SE_PRIVILEGE_REMOVED = 0x00000004
+ SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000
+ SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS
+)
+
+// Token types
+const (
+ TokenPrimary = 1
+ TokenImpersonation = 2
+)
+
+// Impersonation levels
+const (
+ SecurityAnonymous = 0
+ SecurityIdentification = 1
+ SecurityImpersonation = 2
+ SecurityDelegation = 3
+)
+
+type LUID struct {
+ LowPart uint32
+ HighPart int32
+}
+
+type LUIDAndAttributes struct {
+ Luid LUID
+ Attributes uint32
+}
+
type SIDAndAttributes struct {
Sid *SID
Attributes uint32
@@ -364,14 +603,47 @@ type Tokenprimarygroup struct {
type Tokengroups struct {
GroupCount uint32
- Groups [1]SIDAndAttributes
+ Groups [1]SIDAndAttributes // Use AllGroups() for iterating.
+}
+
+// AllGroups returns a slice that can be used to iterate over the groups in g.
+func (g *Tokengroups) AllGroups() []SIDAndAttributes {
+ return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount]
+}
+
+type Tokenprivileges struct {
+ PrivilegeCount uint32
+ Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating.
+}
+
+// AllPrivileges returns a slice that can be used to iterate over the privileges in p.
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes {
+ return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount]
+}
+
+type Tokenmandatorylabel struct {
+ Label SIDAndAttributes
+}
+
+func (tml *Tokenmandatorylabel) Size() uint32 {
+ return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid)
}
// Authorization Functions
-//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership
-//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
-//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership
+//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
+//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken
+//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf
+//sys RevertToSelf() (err error) = advapi32.RevertToSelf
+//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken
+//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW
+//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges
+//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups
+//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation
+//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx
//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW
// An access token contains the security information for a logon session.
// The system creates an access token when a user logs on, and every
@@ -383,7 +655,9 @@ type Tokengroups struct {
type Token Handle
// OpenCurrentProcessToken opens the access token
-// associated with current process.
+// associated with the current process. It is a real
+// token that needs to be closed, unlike
+// GetCurrentProcessToken.
func OpenCurrentProcessToken() (Token, error) {
p, e := GetCurrentProcess()
if e != nil {
@@ -397,6 +671,27 @@ func OpenCurrentProcessToken() (Token, error) {
return t, nil
}
+// GetCurrentProcessToken returns the access token associated with
+// the current process. It is a pseudo token that does not need
+// to be closed.
+func GetCurrentProcessToken() Token {
+ return Token(^uintptr(4 - 1))
+}
+
+// GetCurrentThreadToken returns the access token associated with
+// the current thread. It is a pseudo token that does not need
+// to be closed.
+func GetCurrentThreadToken() Token {
+ return Token(^uintptr(5 - 1))
+}
+
+// GetCurrentThreadEffectiveToken returns the effective access token
+// associated with the current thread. It is a pseudo token that does
+// not need to be closed.
+func GetCurrentThreadEffectiveToken() Token {
+ return Token(^uintptr(6 - 1))
+}
+
// Close releases access to access token.
func (t Token) Close() error {
return CloseHandle(Handle(t))
@@ -468,6 +763,45 @@ func (t Token) GetUserProfileDirectory() (string, error) {
}
}
+// IsElevated returns whether the token is elevated from a UAC perspective.
+func (token Token) IsElevated() bool {
+ var isElevated uint32
+ var outLen uint32
+ err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen)
+ if err != nil {
+ return false
+ }
+ return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0
+}
+
+// GetLinkedToken returns the linked token, which may be an elevated UAC token.
+func (token Token) GetLinkedToken() (Token, error) {
+ var linkedToken Token
+ var outLen uint32
+ err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen)
+ if err != nil {
+ return Token(0), err
+ }
+ return linkedToken, nil
+}
+
+// GetSystemDirectory retrieves the path to the current location of the system
+// directory, which is typically, though not always, C:\Windows\System32.
+func GetSystemDirectory() (string, error) {
+ n := uint32(MAX_PATH)
+ for {
+ b := make([]uint16, n)
+ l, e := getSystemDirectory(&b[0], n)
+ if e != nil {
+ return "", e
+ }
+ if l <= n {
+ return UTF16ToString(b[:l]), nil
+ }
+ n = l
+ }
+}
+
// IsMember reports whether the access token t is a member of the provided SID.
func (t Token) IsMember(sid *SID) (bool, error) {
var b int32
@@ -476,3 +810,45 @@ func (t Token) IsMember(sid *SID) (bool, error) {
}
return b != 0, nil
}
+
+const (
+ WTS_CONSOLE_CONNECT = 0x1
+ WTS_CONSOLE_DISCONNECT = 0x2
+ WTS_REMOTE_CONNECT = 0x3
+ WTS_REMOTE_DISCONNECT = 0x4
+ WTS_SESSION_LOGON = 0x5
+ WTS_SESSION_LOGOFF = 0x6
+ WTS_SESSION_LOCK = 0x7
+ WTS_SESSION_UNLOCK = 0x8
+ WTS_SESSION_REMOTE_CONTROL = 0x9
+ WTS_SESSION_CREATE = 0xa
+ WTS_SESSION_TERMINATE = 0xb
+)
+
+const (
+ WTSActive = 0
+ WTSConnected = 1
+ WTSConnectQuery = 2
+ WTSShadow = 3
+ WTSDisconnected = 4
+ WTSIdle = 5
+ WTSListen = 6
+ WTSReset = 7
+ WTSDown = 8
+ WTSInit = 9
+)
+
+type WTSSESSION_NOTIFICATION struct {
+ Size uint32
+ SessionID uint32
+}
+
+type WTS_SESSION_INFO struct {
+ SessionID uint32
+ WindowStationName *uint16
+ State uint32
+}
+
+//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken
+//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW
+//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory
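
A minimal sketch of the well-known SID helpers added above (CreateWellKnownSid, IsWellKnown, SubAuthorityCount), again assuming the vendored golang.org/x/sys/windows package on a Windows build target:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Synthesize BUILTIN\Administrators instead of hard-coding "S-1-5-32-544".
	adminSid, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
	if err != nil {
		log.Fatal(err)
	}

	s, err := adminSid.String()
	if err != nil {
		log.Fatal(err)
	}

	// Typically prints: S-1-5-32-544 true 2
	fmt.Println(s, adminSid.IsWellKnown(windows.WinBuiltinAdministratorsSid), adminSid.SubAuthorityCount())
}

CreateWellKnownDomainSid follows the same pattern when a domain-relative alias is needed, taking the domain SID as its second argument.
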
diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go
index 62fc31b40..03383f1df 100644
--- a/vendor/golang.org/x/sys/windows/service.go
+++ b/vendor/golang.org/x/sys/windows/service.go
@@ -85,23 +85,47 @@ const (
SERVICE_INACTIVE = 2
SERVICE_STATE_ALL = 3
- SERVICE_QUERY_CONFIG = 1
- SERVICE_CHANGE_CONFIG = 2
- SERVICE_QUERY_STATUS = 4
- SERVICE_ENUMERATE_DEPENDENTS = 8
- SERVICE_START = 16
- SERVICE_STOP = 32
- SERVICE_PAUSE_CONTINUE = 64
- SERVICE_INTERROGATE = 128
- SERVICE_USER_DEFINED_CONTROL = 256
- SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL
+ SERVICE_QUERY_CONFIG = 1
+ SERVICE_CHANGE_CONFIG = 2
+ SERVICE_QUERY_STATUS = 4
+ SERVICE_ENUMERATE_DEPENDENTS = 8
+ SERVICE_START = 16
+ SERVICE_STOP = 32
+ SERVICE_PAUSE_CONTINUE = 64
+ SERVICE_INTERROGATE = 128
+ SERVICE_USER_DEFINED_CONTROL = 256
+ SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL
+
SERVICE_RUNS_IN_SYSTEM_PROCESS = 1
- SERVICE_CONFIG_DESCRIPTION = 1
- SERVICE_CONFIG_FAILURE_ACTIONS = 2
- NO_ERROR = 0
+ SERVICE_CONFIG_DESCRIPTION = 1
+ SERVICE_CONFIG_FAILURE_ACTIONS = 2
+ SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3
+ SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4
+ SERVICE_CONFIG_SERVICE_SID_INFO = 5
+ SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6
+ SERVICE_CONFIG_PRESHUTDOWN_INFO = 7
+ SERVICE_CONFIG_TRIGGER_INFO = 8
+ SERVICE_CONFIG_PREFERRED_NODE = 9
+ SERVICE_CONFIG_LAUNCH_PROTECTED = 12
+
+ SERVICE_SID_TYPE_NONE = 0
+ SERVICE_SID_TYPE_UNRESTRICTED = 1
+ SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED
SC_ENUM_PROCESS_INFO = 0
+
+ SERVICE_NOTIFY_STATUS_CHANGE = 2
+ SERVICE_NOTIFY_STOPPED = 0x00000001
+ SERVICE_NOTIFY_START_PENDING = 0x00000002
+ SERVICE_NOTIFY_STOP_PENDING = 0x00000004
+ SERVICE_NOTIFY_RUNNING = 0x00000008
+ SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010
+ SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020
+ SERVICE_NOTIFY_PAUSED = 0x00000040
+ SERVICE_NOTIFY_CREATED = 0x00000080
+ SERVICE_NOTIFY_DELETED = 0x00000100
+ SERVICE_NOTIFY_DELETE_PENDING = 0x00000200
)
type SERVICE_STATUS struct {
@@ -153,6 +177,16 @@ type ENUM_SERVICE_STATUS_PROCESS struct {
ServiceStatusProcess SERVICE_STATUS_PROCESS
}
+type SERVICE_NOTIFY struct {
+ Version uint32
+ NotifyCallback uintptr
+ Context uintptr
+ NotificationStatus uint32
+ ServiceStatus SERVICE_STATUS_PROCESS
+ NotificationTriggered uint32
+ ServiceNames *uint16
+}
+
type SERVICE_FAILURE_ACTIONS struct {
ResetPeriod uint32
RebootMsg *uint16
@@ -166,12 +200,19 @@ type SC_ACTION struct {
Delay uint32
}
+type QUERY_SERVICE_LOCK_STATUS struct {
+ IsLocked uint32
+ LockOwner *uint16
+ LockDuration uint32
+}
+
//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle
//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW
//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW
//sys DeleteService(service Handle) (err error) = advapi32.DeleteService
//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW
//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus
+//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW
//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService
//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW
//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus
@@ -180,4 +221,5 @@ type SC_ACTION struct {
//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W
//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W
//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW
-//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx
+//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx
+//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW
diff --git a/vendor/golang.org/x/sys/windows/svc/event.go b/vendor/golang.org/x/sys/windows/svc/event.go
deleted file mode 100644
index 0508e2288..000000000
--- a/vendor/golang.org/x/sys/windows/svc/event.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package svc
-
-import (
- "errors"
-
- "golang.org/x/sys/windows"
-)
-
-// event represents auto-reset, initially non-signaled Windows event.
-// It is used to communicate between go and asm parts of this package.
-type event struct {
- h windows.Handle
-}
-
-func newEvent() (*event, error) {
- h, err := windows.CreateEvent(nil, 0, 0, nil)
- if err != nil {
- return nil, err
- }
- return &event{h: h}, nil
-}
-
-func (e *event) Close() error {
- return windows.CloseHandle(e.h)
-}
-
-func (e *event) Set() error {
- return windows.SetEvent(e.h)
-}
-
-func (e *event) Wait() error {
- s, err := windows.WaitForSingleObject(e.h, windows.INFINITE)
- switch s {
- case windows.WAIT_OBJECT_0:
- break
- case windows.WAIT_FAILED:
- return err
- default:
- return errors.New("unexpected result from WaitForSingleObject")
- }
- return nil
-}
diff --git a/vendor/golang.org/x/sys/windows/svc/go12.c b/vendor/golang.org/x/sys/windows/svc/go12.c
deleted file mode 100644
index 6f1be1fa3..000000000
--- a/vendor/golang.org/x/sys/windows/svc/go12.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-// +build !go1.3
-
-// copied from pkg/runtime
-typedef unsigned int uint32;
-typedef unsigned long long int uint64;
-#ifdef _64BIT
-typedef uint64 uintptr;
-#else
-typedef uint32 uintptr;
-#endif
-
-// from sys_386.s or sys_amd64.s
-void ·servicemain(void);
-
-void
-·getServiceMain(uintptr *r)
-{
- *r = (uintptr)·servicemain;
-}
diff --git a/vendor/golang.org/x/sys/windows/svc/go12.go b/vendor/golang.org/x/sys/windows/svc/go12.go
deleted file mode 100644
index cd8b913c9..000000000
--- a/vendor/golang.org/x/sys/windows/svc/go12.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-// +build !go1.3
-
-package svc
-
-// from go12.c
-func getServiceMain(r *uintptr)
diff --git a/vendor/golang.org/x/sys/windows/svc/go13.go b/vendor/golang.org/x/sys/windows/svc/go13.go
deleted file mode 100644
index 9d7f3cec5..000000000
--- a/vendor/golang.org/x/sys/windows/svc/go13.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-// +build go1.3
-
-package svc
-
-import "unsafe"
-
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-
-// Should be a built-in for unsafe.Pointer?
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-func funcPC(f interface{}) uintptr {
- return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
-}
-
-// from sys_386.s and sys_amd64.s
-func servicectlhandler(ctl uint32) uintptr
-func servicemain(argc uint32, argv **uint16)
-
-func getServiceMain(r *uintptr) {
- *r = funcPC(servicemain)
-}
diff --git a/vendor/golang.org/x/sys/windows/svc/security.go b/vendor/golang.org/x/sys/windows/svc/security.go
deleted file mode 100644
index 6fbc9236e..000000000
--- a/vendor/golang.org/x/sys/windows/svc/security.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package svc
-
-import (
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-func allocSid(subAuth0 uint32) (*windows.SID, error) {
- var sid *windows.SID
- err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY,
- 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid)
- if err != nil {
- return nil, err
- }
- return sid, nil
-}
-
-// IsAnInteractiveSession determines if calling process is running interactively.
-// It queries the process token for membership in the Interactive group.
-// http://stackoverflow.com/questions/2668851/how-do-i-detect-that-my-application-is-running-as-service-or-in-an-interactive-s
-func IsAnInteractiveSession() (bool, error) {
- interSid, err := allocSid(windows.SECURITY_INTERACTIVE_RID)
- if err != nil {
- return false, err
- }
- defer windows.FreeSid(interSid)
-
- serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID)
- if err != nil {
- return false, err
- }
- defer windows.FreeSid(serviceSid)
-
- t, err := windows.OpenCurrentProcessToken()
- if err != nil {
- return false, err
- }
- defer t.Close()
-
- gs, err := t.GetTokenGroups()
- if err != nil {
- return false, err
- }
- p := unsafe.Pointer(&gs.Groups[0])
- groups := (*[2 << 20]windows.SIDAndAttributes)(p)[:gs.GroupCount]
- for _, g := range groups {
- if windows.EqualSid(g.Sid, interSid) {
- return true, nil
- }
- if windows.EqualSid(g.Sid, serviceSid) {
- return false, nil
- }
- }
- return false, nil
-}
diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go
deleted file mode 100644
index cda26b54b..000000000
--- a/vendor/golang.org/x/sys/windows/svc/service.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// Package svc provides everything required to build Windows service.
-//
-package svc
-
-import (
- "errors"
- "runtime"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-// State describes service execution state (Stopped, Running and so on).
-type State uint32
-
-const (
- Stopped = State(windows.SERVICE_STOPPED)
- StartPending = State(windows.SERVICE_START_PENDING)
- StopPending = State(windows.SERVICE_STOP_PENDING)
- Running = State(windows.SERVICE_RUNNING)
- ContinuePending = State(windows.SERVICE_CONTINUE_PENDING)
- PausePending = State(windows.SERVICE_PAUSE_PENDING)
- Paused = State(windows.SERVICE_PAUSED)
-)
-
-// Cmd represents service state change request. It is sent to a service
-// by the service manager, and should be actioned upon by the service.
-type Cmd uint32
-
-const (
- Stop = Cmd(windows.SERVICE_CONTROL_STOP)
- Pause = Cmd(windows.SERVICE_CONTROL_PAUSE)
- Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE)
- Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE)
- Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN)
- ParamChange = Cmd(windows.SERVICE_CONTROL_PARAMCHANGE)
- NetBindAdd = Cmd(windows.SERVICE_CONTROL_NETBINDADD)
- NetBindRemove = Cmd(windows.SERVICE_CONTROL_NETBINDREMOVE)
- NetBindEnable = Cmd(windows.SERVICE_CONTROL_NETBINDENABLE)
- NetBindDisable = Cmd(windows.SERVICE_CONTROL_NETBINDDISABLE)
- DeviceEvent = Cmd(windows.SERVICE_CONTROL_DEVICEEVENT)
- HardwareProfileChange = Cmd(windows.SERVICE_CONTROL_HARDWAREPROFILECHANGE)
- PowerEvent = Cmd(windows.SERVICE_CONTROL_POWEREVENT)
- SessionChange = Cmd(windows.SERVICE_CONTROL_SESSIONCHANGE)
-)
-
-// Accepted is used to describe commands accepted by the service.
-// Note that Interrogate is always accepted.
-type Accepted uint32
-
-const (
- AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP)
- AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN)
- AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE)
- AcceptParamChange = Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)
- AcceptNetBindChange = Accepted(windows.SERVICE_ACCEPT_NETBINDCHANGE)
- AcceptHardwareProfileChange = Accepted(windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE)
- AcceptPowerEvent = Accepted(windows.SERVICE_ACCEPT_POWEREVENT)
- AcceptSessionChange = Accepted(windows.SERVICE_ACCEPT_SESSIONCHANGE)
-)
-
-// Status combines State and Accepted commands to fully describe running service.
-type Status struct {
- State State
- Accepts Accepted
- CheckPoint uint32 // used to report progress during a lengthy operation
- WaitHint uint32 // estimated time required for a pending operation, in milliseconds
-}
-
-// ChangeRequest is sent to the service Handler to request service status change.
-type ChangeRequest struct {
- Cmd Cmd
- EventType uint32
- EventData uintptr
- CurrentStatus Status
-}
-
-// Handler is the interface that must be implemented to build Windows service.
-type Handler interface {
-
- // Execute will be called by the package code at the start of
- // the service, and the service will exit once Execute completes.
- // Inside Execute you must read service change requests from r and
- // act accordingly. You must keep service control manager up to date
- // about state of your service by writing into s as required.
- // args contains service name followed by argument strings passed
- // to the service.
- // You can provide service exit code in exitCode return parameter,
- // with 0 being "no error". You can also indicate if exit code,
- // if any, is service specific or not by using svcSpecificEC
- // parameter.
- Execute(args []string, r <-chan ChangeRequest, s chan<- Status) (svcSpecificEC bool, exitCode uint32)
-}
-
-var (
- // These are used by asm code.
- goWaitsH uintptr
- cWaitsH uintptr
- ssHandle uintptr
- sName *uint16
- sArgc uintptr
- sArgv **uint16
- ctlHandlerExProc uintptr
- cSetEvent uintptr
- cWaitForSingleObject uintptr
- cRegisterServiceCtrlHandlerExW uintptr
-)
-
-func init() {
- k := syscall.MustLoadDLL("kernel32.dll")
- cSetEvent = k.MustFindProc("SetEvent").Addr()
- cWaitForSingleObject = k.MustFindProc("WaitForSingleObject").Addr()
- a := syscall.MustLoadDLL("advapi32.dll")
- cRegisterServiceCtrlHandlerExW = a.MustFindProc("RegisterServiceCtrlHandlerExW").Addr()
-}
-
-// The HandlerEx prototype also has a context pointer but since we don't use
-// it at start-up time we don't have to pass it over either.
-type ctlEvent struct {
- cmd Cmd
- eventType uint32
- eventData uintptr
- errno uint32
-}
-
-// service provides access to windows service api.
-type service struct {
- name string
- h windows.Handle
- cWaits *event
- goWaits *event
- c chan ctlEvent
- handler Handler
-}
-
-func newService(name string, handler Handler) (*service, error) {
- var s service
- var err error
- s.name = name
- s.c = make(chan ctlEvent)
- s.handler = handler
- s.cWaits, err = newEvent()
- if err != nil {
- return nil, err
- }
- s.goWaits, err = newEvent()
- if err != nil {
- s.cWaits.Close()
- return nil, err
- }
- return &s, nil
-}
-
-func (s *service) close() error {
- s.cWaits.Close()
- s.goWaits.Close()
- return nil
-}
-
-type exitCode struct {
- isSvcSpecific bool
- errno uint32
-}
-
-func (s *service) updateStatus(status *Status, ec *exitCode) error {
- if s.h == 0 {
- return errors.New("updateStatus with no service status handle")
- }
- var t windows.SERVICE_STATUS
- t.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS
- t.CurrentState = uint32(status.State)
- if status.Accepts&AcceptStop != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_STOP
- }
- if status.Accepts&AcceptShutdown != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_SHUTDOWN
- }
- if status.Accepts&AcceptPauseAndContinue != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_PAUSE_CONTINUE
- }
- if status.Accepts&AcceptParamChange != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_PARAMCHANGE
- }
- if status.Accepts&AcceptNetBindChange != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_NETBINDCHANGE
- }
- if status.Accepts&AcceptHardwareProfileChange != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE
- }
- if status.Accepts&AcceptPowerEvent != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_POWEREVENT
- }
- if status.Accepts&AcceptSessionChange != 0 {
- t.ControlsAccepted |= windows.SERVICE_ACCEPT_SESSIONCHANGE
- }
- if ec.errno == 0 {
- t.Win32ExitCode = windows.NO_ERROR
- t.ServiceSpecificExitCode = windows.NO_ERROR
- } else if ec.isSvcSpecific {
- t.Win32ExitCode = uint32(windows.ERROR_SERVICE_SPECIFIC_ERROR)
- t.ServiceSpecificExitCode = ec.errno
- } else {
- t.Win32ExitCode = ec.errno
- t.ServiceSpecificExitCode = windows.NO_ERROR
- }
- t.CheckPoint = status.CheckPoint
- t.WaitHint = status.WaitHint
- return windows.SetServiceStatus(s.h, &t)
-}
-
-const (
- sysErrSetServiceStatusFailed = uint32(syscall.APPLICATION_ERROR) + iota
- sysErrNewThreadInCallback
-)
-
-func (s *service) run() {
- s.goWaits.Wait()
- s.h = windows.Handle(ssHandle)
- argv := (*[100]*int16)(unsafe.Pointer(sArgv))[:sArgc]
- args := make([]string, len(argv))
- for i, a := range argv {
- args[i] = syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(a))[:])
- }
-
- cmdsToHandler := make(chan ChangeRequest)
- changesFromHandler := make(chan Status)
- exitFromHandler := make(chan exitCode)
-
- go func() {
- ss, errno := s.handler.Execute(args, cmdsToHandler, changesFromHandler)
- exitFromHandler <- exitCode{ss, errno}
- }()
-
- status := Status{State: Stopped}
- ec := exitCode{isSvcSpecific: true, errno: 0}
- var outch chan ChangeRequest
- inch := s.c
- var cmd Cmd
- var evtype uint32
- var evdata uintptr
-loop:
- for {
- select {
- case r := <-inch:
- if r.errno != 0 {
- ec.errno = r.errno
- break loop
- }
- inch = nil
- outch = cmdsToHandler
- cmd = r.cmd
- evtype = r.eventType
- evdata = r.eventData
- case outch <- ChangeRequest{cmd, evtype, evdata, status}:
- inch = s.c
- outch = nil
- case c := <-changesFromHandler:
- err := s.updateStatus(&c, &ec)
- if err != nil {
- // best suitable error number
- ec.errno = sysErrSetServiceStatusFailed
- if err2, ok := err.(syscall.Errno); ok {
- ec.errno = uint32(err2)
- }
- break loop
- }
- status = c
- case ec = <-exitFromHandler:
- break loop
- }
- }
-
- s.updateStatus(&Status{State: Stopped}, &ec)
- s.cWaits.Set()
-}
-
-func newCallback(fn interface{}) (cb uintptr, err error) {
- defer func() {
- r := recover()
- if r == nil {
- return
- }
- cb = 0
- switch v := r.(type) {
- case string:
- err = errors.New(v)
- case error:
- err = v
- default:
- err = errors.New("unexpected panic in syscall.NewCallback")
- }
- }()
- return syscall.NewCallback(fn), nil
-}
-
-// BUG(brainman): There is no mechanism to run multiple services
-// inside one single executable. Perhaps, it can be overcome by
-// using RegisterServiceCtrlHandlerEx Windows api.
-
-// Run executes service name by calling appropriate handler function.
-func Run(name string, handler Handler) error {
- runtime.LockOSThread()
-
- tid := windows.GetCurrentThreadId()
-
- s, err := newService(name, handler)
- if err != nil {
- return err
- }
-
- ctlHandler := func(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr {
- e := ctlEvent{cmd: Cmd(ctl), eventType: evtype, eventData: evdata}
- // We assume that this callback function is running on
- // the same thread as Run. Nowhere in MS documentation
- // I could find statement to guarantee that. So putting
- // check here to verify, otherwise things will go bad
- // quickly, if ignored.
- i := windows.GetCurrentThreadId()
- if i != tid {
- e.errno = sysErrNewThreadInCallback
- }
- s.c <- e
- // Always return NO_ERROR (0) for now.
- return 0
- }
-
- var svcmain uintptr
- getServiceMain(&svcmain)
- t := []windows.SERVICE_TABLE_ENTRY{
- {ServiceName: syscall.StringToUTF16Ptr(s.name), ServiceProc: svcmain},
- {ServiceName: nil, ServiceProc: 0},
- }
-
- goWaitsH = uintptr(s.goWaits.h)
- cWaitsH = uintptr(s.cWaits.h)
- sName = t[0].ServiceName
- ctlHandlerExProc, err = newCallback(ctlHandler)
- if err != nil {
- return err
- }
-
- go s.run()
-
- err = windows.StartServiceCtrlDispatcher(&t[0])
- if err != nil {
- return err
- }
- return nil
-}
-
-// StatusHandle returns service status handle. It is safe to call this function
-// from inside the Handler.Execute because then it is guaranteed to be set.
-// This code will have to change once multiple services are possible per process.
-func StatusHandle() windows.Handle {
- return windows.Handle(ssHandle)
-}
diff --git a/vendor/golang.org/x/sys/windows/svc/sys_386.s b/vendor/golang.org/x/sys/windows/svc/sys_386.s
deleted file mode 100644
index 2c82a9d91..000000000
--- a/vendor/golang.org/x/sys/windows/svc/sys_386.s
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// func servicemain(argc uint32, argv **uint16)
-TEXT ·servicemain(SB),7,$0
- MOVL argc+0(FP), AX
- MOVL AX, ·sArgc(SB)
- MOVL argv+4(FP), AX
- MOVL AX, ·sArgv(SB)
-
- PUSHL BP
- PUSHL BX
- PUSHL SI
- PUSHL DI
-
- SUBL $12, SP
-
- MOVL ·sName(SB), AX
- MOVL AX, (SP)
- MOVL $·servicectlhandler(SB), AX
- MOVL AX, 4(SP)
- MOVL $0, 8(SP)
- MOVL ·cRegisterServiceCtrlHandlerExW(SB), AX
- MOVL SP, BP
- CALL AX
- MOVL BP, SP
- CMPL AX, $0
- JE exit
- MOVL AX, ·ssHandle(SB)
-
- MOVL ·goWaitsH(SB), AX
- MOVL AX, (SP)
- MOVL ·cSetEvent(SB), AX
- MOVL SP, BP
- CALL AX
- MOVL BP, SP
-
- MOVL ·cWaitsH(SB), AX
- MOVL AX, (SP)
- MOVL $-1, AX
- MOVL AX, 4(SP)
- MOVL ·cWaitForSingleObject(SB), AX
- MOVL SP, BP
- CALL AX
- MOVL BP, SP
-
-exit:
- ADDL $12, SP
-
- POPL DI
- POPL SI
- POPL BX
- POPL BP
-
- MOVL 0(SP), CX
- ADDL $12, SP
- JMP CX
-
-// I do not know why, but this seems to be the only way to call
-// ctlHandlerProc on Windows 7.
-
-// func servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr {
-TEXT ·servicectlhandler(SB),7,$0
- MOVL ·ctlHandlerExProc(SB), CX
- JMP CX
diff --git a/vendor/golang.org/x/sys/windows/svc/sys_amd64.s b/vendor/golang.org/x/sys/windows/svc/sys_amd64.s
deleted file mode 100644
index bde25e9c4..000000000
--- a/vendor/golang.org/x/sys/windows/svc/sys_amd64.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// func servicemain(argc uint32, argv **uint16)
-TEXT ·servicemain(SB),7,$0
- MOVL CX, ·sArgc(SB)
- MOVQ DX, ·sArgv(SB)
-
- SUBQ $32, SP // stack for the first 4 syscall params
-
- MOVQ ·sName(SB), CX
- MOVQ $·servicectlhandler(SB), DX
- // BUG(pastarmovj): Figure out a way to pass in context in R8.
- MOVQ ·cRegisterServiceCtrlHandlerExW(SB), AX
- CALL AX
- CMPQ AX, $0
- JE exit
- MOVQ AX, ·ssHandle(SB)
-
- MOVQ ·goWaitsH(SB), CX
- MOVQ ·cSetEvent(SB), AX
- CALL AX
-
- MOVQ ·cWaitsH(SB), CX
- MOVQ $4294967295, DX
- MOVQ ·cWaitForSingleObject(SB), AX
- CALL AX
-
-exit:
- ADDQ $32, SP
- RET
-
-// I do not know why, but this seems to be the only way to call
-// ctlHandlerProc on Windows 7.
-
-// func ·servicectlhandler(ctl uint32, evtype uint32, evdata uintptr, context uintptr) uintptr {
-TEXT ·servicectlhandler(SB),7,$0
- MOVQ ·ctlHandlerExProc(SB), AX
- JMP AX
diff --git a/vendor/golang.org/x/sys/windows/svc/sys_arm.s b/vendor/golang.org/x/sys/windows/svc/sys_arm.s
deleted file mode 100644
index 33c692a8d..000000000
--- a/vendor/golang.org/x/sys/windows/svc/sys_arm.s
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-#include "textflag.h"
-
-// func servicemain(argc uint32, argv **uint16)
-TEXT ·servicemain(SB),NOSPLIT|NOFRAME,$0
- MOVM.DB.W [R4, R14], (R13) // push {r4, lr}
- MOVW R13, R4
- BIC $0x7, R13 // alignment for ABI
-
- MOVW R0, ·sArgc(SB)
- MOVW R1, ·sArgv(SB)
-
- MOVW ·sName(SB), R0
- MOVW ·ctlHandlerExProc(SB), R1
- MOVW $0, R2
- MOVW ·cRegisterServiceCtrlHandlerExW(SB), R3
- BL (R3)
- CMP $0, R0
- BEQ exit
- MOVW R0, ·ssHandle(SB)
-
- MOVW ·goWaitsH(SB), R0
- MOVW ·cSetEvent(SB), R1
- BL (R1)
-
- MOVW ·cWaitsH(SB), R0
- MOVW $-1, R1
- MOVW ·cWaitForSingleObject(SB), R2
- BL (R2)
-
-exit:
- MOVW R4, R13 // free extra stack space
- MOVM.IA.W (R13), [R4, R15] // pop {r4, pc}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 8a00b71f1..b23050924 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -10,6 +10,7 @@ import (
errorspkg "errors"
"sync"
"syscall"
+ "time"
"unicode/utf16"
"unsafe"
)
@@ -55,6 +56,10 @@ const (
FILE_UNICODE_ON_DISK = 0x00000004
FILE_VOLUME_IS_COMPRESSED = 0x00008000
FILE_VOLUME_QUOTAS = 0x00000020
+
+ // Return values of SleepEx and other APC functions
+ STATUS_USER_APC = 0x000000C0
+ WAIT_IO_COMPLETION = STATUS_USER_APC
)
// StringToUTF16 is deprecated. Use UTF16FromString instead.
@@ -134,9 +139,11 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetVersion() (ver uint32, err error)
//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
//sys ExitProcess(exitcode uint32)
-//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
+//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process
+//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
+//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error)
//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff]
//sys CloseHandle(handle Handle) (err error)
//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle]
@@ -145,6 +152,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW
//sys FindClose(handle Handle) (err error)
//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error)
+//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error)
//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW
//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW
//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW
@@ -164,14 +172,18 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys CancelIo(s Handle) (err error)
//sys CancelIoEx(s Handle, o *Overlapped) (err error)
//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW
-//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error)
+//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error)
+//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW
+//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath
//sys TerminateProcess(handle Handle, exitcode uint32) (err error)
//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW
//sys GetCurrentProcess() (pseudoHandle Handle, err error)
+//sys GetCurrentThread() (pseudoHandle Handle, err error)
//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
+//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects
//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW
//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error)
//sys GetFileType(filehandle Handle) (n uint32, err error)
@@ -182,6 +194,9 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW
//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW
//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW
+//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock
+//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
+//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64
//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW
//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW
@@ -220,7 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW
//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW
//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW
-//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
+//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
@@ -229,6 +244,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW
//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW
+//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error)
+//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error)
//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error)
// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL.
//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW
@@ -240,6 +257,18 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetEvent(event Handle) (err error) = kernel32.SetEvent
//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent
//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent
+//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx
+//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW
+//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject
+//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject
+//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode
+//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread
+//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass
+//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass
+//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error)
+//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error)
+//sys GetProcessId(process Handle) (id uint32, err error)
+//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
@@ -261,6 +290,12 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW
//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW
//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW
+//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
+//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString
+//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2
+//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid
+//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree
+//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion
// syscall interface implementation for other packages
@@ -475,6 +510,10 @@ func ComputerName() (name string, err error) {
return string(utf16.Decode(b[0:n])), nil
}
+func DurationSinceBoot() time.Duration {
+ return time.Duration(getTickCount64()) * time.Millisecond
+}
+
func Ftruncate(fd Handle, length int64) (err error) {
curoffset, e := Seek(fd, 0, 1)
if e != nil {
@@ -558,9 +597,6 @@ func Fsync(fd Handle) (err error) {
}
func Chmod(path string, mode uint32) (err error) {
- if mode == 0 {
- return syscall.EINVAL
- }
p, e := UTF16PtrFromString(path)
if e != nil {
return e
@@ -589,6 +625,18 @@ func LoadSetFileCompletionNotificationModes() error {
return procSetFileCompletionNotificationModes.Find()
}
+func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) {
+ // Every other win32 array API takes arguments as "pointer, count", except for this function. So we
+ // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore
+ // trivially stub this ourselves.
+
+ var handlePtr *Handle
+ if len(handles) > 0 {
+ handlePtr = &handles[0]
+ }
+ return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds)
+}
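
For reference, a minimal sketch of calling the wrapper above from application code. It assumes the golang.org/x/sys/windows import path and that CreateEvent is exported by the same package with its usual kernel32.CreateEventW shape; it waits until either of two events becomes signaled.

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Two manual-reset events, initially non-signaled (CreateEvent assumed available).
	ev1, err := windows.CreateEvent(nil, 1, 0, nil)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(ev1)
	ev2, err := windows.CreateEvent(nil, 1, 0, nil)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(ev2)

	// Signal the second event, then wait for any one of the handles.
	if err := windows.SetEvent(ev2); err != nil {
		panic(err)
	}
	event, err := windows.WaitForMultipleObjects([]windows.Handle{ev1, ev2}, false, windows.INFINITE)
	if err != nil {
		panic(err)
	}
	fmt.Println("signaled handle index:", event-windows.WAIT_OBJECT_0)
}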
+
// net api calls
const socket_error = uintptr(^uint32(0))
@@ -1075,7 +1123,7 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
return syscall.EWINDOWS
}
-func Getpid() (pid int) { return int(getCurrentProcessId()) }
+func Getpid() (pid int) { return int(GetCurrentProcessId()) }
func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) {
// NOTE(rsc): The Win32finddata struct is wrong for the system call:
@@ -1203,3 +1251,70 @@ func Readlink(path string, buf []byte) (n int, err error) {
return n, nil
}
+
+// GUIDFromString parses a string in the form of
+// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID.
+func GUIDFromString(str string) (GUID, error) {
+ guid := GUID{}
+ str16, err := syscall.UTF16PtrFromString(str)
+ if err != nil {
+ return guid, err
+ }
+ err = clsidFromString(str16, &guid)
+ if err != nil {
+ return guid, err
+ }
+ return guid, nil
+}
+
+// GenerateGUID creates a new random GUID.
+func GenerateGUID() (GUID, error) {
+ guid := GUID{}
+ err := coCreateGuid(&guid)
+ if err != nil {
+ return guid, err
+ }
+ return guid, nil
+}
+
+// String returns the canonical string form of the GUID,
+// in the form of "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}".
+func (guid GUID) String() string {
+ var str [100]uint16
+ chars := stringFromGUID2(&guid, &str[0], int32(len(str)))
+ if chars <= 1 {
+ return ""
+ }
+ return string(utf16.Decode(str[:chars-1]))
+}
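
A small round-trip sketch for the GUID helpers above (GenerateGUID, String, GUIDFromString), assuming the golang.org/x/sys/windows import path:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Generate a fresh GUID and render it in its canonical string form.
	g, err := windows.GenerateGUID()
	if err != nil {
		panic(err)
	}
	s := g.String()
	fmt.Println(s)

	// Parsing the string back should reproduce the same value.
	g2, err := windows.GUIDFromString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(g == g2) // true
}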
+
+// KnownFolderPath returns a well-known folder path for the current user, specified by one of
+// the FOLDERID_ constants; the KF_ flags control how the path is resolved and whether the folder is created.
+func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+ return Token(0).KnownFolderPath(folderID, flags)
+}
+
+// KnownFolderPath returns a well-known folder path for the given user token, specified by one of
+// the FOLDERID_ constants; the KF_ flags control how the path is resolved and whether the folder is created.
+func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+ var p *uint16
+ err := shGetKnownFolderPath(folderID, flags, t, &p)
+ if err != nil {
+ return "", err
+ }
+ defer CoTaskMemFree(unsafe.Pointer(p))
+ return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil
+}
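
A usage sketch for KnownFolderPath, assuming the golang.org/x/sys/windows import path. FOLDERID_LocalAppData stands in for one of the FOLDERID_ constants mentioned above; its presence in the package is an assumption here.

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// KF_FLAG_DEFAULT asks for the folder as-is, without creating or redirecting it.
	// FOLDERID_LocalAppData is assumed to be one of the package's FOLDERID_ constants.
	path, err := windows.KnownFolderPath(windows.FOLDERID_LocalAppData, windows.KF_FLAG_DEFAULT)
	if err != nil {
		panic(err)
	}
	fmt.Println(path)
}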
+
+// RtlGetVersion returns the true version of the underlying operating system, ignoring
+// any manifesting or compatibility layers on top of the win32 layer.
+func RtlGetVersion() *OsVersionInfoEx {
+ info := &OsVersionInfoEx{}
+ info.osVersionInfoSize = uint32(unsafe.Sizeof(*info))
+ // According to documentation, this function always succeeds.
+ // The function doesn't even check the validity of the
+ // osVersionInfoSize member. Disassembling ntdll.dll indicates
+ // that the documentation is indeed correct about that.
+ _ = rtlGetVersion(info)
+ return info
+}
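
A sketch of reading the unmanifested OS version through RtlGetVersion, assuming the golang.org/x/sys/windows import path:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// RtlGetVersion always succeeds, so the result can be used directly.
	v := windows.RtlGetVersion()
	fmt.Printf("Windows %d.%d (build %d)\n", v.MajorVersion, v.MinorVersion, v.BuildNumber)
}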
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 141ca81bd..8a563f92b 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -4,33 +4,10 @@
package windows
-import "syscall"
-
-const (
- // Windows errors.
- ERROR_FILE_NOT_FOUND syscall.Errno = 2
- ERROR_PATH_NOT_FOUND syscall.Errno = 3
- ERROR_ACCESS_DENIED syscall.Errno = 5
- ERROR_NO_MORE_FILES syscall.Errno = 18
- ERROR_HANDLE_EOF syscall.Errno = 38
- ERROR_NETNAME_DELETED syscall.Errno = 64
- ERROR_FILE_EXISTS syscall.Errno = 80
- ERROR_BROKEN_PIPE syscall.Errno = 109
- ERROR_BUFFER_OVERFLOW syscall.Errno = 111
- ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122
- ERROR_MOD_NOT_FOUND syscall.Errno = 126
- ERROR_PROC_NOT_FOUND syscall.Errno = 127
- ERROR_ALREADY_EXISTS syscall.Errno = 183
- ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203
- ERROR_MORE_DATA syscall.Errno = 234
- ERROR_OPERATION_ABORTED syscall.Errno = 995
- ERROR_IO_PENDING syscall.Errno = 997
- ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066
- ERROR_NOT_FOUND syscall.Errno = 1168
- ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
- WSAEACCES syscall.Errno = 10013
- WSAEMSGSIZE syscall.Errno = 10040
- WSAECONNRESET syscall.Errno = 10054
+import (
+ "net"
+ "syscall"
+ "unsafe"
)
const (
@@ -126,9 +103,19 @@ const (
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5
- FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
- FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
- FILE_FLAG_OVERLAPPED = 0x40000000
+ FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000
+ FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000
+ FILE_FLAG_OPEN_NO_RECALL = 0x00100000
+ FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
+ FILE_FLAG_SESSION_AWARE = 0x00800000
+ FILE_FLAG_POSIX_SEMANTICS = 0x01000000
+ FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
+ FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
+ FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000
+ FILE_FLAG_RANDOM_ACCESS = 0x10000000
+ FILE_FLAG_NO_BUFFERING = 0x20000000
+ FILE_FLAG_OVERLAPPED = 0x40000000
+ FILE_FLAG_WRITE_THROUGH = 0x80000000
HANDLE_FLAG_INHERIT = 0x00000001
STARTF_USESTDHANDLES = 0x00000100
@@ -167,14 +154,43 @@ const (
IGNORE = 0
INFINITE = 0xffffffff
- WAIT_TIMEOUT = 258
WAIT_ABANDONED = 0x00000080
WAIT_OBJECT_0 = 0x00000000
WAIT_FAILED = 0xFFFFFFFF
- PROCESS_TERMINATE = 1
- PROCESS_QUERY_INFORMATION = 0x00000400
- SYNCHRONIZE = 0x00100000
+ // Standard access rights.
+ DELETE = 0x00010000
+ READ_CONTROL = 0x00020000
+ SYNCHRONIZE = 0x00100000
+ WRITE_DAC = 0x00040000
+ WRITE_OWNER = 0x00080000
+
+ // Access rights for process.
+ PROCESS_CREATE_PROCESS = 0x0080
+ PROCESS_CREATE_THREAD = 0x0002
+ PROCESS_DUP_HANDLE = 0x0040
+ PROCESS_QUERY_INFORMATION = 0x0400
+ PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
+ PROCESS_SET_INFORMATION = 0x0200
+ PROCESS_SET_QUOTA = 0x0100
+ PROCESS_SUSPEND_RESUME = 0x0800
+ PROCESS_TERMINATE = 0x0001
+ PROCESS_VM_OPERATION = 0x0008
+ PROCESS_VM_READ = 0x0010
+ PROCESS_VM_WRITE = 0x0020
+
+ // Access rights for thread.
+ THREAD_DIRECT_IMPERSONATION = 0x0200
+ THREAD_GET_CONTEXT = 0x0008
+ THREAD_IMPERSONATE = 0x0100
+ THREAD_QUERY_INFORMATION = 0x0040
+ THREAD_QUERY_LIMITED_INFORMATION = 0x0800
+ THREAD_SET_CONTEXT = 0x0010
+ THREAD_SET_INFORMATION = 0x0020
+ THREAD_SET_LIMITED_INFORMATION = 0x0400
+ THREAD_SET_THREAD_TOKEN = 0x0080
+ THREAD_SUSPEND_RESUME = 0x0002
+ THREAD_TERMINATE = 0x0001
FILE_MAP_COPY = 0x01
FILE_MAP_WRITE = 0x02
@@ -402,12 +418,6 @@ const (
CERT_CHAIN_POLICY_EV = 8
CERT_CHAIN_POLICY_SSL_F12 = 9
- CERT_E_EXPIRED = 0x800B0101
- CERT_E_ROLE = 0x800B0103
- CERT_E_PURPOSE = 0x800B0106
- CERT_E_UNTRUSTEDROOT = 0x800B0109
- CERT_E_CN_NO_MATCH = 0x800B010F
-
/* AuthType values for SSLExtraCertChainPolicyPara struct */
AUTHTYPE_CLIENT = 1
AUTHTYPE_SERVER = 2
@@ -420,6 +430,26 @@ const (
SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
)
+const (
+ // flags for SetErrorMode
+ SEM_FAILCRITICALERRORS = 0x0001
+ SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
+ SEM_NOGPFAULTERRORBOX = 0x0002
+ SEM_NOOPENFILEERRORBOX = 0x8000
+)
+
+const (
+ // Priority class.
+ ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000
+ BELOW_NORMAL_PRIORITY_CLASS = 0x00004000
+ HIGH_PRIORITY_CLASS = 0x00000080
+ IDLE_PRIORITY_CLASS = 0x00000040
+ NORMAL_PRIORITY_CLASS = 0x00000020
+ PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000
+ PROCESS_MODE_BACKGROUND_END = 0x00200000
+ REALTIME_PRIORITY_CLASS = 0x00000100
+)
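
A sketch of using the priority-class constants above with SetPriorityClass and GetPriorityClass on the current process, assuming the golang.org/x/sys/windows import path:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// GetCurrentProcess returns a pseudo handle that always refers to the caller.
	self, err := windows.GetCurrentProcess()
	if err != nil {
		panic(err)
	}
	if err := windows.SetPriorityClass(self, windows.ABOVE_NORMAL_PRIORITY_CLASS); err != nil {
		panic(err)
	}
	cls, err := windows.GetPriorityClass(self)
	if err != nil {
		panic(err)
	}
	fmt.Printf("priority class: 0x%x\n", cls)
}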
+
var (
OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00")
OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00")
@@ -629,6 +659,16 @@ type ProcessEntry32 struct {
ExeFile [MAX_PATH]uint16
}
+type ThreadEntry32 struct {
+ Size uint32
+ Usage uint32
+ ThreadID uint32
+ OwnerProcessID uint32
+ BasePri int32
+ DeltaPri int32
+ Flags uint32
+}
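
A sketch that pairs ThreadEntry32 with the Thread32First/Thread32Next calls added above to enumerate threads system-wide, assuming the golang.org/x/sys/windows import path. The snapshot flag is written as a literal because only its numeric value (TH32CS_SNAPTHREAD, 0x00000004) is assumed here, not a named constant.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// 0x00000004 is TH32CS_SNAPTHREAD: snapshot all threads in the system.
	snap, err := windows.CreateToolhelp32Snapshot(0x00000004, 0)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(snap)

	var te windows.ThreadEntry32
	te.Size = uint32(unsafe.Sizeof(te)) // must be set before the first call
	for err = windows.Thread32First(snap, &te); err == nil; err = windows.Thread32Next(snap, &te) {
		fmt.Printf("thread %d in process %d\n", te.ThreadID, te.OwnerProcessID)
	}
}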
+
type Systemtime struct {
Year uint16
Month uint16
@@ -850,10 +890,6 @@ const (
)
const (
- DNS_INFO_NO_RECORDS = 0x251D
-)
-
-const (
// flags inside DNSRecord.Dw
DnsSectionQuestion = 0x0000
DnsSectionAnswer = 0x0001
@@ -1314,6 +1350,41 @@ const (
ComputerNameMax = 8
)
+// For MessageBox()
+const (
+ MB_OK = 0x00000000
+ MB_OKCANCEL = 0x00000001
+ MB_ABORTRETRYIGNORE = 0x00000002
+ MB_YESNOCANCEL = 0x00000003
+ MB_YESNO = 0x00000004
+ MB_RETRYCANCEL = 0x00000005
+ MB_CANCELTRYCONTINUE = 0x00000006
+ MB_ICONHAND = 0x00000010
+ MB_ICONQUESTION = 0x00000020
+ MB_ICONEXCLAMATION = 0x00000030
+ MB_ICONASTERISK = 0x00000040
+ MB_USERICON = 0x00000080
+ MB_ICONWARNING = MB_ICONEXCLAMATION
+ MB_ICONERROR = MB_ICONHAND
+ MB_ICONINFORMATION = MB_ICONASTERISK
+ MB_ICONSTOP = MB_ICONHAND
+ MB_DEFBUTTON1 = 0x00000000
+ MB_DEFBUTTON2 = 0x00000100
+ MB_DEFBUTTON3 = 0x00000200
+ MB_DEFBUTTON4 = 0x00000300
+ MB_APPLMODAL = 0x00000000
+ MB_SYSTEMMODAL = 0x00001000
+ MB_TASKMODAL = 0x00002000
+ MB_HELP = 0x00004000
+ MB_NOFOCUS = 0x00008000
+ MB_SETFOREGROUND = 0x00010000
+ MB_DEFAULT_DESKTOP_ONLY = 0x00020000
+ MB_TOPMOST = 0x00040000
+ MB_RIGHT = 0x00080000
+ MB_RTLREADING = 0x00100000
+ MB_SERVICE_NOTIFICATION = 0x00200000
+)
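
A sketch of MessageBox with the constants above, assuming the golang.org/x/sys/windows import path (UTF16PtrFromString is the package's usual string-to-*uint16 helper):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	text, _ := windows.UTF16PtrFromString("Continue with the operation?")
	caption, _ := windows.UTF16PtrFromString("Example")
	// No owner window (hwnd 0); OK/Cancel buttons with an information icon.
	ret, err := windows.MessageBox(0, text, caption, windows.MB_OKCANCEL|windows.MB_ICONINFORMATION)
	if err != nil {
		panic(err)
	}
	fmt.Println("button id:", ret)
}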
+
const (
MOVEFILE_REPLACE_EXISTING = 0x1
MOVEFILE_COPY_ALLOWED = 0x2
@@ -1342,6 +1413,16 @@ type SocketAddress struct {
SockaddrLength int32
}
+// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither.
+func (addr *SocketAddress) IP() net.IP {
+ if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET {
+ return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+ } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 {
+ return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+ }
+ return nil
+}
+
type IpAdapterUnicastAddress struct {
Length uint32
Flags uint32
@@ -1467,3 +1548,118 @@ type ConsoleScreenBufferInfo struct {
}
const UNIX_PATH_MAX = 108 // defined in afunix.h
+
+const (
+ // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags
+ JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008
+ JOB_OBJECT_LIMIT_AFFINITY = 0x00000010
+ JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
+ JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
+ JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200
+ JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004
+ JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
+ JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040
+ JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020
+ JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100
+ JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002
+ JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080
+ JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000
+ JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000
+ JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001
+)
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+ PerProcessUserTimeLimit int64
+ PerJobUserTimeLimit int64
+ LimitFlags uint32
+ MinimumWorkingSetSize uintptr
+ MaximumWorkingSetSize uintptr
+ ActiveProcessLimit uint32
+ Affinity uintptr
+ PriorityClass uint32
+ SchedulingClass uint32
+}
+
+type IO_COUNTERS struct {
+ ReadOperationCount uint64
+ WriteOperationCount uint64
+ OtherOperationCount uint64
+ ReadTransferCount uint64
+ WriteTransferCount uint64
+ OtherTransferCount uint64
+}
+
+type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct {
+ BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION
+ IoInfo IO_COUNTERS
+ ProcessMemoryLimit uintptr
+ JobMemoryLimit uintptr
+ PeakProcessMemoryUsed uintptr
+ PeakJobMemoryUsed uintptr
+}
+
+const (
+ // UIRestrictionsClass
+ JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040
+ JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010
+ JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080
+ JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020
+ JOB_OBJECT_UILIMIT_HANDLES = 0x00000001
+ JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002
+ JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008
+ JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004
+)
+
+type JOBOBJECT_BASIC_UI_RESTRICTIONS struct {
+ UIRestrictionsClass uint32
+}
+
+const (
+ // JobObjectInformationClass
+ JobObjectAssociateCompletionPortInformation = 7
+ JobObjectBasicLimitInformation = 2
+ JobObjectBasicUIRestrictions = 4
+ JobObjectCpuRateControlInformation = 15
+ JobObjectEndOfJobTimeInformation = 6
+ JobObjectExtendedLimitInformation = 9
+ JobObjectGroupInformation = 11
+ JobObjectGroupInformationEx = 14
+ JobObjectLimitViolationInformation2 = 35
+ JobObjectNetRateControlInformation = 32
+ JobObjectNotificationLimitInformation = 12
+ JobObjectNotificationLimitInformation2 = 34
+ JobObjectSecurityLimitInformation = 5
+)
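
Tying these pieces together, a sketch of configuring a job object so that its processes die when the last job handle closes, using CreateJobObject and SetInformationJobObject from the syscall list above (golang.org/x/sys/windows import path assumed):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Anonymous job object; all processes later assigned to it are killed
	// automatically once the last handle to the job is closed.
	job, err := windows.CreateJobObject(nil, nil)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(job)

	var info windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION
	info.BasicLimitInformation.LimitFlags = windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
	if _, err := windows.SetInformationJobObject(
		job,
		windows.JobObjectExtendedLimitInformation,
		uintptr(unsafe.Pointer(&info)),
		uint32(unsafe.Sizeof(info)),
	); err != nil {
		panic(err)
	}

	// A real program would now call AssignProcessToJobObject(job, processHandle)
	// for each child process it wants covered by the limit.
	fmt.Println("job object configured")
}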
+
+const (
+ KF_FLAG_DEFAULT = 0x00000000
+ KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000
+ KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000
+ KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000
+ KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000
+ KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000
+ KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000
+ KF_FLAG_CREATE = 0x00008000
+ KF_FLAG_DONT_VERIFY = 0x00004000
+ KF_FLAG_DONT_UNEXPAND = 0x00002000
+ KF_FLAG_NO_ALIAS = 0x00001000
+ KF_FLAG_INIT = 0x00000800
+ KF_FLAG_DEFAULT_PATH = 0x00000400
+ KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200
+ KF_FLAG_SIMPLE_IDLIST = 0x00000100
+ KF_FLAG_ALIAS_ONLY = 0x80000000
+)
+
+type OsVersionInfoEx struct {
+ osVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformId uint32
+ CsdVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ _ byte
+}
diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go
new file mode 100644
index 000000000..f02120035
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go
@@ -0,0 +1,6853 @@
+// Code generated by 'mkerrors.bash'; DO NOT EDIT.
+
+package windows
+
+import "syscall"
+
+const (
+ FACILITY_NULL = 0
+ FACILITY_RPC = 1
+ FACILITY_DISPATCH = 2
+ FACILITY_STORAGE = 3
+ FACILITY_ITF = 4
+ FACILITY_WIN32 = 7
+ FACILITY_WINDOWS = 8
+ FACILITY_SSPI = 9
+ FACILITY_SECURITY = 9
+ FACILITY_CONTROL = 10
+ FACILITY_CERT = 11
+ FACILITY_INTERNET = 12
+ FACILITY_MEDIASERVER = 13
+ FACILITY_MSMQ = 14
+ FACILITY_SETUPAPI = 15
+ FACILITY_SCARD = 16
+ FACILITY_COMPLUS = 17
+ FACILITY_AAF = 18
+ FACILITY_URT = 19
+ FACILITY_ACS = 20
+ FACILITY_DPLAY = 21
+ FACILITY_UMI = 22
+ FACILITY_SXS = 23
+ FACILITY_WINDOWS_CE = 24
+ FACILITY_HTTP = 25
+ FACILITY_USERMODE_COMMONLOG = 26
+ FACILITY_WER = 27
+ FACILITY_USERMODE_FILTER_MANAGER = 31
+ FACILITY_BACKGROUNDCOPY = 32
+ FACILITY_CONFIGURATION = 33
+ FACILITY_WIA = 33
+ FACILITY_STATE_MANAGEMENT = 34
+ FACILITY_METADIRECTORY = 35
+ FACILITY_WINDOWSUPDATE = 36
+ FACILITY_DIRECTORYSERVICE = 37
+ FACILITY_GRAPHICS = 38
+ FACILITY_SHELL = 39
+ FACILITY_NAP = 39
+ FACILITY_TPM_SERVICES = 40
+ FACILITY_TPM_SOFTWARE = 41
+ FACILITY_UI = 42
+ FACILITY_XAML = 43
+ FACILITY_ACTION_QUEUE = 44
+ FACILITY_PLA = 48
+ FACILITY_WINDOWS_SETUP = 48
+ FACILITY_FVE = 49
+ FACILITY_FWP = 50
+ FACILITY_WINRM = 51
+ FACILITY_NDIS = 52
+ FACILITY_USERMODE_HYPERVISOR = 53
+ FACILITY_CMI = 54
+ FACILITY_USERMODE_VIRTUALIZATION = 55
+ FACILITY_USERMODE_VOLMGR = 56
+ FACILITY_BCD = 57
+ FACILITY_USERMODE_VHD = 58
+ FACILITY_USERMODE_HNS = 59
+ FACILITY_SDIAG = 60
+ FACILITY_WEBSERVICES = 61
+ FACILITY_WINPE = 61
+ FACILITY_WPN = 62
+ FACILITY_WINDOWS_STORE = 63
+ FACILITY_INPUT = 64
+ FACILITY_EAP = 66
+ FACILITY_WINDOWS_DEFENDER = 80
+ FACILITY_OPC = 81
+ FACILITY_XPS = 82
+ FACILITY_MBN = 84
+ FACILITY_POWERSHELL = 84
+ FACILITY_RAS = 83
+ FACILITY_P2P_INT = 98
+ FACILITY_P2P = 99
+ FACILITY_DAF = 100
+ FACILITY_BLUETOOTH_ATT = 101
+ FACILITY_AUDIO = 102
+ FACILITY_STATEREPOSITORY = 103
+ FACILITY_VISUALCPP = 109
+ FACILITY_SCRIPT = 112
+ FACILITY_PARSE = 113
+ FACILITY_BLB = 120
+ FACILITY_BLB_CLI = 121
+ FACILITY_WSBAPP = 122
+ FACILITY_BLBUI = 128
+ FACILITY_USN = 129
+ FACILITY_USERMODE_VOLSNAP = 130
+ FACILITY_TIERING = 131
+ FACILITY_WSB_ONLINE = 133
+ FACILITY_ONLINE_ID = 134
+ FACILITY_DEVICE_UPDATE_AGENT = 135
+ FACILITY_DRVSERVICING = 136
+ FACILITY_DLS = 153
+ FACILITY_DELIVERY_OPTIMIZATION = 208
+ FACILITY_USERMODE_SPACES = 231
+ FACILITY_USER_MODE_SECURITY_CORE = 232
+ FACILITY_USERMODE_LICENSING = 234
+ FACILITY_SOS = 160
+ FACILITY_DEBUGGERS = 176
+ FACILITY_SPP = 256
+ FACILITY_RESTORE = 256
+ FACILITY_DMSERVER = 256
+ FACILITY_DEPLOYMENT_SERVICES_SERVER = 257
+ FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258
+ FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259
+ FACILITY_DEPLOYMENT_SERVICES_UTIL = 260
+ FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261
+ FACILITY_DEPLOYMENT_SERVICES_PXE = 263
+ FACILITY_DEPLOYMENT_SERVICES_TFTP = 264
+ FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272
+ FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278
+ FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289
+ FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290
+ FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293
+ FACILITY_LINGUISTIC_SERVICES = 305
+ FACILITY_AUDIOSTREAMING = 1094
+ FACILITY_ACCELERATOR = 1536
+ FACILITY_WMAAECMA = 1996
+ FACILITY_DIRECTMUSIC = 2168
+ FACILITY_DIRECT3D10 = 2169
+ FACILITY_DXGI = 2170
+ FACILITY_DXGI_DDI = 2171
+ FACILITY_DIRECT3D11 = 2172
+ FACILITY_DIRECT3D11_DEBUG = 2173
+ FACILITY_DIRECT3D12 = 2174
+ FACILITY_DIRECT3D12_DEBUG = 2175
+ FACILITY_LEAP = 2184
+ FACILITY_AUDCLNT = 2185
+ FACILITY_WINCODEC_DWRITE_DWM = 2200
+ FACILITY_WINML = 2192
+ FACILITY_DIRECT2D = 2201
+ FACILITY_DEFRAG = 2304
+ FACILITY_USERMODE_SDBUS = 2305
+ FACILITY_JSCRIPT = 2306
+ FACILITY_PIDGENX = 2561
+ FACILITY_EAS = 85
+ FACILITY_WEB = 885
+ FACILITY_WEB_SOCKET = 886
+ FACILITY_MOBILE = 1793
+ FACILITY_SQLITE = 1967
+ FACILITY_UTC = 1989
+ FACILITY_WEP = 2049
+ FACILITY_SYNCENGINE = 2050
+ FACILITY_XBOX = 2339
+ FACILITY_PIX = 2748
+ ERROR_SUCCESS syscall.Errno = 0
+ NO_ERROR = 0
+ SEC_E_OK Handle = 0x00000000
+ ERROR_INVALID_FUNCTION syscall.Errno = 1
+ ERROR_FILE_NOT_FOUND syscall.Errno = 2
+ ERROR_PATH_NOT_FOUND syscall.Errno = 3
+ ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4
+ ERROR_ACCESS_DENIED syscall.Errno = 5
+ ERROR_INVALID_HANDLE syscall.Errno = 6
+ ERROR_ARENA_TRASHED syscall.Errno = 7
+ ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8
+ ERROR_INVALID_BLOCK syscall.Errno = 9
+ ERROR_BAD_ENVIRONMENT syscall.Errno = 10
+ ERROR_BAD_FORMAT syscall.Errno = 11
+ ERROR_INVALID_ACCESS syscall.Errno = 12
+ ERROR_INVALID_DATA syscall.Errno = 13
+ ERROR_OUTOFMEMORY syscall.Errno = 14
+ ERROR_INVALID_DRIVE syscall.Errno = 15
+ ERROR_CURRENT_DIRECTORY syscall.Errno = 16
+ ERROR_NOT_SAME_DEVICE syscall.Errno = 17
+ ERROR_NO_MORE_FILES syscall.Errno = 18
+ ERROR_WRITE_PROTECT syscall.Errno = 19
+ ERROR_BAD_UNIT syscall.Errno = 20
+ ERROR_NOT_READY syscall.Errno = 21
+ ERROR_BAD_COMMAND syscall.Errno = 22
+ ERROR_CRC syscall.Errno = 23
+ ERROR_BAD_LENGTH syscall.Errno = 24
+ ERROR_SEEK syscall.Errno = 25
+ ERROR_NOT_DOS_DISK syscall.Errno = 26
+ ERROR_SECTOR_NOT_FOUND syscall.Errno = 27
+ ERROR_OUT_OF_PAPER syscall.Errno = 28
+ ERROR_WRITE_FAULT syscall.Errno = 29
+ ERROR_READ_FAULT syscall.Errno = 30
+ ERROR_GEN_FAILURE syscall.Errno = 31
+ ERROR_SHARING_VIOLATION syscall.Errno = 32
+ ERROR_LOCK_VIOLATION syscall.Errno = 33
+ ERROR_WRONG_DISK syscall.Errno = 34
+ ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36
+ ERROR_HANDLE_EOF syscall.Errno = 38
+ ERROR_HANDLE_DISK_FULL syscall.Errno = 39
+ ERROR_NOT_SUPPORTED syscall.Errno = 50
+ ERROR_REM_NOT_LIST syscall.Errno = 51
+ ERROR_DUP_NAME syscall.Errno = 52
+ ERROR_BAD_NETPATH syscall.Errno = 53
+ ERROR_NETWORK_BUSY syscall.Errno = 54
+ ERROR_DEV_NOT_EXIST syscall.Errno = 55
+ ERROR_TOO_MANY_CMDS syscall.Errno = 56
+ ERROR_ADAP_HDW_ERR syscall.Errno = 57
+ ERROR_BAD_NET_RESP syscall.Errno = 58
+ ERROR_UNEXP_NET_ERR syscall.Errno = 59
+ ERROR_BAD_REM_ADAP syscall.Errno = 60
+ ERROR_PRINTQ_FULL syscall.Errno = 61
+ ERROR_NO_SPOOL_SPACE syscall.Errno = 62
+ ERROR_PRINT_CANCELLED syscall.Errno = 63
+ ERROR_NETNAME_DELETED syscall.Errno = 64
+ ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65
+ ERROR_BAD_DEV_TYPE syscall.Errno = 66
+ ERROR_BAD_NET_NAME syscall.Errno = 67
+ ERROR_TOO_MANY_NAMES syscall.Errno = 68
+ ERROR_TOO_MANY_SESS syscall.Errno = 69
+ ERROR_SHARING_PAUSED syscall.Errno = 70
+ ERROR_REQ_NOT_ACCEP syscall.Errno = 71
+ ERROR_REDIR_PAUSED syscall.Errno = 72
+ ERROR_FILE_EXISTS syscall.Errno = 80
+ ERROR_CANNOT_MAKE syscall.Errno = 82
+ ERROR_FAIL_I24 syscall.Errno = 83
+ ERROR_OUT_OF_STRUCTURES syscall.Errno = 84
+ ERROR_ALREADY_ASSIGNED syscall.Errno = 85
+ ERROR_INVALID_PASSWORD syscall.Errno = 86
+ ERROR_INVALID_PARAMETER syscall.Errno = 87
+ ERROR_NET_WRITE_FAULT syscall.Errno = 88
+ ERROR_NO_PROC_SLOTS syscall.Errno = 89
+ ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100
+ ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101
+ ERROR_SEM_IS_SET syscall.Errno = 102
+ ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103
+ ERROR_INVALID_AT_INTERRUPT_TIME syscall.Errno = 104
+ ERROR_SEM_OWNER_DIED syscall.Errno = 105
+ ERROR_SEM_USER_LIMIT syscall.Errno = 106
+ ERROR_DISK_CHANGE syscall.Errno = 107
+ ERROR_DRIVE_LOCKED syscall.Errno = 108
+ ERROR_BROKEN_PIPE syscall.Errno = 109
+ ERROR_OPEN_FAILED syscall.Errno = 110
+ ERROR_BUFFER_OVERFLOW syscall.Errno = 111
+ ERROR_DISK_FULL syscall.Errno = 112
+ ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113
+ ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114
+ ERROR_INVALID_CATEGORY syscall.Errno = 117
+ ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118
+ ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119
+ ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120
+ ERROR_SEM_TIMEOUT syscall.Errno = 121
+ ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122
+ ERROR_INVALID_NAME syscall.Errno = 123
+ ERROR_INVALID_LEVEL syscall.Errno = 124
+ ERROR_NO_VOLUME_LABEL syscall.Errno = 125
+ ERROR_MOD_NOT_FOUND syscall.Errno = 126
+ ERROR_PROC_NOT_FOUND syscall.Errno = 127
+ ERROR_WAIT_NO_CHILDREN syscall.Errno = 128
+ ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129
+ ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130
+ ERROR_NEGATIVE_SEEK syscall.Errno = 131
+ ERROR_SEEK_ON_DEVICE syscall.Errno = 132
+ ERROR_IS_JOIN_TARGET syscall.Errno = 133
+ ERROR_IS_JOINED syscall.Errno = 134
+ ERROR_IS_SUBSTED syscall.Errno = 135
+ ERROR_NOT_JOINED syscall.Errno = 136
+ ERROR_NOT_SUBSTED syscall.Errno = 137
+ ERROR_JOIN_TO_JOIN syscall.Errno = 138
+ ERROR_SUBST_TO_SUBST syscall.Errno = 139
+ ERROR_JOIN_TO_SUBST syscall.Errno = 140
+ ERROR_SUBST_TO_JOIN syscall.Errno = 141
+ ERROR_BUSY_DRIVE syscall.Errno = 142
+ ERROR_SAME_DRIVE syscall.Errno = 143
+ ERROR_DIR_NOT_ROOT syscall.Errno = 144
+ ERROR_DIR_NOT_EMPTY syscall.Errno = 145
+ ERROR_IS_SUBST_PATH syscall.Errno = 146
+ ERROR_IS_JOIN_PATH syscall.Errno = 147
+ ERROR_PATH_BUSY syscall.Errno = 148
+ ERROR_IS_SUBST_TARGET syscall.Errno = 149
+ ERROR_SYSTEM_TRACE syscall.Errno = 150
+ ERROR_INVALID_EVENT_COUNT syscall.Errno = 151
+ ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152
+ ERROR_INVALID_LIST_FORMAT syscall.Errno = 153
+ ERROR_LABEL_TOO_LONG syscall.Errno = 154
+ ERROR_TOO_MANY_TCBS syscall.Errno = 155
+ ERROR_SIGNAL_REFUSED syscall.Errno = 156
+ ERROR_DISCARDED syscall.Errno = 157
+ ERROR_NOT_LOCKED syscall.Errno = 158
+ ERROR_BAD_THREADID_ADDR syscall.Errno = 159
+ ERROR_BAD_ARGUMENTS syscall.Errno = 160
+ ERROR_BAD_PATHNAME syscall.Errno = 161
+ ERROR_SIGNAL_PENDING syscall.Errno = 162
+ ERROR_MAX_THRDS_REACHED syscall.Errno = 164
+ ERROR_LOCK_FAILED syscall.Errno = 167
+ ERROR_BUSY syscall.Errno = 170
+ ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171
+ ERROR_CANCEL_VIOLATION syscall.Errno = 173
+ ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174
+ ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180
+ ERROR_INVALID_ORDINAL syscall.Errno = 182
+ ERROR_ALREADY_EXISTS syscall.Errno = 183
+ ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186
+ ERROR_SEM_NOT_FOUND syscall.Errno = 187
+ ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188
+ ERROR_INVALID_STACKSEG syscall.Errno = 189
+ ERROR_INVALID_MODULETYPE syscall.Errno = 190
+ ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191
+ ERROR_EXE_MARKED_INVALID syscall.Errno = 192
+ ERROR_BAD_EXE_FORMAT syscall.Errno = 193
+ ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194
+ ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195
+ ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196
+ ERROR_IOPL_NOT_ENABLED syscall.Errno = 197
+ ERROR_INVALID_SEGDPL syscall.Errno = 198
+ ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199
+ ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 200
+ ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201
+ ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202
+ ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203
+ ERROR_NO_SIGNAL_SENT syscall.Errno = 205
+ ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206
+ ERROR_RING2_STACK_IN_USE syscall.Errno = 207
+ ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208
+ ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209
+ ERROR_THREAD_1_INACTIVE syscall.Errno = 210
+ ERROR_LOCKED syscall.Errno = 212
+ ERROR_TOO_MANY_MODULES syscall.Errno = 214
+ ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215
+ ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216
+ ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217
+ ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218
+ ERROR_FILE_CHECKED_OUT syscall.Errno = 220
+ ERROR_CHECKOUT_REQUIRED syscall.Errno = 221
+ ERROR_BAD_FILE_TYPE syscall.Errno = 222
+ ERROR_FILE_TOO_LARGE syscall.Errno = 223
+ ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224
+ ERROR_VIRUS_INFECTED syscall.Errno = 225
+ ERROR_VIRUS_DELETED syscall.Errno = 226
+ ERROR_PIPE_LOCAL syscall.Errno = 229
+ ERROR_BAD_PIPE syscall.Errno = 230
+ ERROR_PIPE_BUSY syscall.Errno = 231
+ ERROR_NO_DATA syscall.Errno = 232
+ ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233
+ ERROR_MORE_DATA syscall.Errno = 234
+ ERROR_NO_WORK_DONE syscall.Errno = 235
+ ERROR_VC_DISCONNECTED syscall.Errno = 240
+ ERROR_INVALID_EA_NAME syscall.Errno = 254
+ ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255
+ WAIT_TIMEOUT syscall.Errno = 258
+ ERROR_NO_MORE_ITEMS syscall.Errno = 259
+ ERROR_CANNOT_COPY syscall.Errno = 266
+ ERROR_DIRECTORY syscall.Errno = 267
+ ERROR_EAS_DIDNT_FIT syscall.Errno = 275
+ ERROR_EA_FILE_CORRUPT syscall.Errno = 276
+ ERROR_EA_TABLE_FULL syscall.Errno = 277
+ ERROR_INVALID_EA_HANDLE syscall.Errno = 278
+ ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282
+ ERROR_NOT_OWNER syscall.Errno = 288
+ ERROR_TOO_MANY_POSTS syscall.Errno = 298
+ ERROR_PARTIAL_COPY syscall.Errno = 299
+ ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300
+ ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301
+ ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302
+ ERROR_DELETE_PENDING syscall.Errno = 303
+ ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304
+ ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305
+ ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306
+ ERROR_INVALID_LOCK_RANGE syscall.Errno = 307
+ ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308
+ ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309
+ ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310
+ ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311
+ ERROR_NO_RANGES_PROCESSED syscall.Errno = 312
+ ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313
+ ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314
+ ERROR_INVALID_TOKEN syscall.Errno = 315
+ ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316
+ ERROR_MR_MID_NOT_FOUND syscall.Errno = 317
+ ERROR_SCOPE_NOT_FOUND syscall.Errno = 318
+ ERROR_UNDEFINED_SCOPE syscall.Errno = 319
+ ERROR_INVALID_CAP syscall.Errno = 320
+ ERROR_DEVICE_UNREACHABLE syscall.Errno = 321
+ ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322
+ ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323
+ ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324
+ ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326
+ ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327
+ ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328
+ ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329
+ ERROR_BAD_DEVICE_PATH syscall.Errno = 330
+ ERROR_TOO_MANY_DESCRIPTORS syscall.Errno = 331
+ ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332
+ ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333
+ ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334
+ ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335
+ ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336
+ ERROR_NOT_READ_FROM_COPY syscall.Errno = 337
+ ERROR_FT_WRITE_FAILURE syscall.Errno = 338
+ ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339
+ ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340
+ ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341
+ ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342
+ ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343
+ ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344
+ ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345
+ ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346
+ ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347
+ ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348
+ ERROR_ENCLAVE_FAILURE syscall.Errno = 349
+ ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350
+ ERROR_FAIL_SHUTDOWN syscall.Errno = 351
+ ERROR_FAIL_RESTART syscall.Errno = 352
+ ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353
+ ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354
+ ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355
+ ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356
+ ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357
+ ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358
+ ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359
+ ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360
+ ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361
+ ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362
+ ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363
+ ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364
+ ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365
+ ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366
+ ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367
+ ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368
+ ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369
+ ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370
+ ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371
+ ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372
+ ERROR_GDI_HANDLE_LEAK syscall.Errno = 373
+ ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374
+ ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375
+ ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376
+ ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377
+ ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378
+ ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379
+ ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380
+ ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381
+ ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382
+ ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383
+ ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384
+ ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385
+ ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386
+ ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387
+ ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388
+ ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389
+ ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390
+ ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391
+ ERROR_CLOUD_FILE_PINNED syscall.Errno = 392
+ ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393
+ ERROR_CLOUD_FILE_PROPERTY_CORRUPT syscall.Errno = 394
+ ERROR_CLOUD_FILE_ACCESS_DENIED syscall.Errno = 395
+ ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396
+ ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397
+ ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398
+ ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399
+ ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400
+ ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401
+ ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402
+ ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403
+ ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404
+ ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405
+ ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406
+ ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407
+ ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408
+ ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409
+ ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410
+ ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411
+ ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412
+ ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413
+ ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414
+ ERROR_FT_READ_FAILURE syscall.Errno = 415
+ ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416
+ ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417
+ ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418
+ ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419
+ ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420
+ ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421
+ ERROR_TIME_CRITICAL_THREAD syscall.Errno = 422
+ ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423
+ ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424
+ ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450
+ ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451
+ ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452
+ ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453
+ ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454
+ ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455
+ ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456
+ ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457
+ ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458
+ ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459
+ ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460
+ ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480
+ ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481
+ ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482
+ ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483
+ ERROR_INVALID_ADDRESS syscall.Errno = 487
+ ERROR_VRF_CFG_ENABLED syscall.Errno = 1183
+ ERROR_PARTITION_TERMINATING syscall.Errno = 1184
+ ERROR_USER_PROFILE_LOAD syscall.Errno = 500
+ ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534
+ ERROR_PIPE_CONNECTED syscall.Errno = 535
+ ERROR_PIPE_LISTENING syscall.Errno = 536
+ ERROR_VERIFIER_STOP syscall.Errno = 537
+ ERROR_ABIOS_ERROR syscall.Errno = 538
+ ERROR_WX86_WARNING syscall.Errno = 539
+ ERROR_WX86_ERROR syscall.Errno = 540
+ ERROR_TIMER_NOT_CANCELED syscall.Errno = 541
+ ERROR_UNWIND syscall.Errno = 542
+ ERROR_BAD_STACK syscall.Errno = 543
+ ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544
+ ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545
+ ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546
+ ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547
+ ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548
+ ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549
+ ERROR_PROFILING_NOT_STARTED syscall.Errno = 550
+ ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551
+ ERROR_COULD_NOT_INTERPRET syscall.Errno = 552
+ ERROR_PROFILING_AT_LIMIT syscall.Errno = 553
+ ERROR_CANT_WAIT syscall.Errno = 554
+ ERROR_CANT_TERMINATE_SELF syscall.Errno = 555
+ ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556
+ ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557
+ ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558
+ ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559
+ ERROR_NO_GUID_TRANSLATION syscall.Errno = 560
+ ERROR_INVALID_LDT_SIZE syscall.Errno = 561
+ ERROR_INVALID_LDT_OFFSET syscall.Errno = 563
+ ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564
+ ERROR_TOO_MANY_THREADS syscall.Errno = 565
+ ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566
+ ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567
+ ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568
+ ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569
+ ERROR_NET_OPEN_FAILED syscall.Errno = 570
+ ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571
+ ERROR_CONTROL_C_EXIT syscall.Errno = 572
+ ERROR_MISSING_SYSTEMFILE syscall.Errno = 573
+ ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574
+ ERROR_APP_INIT_FAILURE syscall.Errno = 575
+ ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576
+ ERROR_INVALID_IMAGE_HASH syscall.Errno = 577
+ ERROR_NO_PAGEFILE syscall.Errno = 578
+ ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579
+ ERROR_NO_EVENT_PAIR syscall.Errno = 580
+ ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581
+ ERROR_ILLEGAL_CHARACTER syscall.Errno = 582
+ ERROR_UNDEFINED_CHARACTER syscall.Errno = 583
+ ERROR_FLOPPY_VOLUME syscall.Errno = 584
+ ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585
+ ERROR_BACKUP_CONTROLLER syscall.Errno = 586
+ ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587
+ ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588
+ ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589
+ ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590
+ ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591
+ ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592
+ ERROR_VDM_HARD_ERROR syscall.Errno = 593
+ ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594
+ ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595
+ ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596
+ ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597
+ ERROR_NOT_TINY_STREAM syscall.Errno = 598
+ ERROR_STACK_OVERFLOW_READ syscall.Errno = 599
+ ERROR_CONVERT_TO_LARGE syscall.Errno = 600
+ ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601
+ ERROR_ALLOCATE_BUCKET syscall.Errno = 602
+ ERROR_MARSHALL_OVERFLOW syscall.Errno = 603
+ ERROR_INVALID_VARIANT syscall.Errno = 604
+ ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605
+ ERROR_AUDIT_FAILED syscall.Errno = 606
+ ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607
+ ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608
+ ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609
+ ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610
+ ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611
+ ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612
+ ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613
+ ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614
+ ERROR_PWD_TOO_SHORT syscall.Errno = 615
+ ERROR_PWD_TOO_RECENT syscall.Errno = 616
+ ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617
+ ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618
+ ERROR_INVALID_HW_PROFILE syscall.Errno = 619
+ ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620
+ ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621
+ ERROR_EVALUATION_EXPIRATION syscall.Errno = 622
+ ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623
+ ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624
+ ERROR_VALIDATE_CONTINUE syscall.Errno = 625
+ ERROR_NO_MORE_MATCHES syscall.Errno = 626
+ ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627
+ ERROR_SERVER_SID_MISMATCH syscall.Errno = 628
+ ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629
+ ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630
+ ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631
+ ERROR_NOINTERFACE syscall.Errno = 632
+ ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633
+ ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634
+ ERROR_COMMITMENT_MINIMUM syscall.Errno = 635
+ ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636
+ ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637
+ ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638
+ ERROR_INSUFFICIENT_POWER syscall.Errno = 639
+ ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640
+ ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641
+ ERROR_PORT_NOT_SET syscall.Errno = 642
+ ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643
+ ERROR_RANGE_NOT_FOUND syscall.Errno = 644
+ ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646
+ ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647
+ ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648
+ ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649
+ ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650
+ ERROR_MCA_OCCURED syscall.Errno = 651
+ ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652
+ ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653
+ ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654
+ ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655
+ ERROR_HIBERNATION_FAILURE syscall.Errno = 656
+ ERROR_PWD_TOO_LONG syscall.Errno = 657
+ ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665
+ ERROR_ASSERTION_FAILURE syscall.Errno = 668
+ ERROR_ACPI_ERROR syscall.Errno = 669
+ ERROR_WOW_ASSERTION syscall.Errno = 670
+ ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671
+ ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672
+ ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673
+ ERROR_PNP_INVALID_ID syscall.Errno = 674
+ ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675
+ ERROR_HANDLES_CLOSED syscall.Errno = 676
+ ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677
+ ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678
+ ERROR_MEDIA_CHECK syscall.Errno = 679
+ ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680
+ ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681
+ ERROR_LONGJUMP syscall.Errno = 682
+ ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683
+ ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684
+ ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685
+ ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686
+ ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687
+ ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688
+ ERROR_DBG_REPLY_LATER syscall.Errno = 689
+ ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690
+ ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691
+ ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692
+ ERROR_DBG_CONTROL_C syscall.Errno = 693
+ ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694
+ ERROR_DBG_RIPEXCEPTION syscall.Errno = 695
+ ERROR_DBG_CONTROL_BREAK syscall.Errno = 696
+ ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697
+ ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698
+ ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699
+ ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700
+ ERROR_RXACT_STATE_CREATED syscall.Errno = 701
+ ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702
+ ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703
+ ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704
+ ERROR_FT_WRITE_RECOVERY syscall.Errno = 705
+ ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706
+ ERROR_RECEIVE_PARTIAL syscall.Errno = 707
+ ERROR_RECEIVE_EXPEDITED syscall.Errno = 708
+ ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 709
+ ERROR_EVENT_DONE syscall.Errno = 710
+ ERROR_EVENT_PENDING syscall.Errno = 711
+ ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712
+ ERROR_FATAL_APP_EXIT syscall.Errno = 713
+ ERROR_PREDEFINED_HANDLE syscall.Errno = 714
+ ERROR_WAS_UNLOCKED syscall.Errno = 715
+ ERROR_SERVICE_NOTIFICATION syscall.Errno = 716
+ ERROR_WAS_LOCKED syscall.Errno = 717
+ ERROR_LOG_HARD_ERROR syscall.Errno = 718
+ ERROR_ALREADY_WIN32 syscall.Errno = 719
+ ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720
+ ERROR_NO_YIELD_PERFORMED syscall.Errno = 721
+ ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722
+ ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723
+ ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724
+ ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725
+ ERROR_HIBERNATED syscall.Errno = 726
+ ERROR_RESUME_HIBERNATION syscall.Errno = 727
+ ERROR_FIRMWARE_UPDATED syscall.Errno = 728
+ ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729
+ ERROR_WAKE_SYSTEM syscall.Errno = 730
+ ERROR_WAIT_1 syscall.Errno = 731
+ ERROR_WAIT_2 syscall.Errno = 732
+ ERROR_WAIT_3 syscall.Errno = 733
+ ERROR_WAIT_63 syscall.Errno = 734
+ ERROR_ABANDONED_WAIT_0 syscall.Errno = 735
+ ERROR_ABANDONED_WAIT_63 syscall.Errno = 736
+ ERROR_USER_APC syscall.Errno = 737
+ ERROR_KERNEL_APC syscall.Errno = 738
+ ERROR_ALERTED syscall.Errno = 739
+ ERROR_ELEVATION_REQUIRED syscall.Errno = 740
+ ERROR_REPARSE syscall.Errno = 741
+ ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742
+ ERROR_VOLUME_MOUNTED syscall.Errno = 743
+ ERROR_RXACT_COMMITTED syscall.Errno = 744
+ ERROR_NOTIFY_CLEANUP syscall.Errno = 745
+ ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746
+ ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747
+ ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748
+ ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749
+ ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750
+ ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751
+ ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752
+ ERROR_CRASH_DUMP syscall.Errno = 753
+ ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754
+ ERROR_REPARSE_OBJECT syscall.Errno = 755
+ ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756
+ ERROR_TRANSLATION_COMPLETE syscall.Errno = 757
+ ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758
+ ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759
+ ERROR_PROCESS_IN_JOB syscall.Errno = 760
+ ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761
+ ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762
+ ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763
+ ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764
+ ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765
+ ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766
+ ERROR_DBG_CONTINUE syscall.Errno = 767
+ ERROR_CALLBACK_POP_STACK syscall.Errno = 768
+ ERROR_COMPRESSION_DISABLED syscall.Errno = 769
+ ERROR_CANTFETCHBACKWARDS syscall.Errno = 770
+ ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771
+ ERROR_ROWSNOTRELEASED syscall.Errno = 772
+ ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773
+ ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774
+ ERROR_NOT_CAPABLE syscall.Errno = 775
+ ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776
+ ERROR_VERSION_PARSE_ERROR syscall.Errno = 777
+ ERROR_BADSTARTPOSITION syscall.Errno = 778
+ ERROR_MEMORY_HARDWARE syscall.Errno = 779
+ ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780
+ ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781
+ ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782
+ ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783
+ ERROR_MCA_EXCEPTION syscall.Errno = 784
+ ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785
+ ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786
+ ERROR_ABANDON_HIBERFILE syscall.Errno = 787
+ ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788
+ ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789
+ ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790
+ ERROR_BAD_MCFG_TABLE syscall.Errno = 791
+ ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792
+ ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793
+ ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794
+ ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795
+ ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796
+ ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797
+ ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798
+ ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799
+ ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800
+ ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801
+ ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802
+ ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803
+ ERROR_NO_ACE_CONDITION syscall.Errno = 804
+ ERROR_INVALID_ACE_CONDITION syscall.Errno = 805
+ ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806
+ ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807
+ ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808
+ ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809
+ ERROR_QUOTA_ACTIVITY syscall.Errno = 810
+ ERROR_HANDLE_REVOKED syscall.Errno = 811
+ ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812
+ ERROR_CPU_SET_INVALID syscall.Errno = 813
+ ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814
+ ERROR_ENCLAVE_VIOLATION syscall.Errno = 815
+ ERROR_EA_ACCESS_DENIED syscall.Errno = 994
+ ERROR_OPERATION_ABORTED syscall.Errno = 995
+ ERROR_IO_INCOMPLETE syscall.Errno = 996
+ ERROR_IO_PENDING syscall.Errno = 997
+ ERROR_NOACCESS syscall.Errno = 998
+ ERROR_SWAPERROR syscall.Errno = 999
+ ERROR_STACK_OVERFLOW syscall.Errno = 1001
+ ERROR_INVALID_MESSAGE syscall.Errno = 1002
+ ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003
+ ERROR_INVALID_FLAGS syscall.Errno = 1004
+ ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005
+ ERROR_FILE_INVALID syscall.Errno = 1006
+ ERROR_FULLSCREEN_MODE syscall.Errno = 1007
+ ERROR_NO_TOKEN syscall.Errno = 1008
+ ERROR_BADDB syscall.Errno = 1009
+ ERROR_BADKEY syscall.Errno = 1010
+ ERROR_CANTOPEN syscall.Errno = 1011
+ ERROR_CANTREAD syscall.Errno = 1012
+ ERROR_CANTWRITE syscall.Errno = 1013
+ ERROR_REGISTRY_RECOVERED syscall.Errno = 1014
+ ERROR_REGISTRY_CORRUPT syscall.Errno = 1015
+ ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016
+ ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017
+ ERROR_KEY_DELETED syscall.Errno = 1018
+ ERROR_NO_LOG_SPACE syscall.Errno = 1019
+ ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020
+ ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021
+ ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022
+ ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051
+ ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052
+ ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053
+ ERROR_SERVICE_NO_THREAD syscall.Errno = 1054
+ ERROR_SERVICE_DATABASE_LOCKED syscall.Errno = 1055
+ ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056
+ ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057
+ ERROR_SERVICE_DISABLED syscall.Errno = 1058
+ ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059
+ ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060
+ ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061
+ ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062
+ ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063
+ ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064
+ ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065
+ ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066
+ ERROR_PROCESS_ABORTED syscall.Errno = 1067
+ ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068
+ ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069
+ ERROR_SERVICE_START_HANG syscall.Errno = 1070
+ ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071
+ ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072
+ ERROR_SERVICE_EXISTS syscall.Errno = 1073
+ ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074
+ ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075
+ ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076
+ ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077
+ ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078
+ ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079
+ ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080
+ ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081
+ ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082
+ ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083
+ ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084
+ ERROR_END_OF_MEDIA syscall.Errno = 1100
+ ERROR_FILEMARK_DETECTED syscall.Errno = 1101
+ ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102
+ ERROR_SETMARK_DETECTED syscall.Errno = 1103
+ ERROR_NO_DATA_DETECTED syscall.Errno = 1104
+ ERROR_PARTITION_FAILURE syscall.Errno = 1105
+ ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106
+ ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107
+ ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108
+ ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109
+ ERROR_MEDIA_CHANGED syscall.Errno = 1110
+ ERROR_BUS_RESET syscall.Errno = 1111
+ ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112
+ ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113
+ ERROR_DLL_INIT_FAILED syscall.Errno = 1114
+ ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115
+ ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116
+ ERROR_IO_DEVICE syscall.Errno = 1117
+ ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118
+ ERROR_IRQ_BUSY syscall.Errno = 1119
+ ERROR_MORE_WRITES syscall.Errno = 1120
+ ERROR_COUNTER_TIMEOUT syscall.Errno = 1121
+ ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122
+ ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123
+ ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124
+ ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125
+ ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126
+ ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127
+ ERROR_DISK_RESET_FAILED syscall.Errno = 1128
+ ERROR_EOM_OVERFLOW syscall.Errno = 1129
+ ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130
+ ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131
+ ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132
+ ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140
+ ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141
+ ERROR_TOO_MANY_LINKS syscall.Errno = 1142
+ ERROR_OLD_WIN_VERSION syscall.Errno = 1150
+ ERROR_APP_WRONG_OS syscall.Errno = 1151
+ ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152
+ ERROR_RMODE_APP syscall.Errno = 1153
+ ERROR_INVALID_DLL syscall.Errno = 1154
+ ERROR_NO_ASSOCIATION syscall.Errno = 1155
+ ERROR_DDE_FAIL syscall.Errno = 1156
+ ERROR_DLL_NOT_FOUND syscall.Errno = 1157
+ ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158
+ ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159
+ ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160
+ ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161
+ ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162
+ ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163
+ ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164
+ ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165
+ ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166
+ ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167
+ ERROR_NOT_FOUND syscall.Errno = 1168
+ ERROR_NO_MATCH syscall.Errno = 1169
+ ERROR_SET_NOT_FOUND syscall.Errno = 1170
+ ERROR_POINT_NOT_FOUND syscall.Errno = 1171
+ ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172
+ ERROR_NO_VOLUME_ID syscall.Errno = 1173
+ ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175
+ ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176
+ ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177
+ ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178
+ ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179
+ ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180
+ ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181
+ ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190
+ ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191
+ ERROR_BAD_DEVICE syscall.Errno = 1200
+ ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201
+ ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202
+ ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203
+ ERROR_BAD_PROVIDER syscall.Errno = 1204
+ ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205
+ ERROR_BAD_PROFILE syscall.Errno = 1206
+ ERROR_NOT_CONTAINER syscall.Errno = 1207
+ ERROR_EXTENDED_ERROR syscall.Errno = 1208
+ ERROR_INVALID_GROUPNAME syscall.Errno = 1209
+ ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210
+ ERROR_INVALID_EVENTNAME syscall.Errno = 1211
+ ERROR_INVALID_DOMAINNAME syscall.Errno = 1212
+ ERROR_INVALID_SERVICENAME syscall.Errno = 1213
+ ERROR_INVALID_NETNAME syscall.Errno = 1214
+ ERROR_INVALID_SHARENAME syscall.Errno = 1215
+ ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216
+ ERROR_INVALID_MESSAGENAME syscall.Errno = 1217
+ ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218
+ ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219
+ ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220
+ ERROR_DUP_DOMAINNAME syscall.Errno = 1221
+ ERROR_NO_NETWORK syscall.Errno = 1222
+ ERROR_CANCELLED syscall.Errno = 1223
+ ERROR_USER_MAPPED_FILE syscall.Errno = 1224
+ ERROR_CONNECTION_REFUSED syscall.Errno = 1225
+ ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226
+ ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227
+ ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228
+ ERROR_CONNECTION_INVALID syscall.Errno = 1229
+ ERROR_CONNECTION_ACTIVE syscall.Errno = 1230
+ ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231
+ ERROR_HOST_UNREACHABLE syscall.Errno = 1232
+ ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233
+ ERROR_PORT_UNREACHABLE syscall.Errno = 1234
+ ERROR_REQUEST_ABORTED syscall.Errno = 1235
+ ERROR_CONNECTION_ABORTED syscall.Errno = 1236
+ ERROR_RETRY syscall.Errno = 1237
+ ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238
+ ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239
+ ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240
+ ERROR_INCORRECT_ADDRESS syscall.Errno = 1241
+ ERROR_ALREADY_REGISTERED syscall.Errno = 1242
+ ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243
+ ERROR_NOT_AUTHENTICATED syscall.Errno = 1244
+ ERROR_NOT_LOGGED_ON syscall.Errno = 1245
+ ERROR_CONTINUE syscall.Errno = 1246
+ ERROR_ALREADY_INITIALIZED syscall.Errno = 1247
+ ERROR_NO_MORE_DEVICES syscall.Errno = 1248
+ ERROR_NO_SUCH_SITE syscall.Errno = 1249
+ ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250
+ ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251
+ ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252
+ ERROR_BAD_USER_PROFILE syscall.Errno = 1253
+ ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254
+ ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255
+ ERROR_HOST_DOWN syscall.Errno = 1256
+ ERROR_NON_ACCOUNT_SID syscall.Errno = 1257
+ ERROR_NON_DOMAIN_SID syscall.Errno = 1258
+ ERROR_APPHELP_BLOCK syscall.Errno = 1259
+ ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260
+ ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261
+ ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262
+ ERROR_PKINIT_FAILURE syscall.Errno = 1263
+ ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264
+ ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265
+ ERROR_MACHINE_LOCKED syscall.Errno = 1271
+ ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272
+ ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273
+ ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274
+ ERROR_DRIVER_BLOCKED syscall.Errno = 1275
+ ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276
+ ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277
+ ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278
+ ERROR_RECOVERY_FAILURE syscall.Errno = 1279
+ ERROR_ALREADY_FIBER syscall.Errno = 1280
+ ERROR_ALREADY_THREAD syscall.Errno = 1281
+ ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282
+ ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283
+ ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284
+ ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285
+ ERROR_VDM_DISALLOWED syscall.Errno = 1286
+ ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287
+ ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288
+ ERROR_BEYOND_VDL syscall.Errno = 1289
+ ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290
+ ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291
+ ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292
+ ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293
+ ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294
+ ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295
+ ERROR_CONTENT_BLOCKED syscall.Errno = 1296
+ ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297
+ ERROR_APP_HANG syscall.Errno = 1298
+ ERROR_INVALID_LABEL syscall.Errno = 1299
+ ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+ ERROR_SOME_NOT_MAPPED syscall.Errno = 1301
+ ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302
+ ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303
+ ERROR_NULL_LM_PASSWORD syscall.Errno = 1304
+ ERROR_UNKNOWN_REVISION syscall.Errno = 1305
+ ERROR_REVISION_MISMATCH syscall.Errno = 1306
+ ERROR_INVALID_OWNER syscall.Errno = 1307
+ ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308
+ ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309
+ ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310
+ ERROR_NO_LOGON_SERVERS syscall.Errno = 1311
+ ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312
+ ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313
+ ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
+ ERROR_INVALID_ACCOUNT_NAME syscall.Errno = 1315
+ ERROR_USER_EXISTS syscall.Errno = 1316
+ ERROR_NO_SUCH_USER syscall.Errno = 1317
+ ERROR_GROUP_EXISTS syscall.Errno = 1318
+ ERROR_NO_SUCH_GROUP syscall.Errno = 1319
+ ERROR_MEMBER_IN_GROUP syscall.Errno = 1320
+ ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321
+ ERROR_LAST_ADMIN syscall.Errno = 1322
+ ERROR_WRONG_PASSWORD syscall.Errno = 1323
+ ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324
+ ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325
+ ERROR_LOGON_FAILURE syscall.Errno = 1326
+ ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327
+ ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328
+ ERROR_INVALID_WORKSTATION syscall.Errno = 1329
+ ERROR_PASSWORD_EXPIRED syscall.Errno = 1330
+ ERROR_ACCOUNT_DISABLED syscall.Errno = 1331
+ ERROR_NONE_MAPPED syscall.Errno = 1332
+ ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333
+ ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334
+ ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335
+ ERROR_INVALID_ACL syscall.Errno = 1336
+ ERROR_INVALID_SID syscall.Errno = 1337
+ ERROR_INVALID_SECURITY_DESCR syscall.Errno = 1338
+ ERROR_BAD_INHERITANCE_ACL syscall.Errno = 1340
+ ERROR_SERVER_DISABLED syscall.Errno = 1341
+ ERROR_SERVER_NOT_DISABLED syscall.Errno = 1342
+ ERROR_INVALID_ID_AUTHORITY syscall.Errno = 1343
+ ERROR_ALLOTTED_SPACE_EXCEEDED syscall.Errno = 1344
+ ERROR_INVALID_GROUP_ATTRIBUTES syscall.Errno = 1345
+ ERROR_BAD_IMPERSONATION_LEVEL syscall.Errno = 1346
+ ERROR_CANT_OPEN_ANONYMOUS syscall.Errno = 1347
+ ERROR_BAD_VALIDATION_CLASS syscall.Errno = 1348
+ ERROR_BAD_TOKEN_TYPE syscall.Errno = 1349
+ ERROR_NO_SECURITY_ON_OBJECT syscall.Errno = 1350
+ ERROR_CANT_ACCESS_DOMAIN_INFO syscall.Errno = 1351
+ ERROR_INVALID_SERVER_STATE syscall.Errno = 1352
+ ERROR_INVALID_DOMAIN_STATE syscall.Errno = 1353
+ ERROR_INVALID_DOMAIN_ROLE syscall.Errno = 1354
+ ERROR_NO_SUCH_DOMAIN syscall.Errno = 1355
+ ERROR_DOMAIN_EXISTS syscall.Errno = 1356
+ ERROR_DOMAIN_LIMIT_EXCEEDED syscall.Errno = 1357
+ ERROR_INTERNAL_DB_CORRUPTION syscall.Errno = 1358
+ ERROR_INTERNAL_ERROR syscall.Errno = 1359
+ ERROR_GENERIC_NOT_MAPPED syscall.Errno = 1360
+ ERROR_BAD_DESCRIPTOR_FORMAT syscall.Errno = 1361
+ ERROR_NOT_LOGON_PROCESS syscall.Errno = 1362
+ ERROR_LOGON_SESSION_EXISTS syscall.Errno = 1363
+ ERROR_NO_SUCH_PACKAGE syscall.Errno = 1364
+ ERROR_BAD_LOGON_SESSION_STATE syscall.Errno = 1365
+ ERROR_LOGON_SESSION_COLLISION syscall.Errno = 1366
+ ERROR_INVALID_LOGON_TYPE syscall.Errno = 1367
+ ERROR_CANNOT_IMPERSONATE syscall.Errno = 1368
+ ERROR_RXACT_INVALID_STATE syscall.Errno = 1369
+ ERROR_RXACT_COMMIT_FAILURE syscall.Errno = 1370
+ ERROR_SPECIAL_ACCOUNT syscall.Errno = 1371
+ ERROR_SPECIAL_GROUP syscall.Errno = 1372
+ ERROR_SPECIAL_USER syscall.Errno = 1373
+ ERROR_MEMBERS_PRIMARY_GROUP syscall.Errno = 1374
+ ERROR_TOKEN_ALREADY_IN_USE syscall.Errno = 1375
+ ERROR_NO_SUCH_ALIAS syscall.Errno = 1376
+ ERROR_MEMBER_NOT_IN_ALIAS syscall.Errno = 1377
+ ERROR_MEMBER_IN_ALIAS syscall.Errno = 1378
+ ERROR_ALIAS_EXISTS syscall.Errno = 1379
+ ERROR_LOGON_NOT_GRANTED syscall.Errno = 1380
+ ERROR_TOO_MANY_SECRETS syscall.Errno = 1381
+ ERROR_SECRET_TOO_LONG syscall.Errno = 1382
+ ERROR_INTERNAL_DB_ERROR syscall.Errno = 1383
+ ERROR_TOO_MANY_CONTEXT_IDS syscall.Errno = 1384
+ ERROR_LOGON_TYPE_NOT_GRANTED syscall.Errno = 1385
+ ERROR_NT_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1386
+ ERROR_NO_SUCH_MEMBER syscall.Errno = 1387
+ ERROR_INVALID_MEMBER syscall.Errno = 1388
+ ERROR_TOO_MANY_SIDS syscall.Errno = 1389
+ ERROR_LM_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1390
+ ERROR_NO_INHERITANCE syscall.Errno = 1391
+ ERROR_FILE_CORRUPT syscall.Errno = 1392
+ ERROR_DISK_CORRUPT syscall.Errno = 1393
+ ERROR_NO_USER_SESSION_KEY syscall.Errno = 1394
+ ERROR_LICENSE_QUOTA_EXCEEDED syscall.Errno = 1395
+ ERROR_WRONG_TARGET_NAME syscall.Errno = 1396
+ ERROR_MUTUAL_AUTH_FAILED syscall.Errno = 1397
+ ERROR_TIME_SKEW syscall.Errno = 1398
+ ERROR_CURRENT_DOMAIN_NOT_ALLOWED syscall.Errno = 1399
+ ERROR_INVALID_WINDOW_HANDLE syscall.Errno = 1400
+ ERROR_INVALID_MENU_HANDLE syscall.Errno = 1401
+ ERROR_INVALID_CURSOR_HANDLE syscall.Errno = 1402
+ ERROR_INVALID_ACCEL_HANDLE syscall.Errno = 1403
+ ERROR_INVALID_HOOK_HANDLE syscall.Errno = 1404
+ ERROR_INVALID_DWP_HANDLE syscall.Errno = 1405
+ ERROR_TLW_WITH_WSCHILD syscall.Errno = 1406
+ ERROR_CANNOT_FIND_WND_CLASS syscall.Errno = 1407
+ ERROR_WINDOW_OF_OTHER_THREAD syscall.Errno = 1408
+ ERROR_HOTKEY_ALREADY_REGISTERED syscall.Errno = 1409
+ ERROR_CLASS_ALREADY_EXISTS syscall.Errno = 1410
+ ERROR_CLASS_DOES_NOT_EXIST syscall.Errno = 1411
+ ERROR_CLASS_HAS_WINDOWS syscall.Errno = 1412
+ ERROR_INVALID_INDEX syscall.Errno = 1413
+ ERROR_INVALID_ICON_HANDLE syscall.Errno = 1414
+ ERROR_PRIVATE_DIALOG_INDEX syscall.Errno = 1415
+ ERROR_LISTBOX_ID_NOT_FOUND syscall.Errno = 1416
+ ERROR_NO_WILDCARD_CHARACTERS syscall.Errno = 1417
+ ERROR_CLIPBOARD_NOT_OPEN syscall.Errno = 1418
+ ERROR_HOTKEY_NOT_REGISTERED syscall.Errno = 1419
+ ERROR_WINDOW_NOT_DIALOG syscall.Errno = 1420
+ ERROR_CONTROL_ID_NOT_FOUND syscall.Errno = 1421
+ ERROR_INVALID_COMBOBOX_MESSAGE syscall.Errno = 1422
+ ERROR_WINDOW_NOT_COMBOBOX syscall.Errno = 1423
+ ERROR_INVALID_EDIT_HEIGHT syscall.Errno = 1424
+ ERROR_DC_NOT_FOUND syscall.Errno = 1425
+ ERROR_INVALID_HOOK_FILTER syscall.Errno = 1426
+ ERROR_INVALID_FILTER_PROC syscall.Errno = 1427
+ ERROR_HOOK_NEEDS_HMOD syscall.Errno = 1428
+ ERROR_GLOBAL_ONLY_HOOK syscall.Errno = 1429
+ ERROR_JOURNAL_HOOK_SET syscall.Errno = 1430
+ ERROR_HOOK_NOT_INSTALLED syscall.Errno = 1431
+ ERROR_INVALID_LB_MESSAGE syscall.Errno = 1432
+ ERROR_SETCOUNT_ON_BAD_LB syscall.Errno = 1433
+ ERROR_LB_WITHOUT_TABSTOPS syscall.Errno = 1434
+ ERROR_DESTROY_OBJECT_OF_OTHER_THREAD syscall.Errno = 1435
+ ERROR_CHILD_WINDOW_MENU syscall.Errno = 1436
+ ERROR_NO_SYSTEM_MENU syscall.Errno = 1437
+ ERROR_INVALID_MSGBOX_STYLE syscall.Errno = 1438
+ ERROR_INVALID_SPI_VALUE syscall.Errno = 1439
+ ERROR_SCREEN_ALREADY_LOCKED syscall.Errno = 1440
+ ERROR_HWNDS_HAVE_DIFF_PARENT syscall.Errno = 1441
+ ERROR_NOT_CHILD_WINDOW syscall.Errno = 1442
+ ERROR_INVALID_GW_COMMAND syscall.Errno = 1443
+ ERROR_INVALID_THREAD_ID syscall.Errno = 1444
+ ERROR_NON_MDICHILD_WINDOW syscall.Errno = 1445
+ ERROR_POPUP_ALREADY_ACTIVE syscall.Errno = 1446
+ ERROR_NO_SCROLLBARS syscall.Errno = 1447
+ ERROR_INVALID_SCROLLBAR_RANGE syscall.Errno = 1448
+ ERROR_INVALID_SHOWWIN_COMMAND syscall.Errno = 1449
+ ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450
+ ERROR_NONPAGED_SYSTEM_RESOURCES syscall.Errno = 1451
+ ERROR_PAGED_SYSTEM_RESOURCES syscall.Errno = 1452
+ ERROR_WORKING_SET_QUOTA syscall.Errno = 1453
+ ERROR_PAGEFILE_QUOTA syscall.Errno = 1454
+ ERROR_COMMITMENT_LIMIT syscall.Errno = 1455
+ ERROR_MENU_ITEM_NOT_FOUND syscall.Errno = 1456
+ ERROR_INVALID_KEYBOARD_HANDLE syscall.Errno = 1457
+ ERROR_HOOK_TYPE_NOT_ALLOWED syscall.Errno = 1458
+ ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION syscall.Errno = 1459
+ ERROR_TIMEOUT syscall.Errno = 1460
+ ERROR_INVALID_MONITOR_HANDLE syscall.Errno = 1461
+ ERROR_INCORRECT_SIZE syscall.Errno = 1462
+ ERROR_SYMLINK_CLASS_DISABLED syscall.Errno = 1463
+ ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464
+ ERROR_XML_PARSE_ERROR syscall.Errno = 1465
+ ERROR_XMLDSIG_ERROR syscall.Errno = 1466
+ ERROR_RESTART_APPLICATION syscall.Errno = 1467
+ ERROR_WRONG_COMPARTMENT syscall.Errno = 1468
+ ERROR_AUTHIP_FAILURE syscall.Errno = 1469
+ ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470
+ ERROR_NOT_GUI_PROCESS syscall.Errno = 1471
+ ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500
+ ERROR_EVENTLOG_CANT_START syscall.Errno = 1501
+ ERROR_LOG_FILE_FULL syscall.Errno = 1502
+ ERROR_EVENTLOG_FILE_CHANGED syscall.Errno = 1503
+ ERROR_CONTAINER_ASSIGNED syscall.Errno = 1504
+ ERROR_JOB_NO_CONTAINER syscall.Errno = 1505
+ ERROR_INVALID_TASK_NAME syscall.Errno = 1550
+ ERROR_INVALID_TASK_INDEX syscall.Errno = 1551
+ ERROR_THREAD_ALREADY_IN_TASK syscall.Errno = 1552
+ ERROR_INSTALL_SERVICE_FAILURE syscall.Errno = 1601
+ ERROR_INSTALL_USEREXIT syscall.Errno = 1602
+ ERROR_INSTALL_FAILURE syscall.Errno = 1603
+ ERROR_INSTALL_SUSPEND syscall.Errno = 1604
+ ERROR_UNKNOWN_PRODUCT syscall.Errno = 1605
+ ERROR_UNKNOWN_FEATURE syscall.Errno = 1606
+ ERROR_UNKNOWN_COMPONENT syscall.Errno = 1607
+ ERROR_UNKNOWN_PROPERTY syscall.Errno = 1608
+ ERROR_INVALID_HANDLE_STATE syscall.Errno = 1609
+ ERROR_BAD_CONFIGURATION syscall.Errno = 1610
+ ERROR_INDEX_ABSENT syscall.Errno = 1611
+ ERROR_INSTALL_SOURCE_ABSENT syscall.Errno = 1612
+ ERROR_INSTALL_PACKAGE_VERSION syscall.Errno = 1613
+ ERROR_PRODUCT_UNINSTALLED syscall.Errno = 1614
+ ERROR_BAD_QUERY_SYNTAX syscall.Errno = 1615
+ ERROR_INVALID_FIELD syscall.Errno = 1616
+ ERROR_DEVICE_REMOVED syscall.Errno = 1617
+ ERROR_INSTALL_ALREADY_RUNNING syscall.Errno = 1618
+ ERROR_INSTALL_PACKAGE_OPEN_FAILED syscall.Errno = 1619
+ ERROR_INSTALL_PACKAGE_INVALID syscall.Errno = 1620
+ ERROR_INSTALL_UI_FAILURE syscall.Errno = 1621
+ ERROR_INSTALL_LOG_FAILURE syscall.Errno = 1622
+ ERROR_INSTALL_LANGUAGE_UNSUPPORTED syscall.Errno = 1623
+ ERROR_INSTALL_TRANSFORM_FAILURE syscall.Errno = 1624
+ ERROR_INSTALL_PACKAGE_REJECTED syscall.Errno = 1625
+ ERROR_FUNCTION_NOT_CALLED syscall.Errno = 1626
+ ERROR_FUNCTION_FAILED syscall.Errno = 1627
+ ERROR_INVALID_TABLE syscall.Errno = 1628
+ ERROR_DATATYPE_MISMATCH syscall.Errno = 1629
+ ERROR_UNSUPPORTED_TYPE syscall.Errno = 1630
+ ERROR_CREATE_FAILED syscall.Errno = 1631
+ ERROR_INSTALL_TEMP_UNWRITABLE syscall.Errno = 1632
+ ERROR_INSTALL_PLATFORM_UNSUPPORTED syscall.Errno = 1633
+ ERROR_INSTALL_NOTUSED syscall.Errno = 1634
+ ERROR_PATCH_PACKAGE_OPEN_FAILED syscall.Errno = 1635
+ ERROR_PATCH_PACKAGE_INVALID syscall.Errno = 1636
+ ERROR_PATCH_PACKAGE_UNSUPPORTED syscall.Errno = 1637
+ ERROR_PRODUCT_VERSION syscall.Errno = 1638
+ ERROR_INVALID_COMMAND_LINE syscall.Errno = 1639
+ ERROR_INSTALL_REMOTE_DISALLOWED syscall.Errno = 1640
+ ERROR_SUCCESS_REBOOT_INITIATED syscall.Errno = 1641
+ ERROR_PATCH_TARGET_NOT_FOUND syscall.Errno = 1642
+ ERROR_PATCH_PACKAGE_REJECTED syscall.Errno = 1643
+ ERROR_INSTALL_TRANSFORM_REJECTED syscall.Errno = 1644
+ ERROR_INSTALL_REMOTE_PROHIBITED syscall.Errno = 1645
+ ERROR_PATCH_REMOVAL_UNSUPPORTED syscall.Errno = 1646
+ ERROR_UNKNOWN_PATCH syscall.Errno = 1647
+ ERROR_PATCH_NO_SEQUENCE syscall.Errno = 1648
+ ERROR_PATCH_REMOVAL_DISALLOWED syscall.Errno = 1649
+ ERROR_INVALID_PATCH_XML syscall.Errno = 1650
+ ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT syscall.Errno = 1651
+ ERROR_INSTALL_SERVICE_SAFEBOOT syscall.Errno = 1652
+ ERROR_FAIL_FAST_EXCEPTION syscall.Errno = 1653
+ ERROR_INSTALL_REJECTED syscall.Errno = 1654
+ ERROR_DYNAMIC_CODE_BLOCKED syscall.Errno = 1655
+ ERROR_NOT_SAME_OBJECT syscall.Errno = 1656
+ ERROR_STRICT_CFG_VIOLATION syscall.Errno = 1657
+ ERROR_SET_CONTEXT_DENIED syscall.Errno = 1660
+ ERROR_CROSS_PARTITION_VIOLATION syscall.Errno = 1661
+ RPC_S_INVALID_STRING_BINDING syscall.Errno = 1700
+ RPC_S_WRONG_KIND_OF_BINDING syscall.Errno = 1701
+ RPC_S_INVALID_BINDING syscall.Errno = 1702
+ RPC_S_PROTSEQ_NOT_SUPPORTED syscall.Errno = 1703
+ RPC_S_INVALID_RPC_PROTSEQ syscall.Errno = 1704
+ RPC_S_INVALID_STRING_UUID syscall.Errno = 1705
+ RPC_S_INVALID_ENDPOINT_FORMAT syscall.Errno = 1706
+ RPC_S_INVALID_NET_ADDR syscall.Errno = 1707
+ RPC_S_NO_ENDPOINT_FOUND syscall.Errno = 1708
+ RPC_S_INVALID_TIMEOUT syscall.Errno = 1709
+ RPC_S_OBJECT_NOT_FOUND syscall.Errno = 1710
+ RPC_S_ALREADY_REGISTERED syscall.Errno = 1711
+ RPC_S_TYPE_ALREADY_REGISTERED syscall.Errno = 1712
+ RPC_S_ALREADY_LISTENING syscall.Errno = 1713
+ RPC_S_NO_PROTSEQS_REGISTERED syscall.Errno = 1714
+ RPC_S_NOT_LISTENING syscall.Errno = 1715
+ RPC_S_UNKNOWN_MGR_TYPE syscall.Errno = 1716
+ RPC_S_UNKNOWN_IF syscall.Errno = 1717
+ RPC_S_NO_BINDINGS syscall.Errno = 1718
+ RPC_S_NO_PROTSEQS syscall.Errno = 1719
+ RPC_S_CANT_CREATE_ENDPOINT syscall.Errno = 1720
+ RPC_S_OUT_OF_RESOURCES syscall.Errno = 1721
+ RPC_S_SERVER_UNAVAILABLE syscall.Errno = 1722
+ RPC_S_SERVER_TOO_BUSY syscall.Errno = 1723
+ RPC_S_INVALID_NETWORK_OPTIONS syscall.Errno = 1724
+ RPC_S_NO_CALL_ACTIVE syscall.Errno = 1725
+ RPC_S_CALL_FAILED syscall.Errno = 1726
+ RPC_S_CALL_FAILED_DNE syscall.Errno = 1727
+ RPC_S_PROTOCOL_ERROR syscall.Errno = 1728
+ RPC_S_PROXY_ACCESS_DENIED syscall.Errno = 1729
+ RPC_S_UNSUPPORTED_TRANS_SYN syscall.Errno = 1730
+ RPC_S_UNSUPPORTED_TYPE syscall.Errno = 1732
+ RPC_S_INVALID_TAG syscall.Errno = 1733
+ RPC_S_INVALID_BOUND syscall.Errno = 1734
+ RPC_S_NO_ENTRY_NAME syscall.Errno = 1735
+ RPC_S_INVALID_NAME_SYNTAX syscall.Errno = 1736
+ RPC_S_UNSUPPORTED_NAME_SYNTAX syscall.Errno = 1737
+ RPC_S_UUID_NO_ADDRESS syscall.Errno = 1739
+ RPC_S_DUPLICATE_ENDPOINT syscall.Errno = 1740
+ RPC_S_UNKNOWN_AUTHN_TYPE syscall.Errno = 1741
+ RPC_S_MAX_CALLS_TOO_SMALL syscall.Errno = 1742
+ RPC_S_STRING_TOO_LONG syscall.Errno = 1743
+ RPC_S_PROTSEQ_NOT_FOUND syscall.Errno = 1744
+ RPC_S_PROCNUM_OUT_OF_RANGE syscall.Errno = 1745
+ RPC_S_BINDING_HAS_NO_AUTH syscall.Errno = 1746
+ RPC_S_UNKNOWN_AUTHN_SERVICE syscall.Errno = 1747
+ RPC_S_UNKNOWN_AUTHN_LEVEL syscall.Errno = 1748
+ RPC_S_INVALID_AUTH_IDENTITY syscall.Errno = 1749
+ RPC_S_UNKNOWN_AUTHZ_SERVICE syscall.Errno = 1750
+ EPT_S_INVALID_ENTRY syscall.Errno = 1751
+ EPT_S_CANT_PERFORM_OP syscall.Errno = 1752
+ EPT_S_NOT_REGISTERED syscall.Errno = 1753
+ RPC_S_NOTHING_TO_EXPORT syscall.Errno = 1754
+ RPC_S_INCOMPLETE_NAME syscall.Errno = 1755
+ RPC_S_INVALID_VERS_OPTION syscall.Errno = 1756
+ RPC_S_NO_MORE_MEMBERS syscall.Errno = 1757
+ RPC_S_NOT_ALL_OBJS_UNEXPORTED syscall.Errno = 1758
+ RPC_S_INTERFACE_NOT_FOUND syscall.Errno = 1759
+ RPC_S_ENTRY_ALREADY_EXISTS syscall.Errno = 1760
+ RPC_S_ENTRY_NOT_FOUND syscall.Errno = 1761
+ RPC_S_NAME_SERVICE_UNAVAILABLE syscall.Errno = 1762
+ RPC_S_INVALID_NAF_ID syscall.Errno = 1763
+ RPC_S_CANNOT_SUPPORT syscall.Errno = 1764
+ RPC_S_NO_CONTEXT_AVAILABLE syscall.Errno = 1765
+ RPC_S_INTERNAL_ERROR syscall.Errno = 1766
+ RPC_S_ZERO_DIVIDE syscall.Errno = 1767
+ RPC_S_ADDRESS_ERROR syscall.Errno = 1768
+ RPC_S_FP_DIV_ZERO syscall.Errno = 1769
+ RPC_S_FP_UNDERFLOW syscall.Errno = 1770
+ RPC_S_FP_OVERFLOW syscall.Errno = 1771
+ RPC_X_NO_MORE_ENTRIES syscall.Errno = 1772
+ RPC_X_SS_CHAR_TRANS_OPEN_FAIL syscall.Errno = 1773
+ RPC_X_SS_CHAR_TRANS_SHORT_FILE syscall.Errno = 1774
+ RPC_X_SS_IN_NULL_CONTEXT syscall.Errno = 1775
+ RPC_X_SS_CONTEXT_DAMAGED syscall.Errno = 1777
+ RPC_X_SS_HANDLES_MISMATCH syscall.Errno = 1778
+ RPC_X_SS_CANNOT_GET_CALL_HANDLE syscall.Errno = 1779
+ RPC_X_NULL_REF_POINTER syscall.Errno = 1780
+ RPC_X_ENUM_VALUE_OUT_OF_RANGE syscall.Errno = 1781
+ RPC_X_BYTE_COUNT_TOO_SMALL syscall.Errno = 1782
+ RPC_X_BAD_STUB_DATA syscall.Errno = 1783
+ ERROR_INVALID_USER_BUFFER syscall.Errno = 1784
+ ERROR_UNRECOGNIZED_MEDIA syscall.Errno = 1785
+ ERROR_NO_TRUST_LSA_SECRET syscall.Errno = 1786
+ ERROR_NO_TRUST_SAM_ACCOUNT syscall.Errno = 1787
+ ERROR_TRUSTED_DOMAIN_FAILURE syscall.Errno = 1788
+ ERROR_TRUSTED_RELATIONSHIP_FAILURE syscall.Errno = 1789
+ ERROR_TRUST_FAILURE syscall.Errno = 1790
+ RPC_S_CALL_IN_PROGRESS syscall.Errno = 1791
+ ERROR_NETLOGON_NOT_STARTED syscall.Errno = 1792
+ ERROR_ACCOUNT_EXPIRED syscall.Errno = 1793
+ ERROR_REDIRECTOR_HAS_OPEN_HANDLES syscall.Errno = 1794
+ ERROR_PRINTER_DRIVER_ALREADY_INSTALLED syscall.Errno = 1795
+ ERROR_UNKNOWN_PORT syscall.Errno = 1796
+ ERROR_UNKNOWN_PRINTER_DRIVER syscall.Errno = 1797
+ ERROR_UNKNOWN_PRINTPROCESSOR syscall.Errno = 1798
+ ERROR_INVALID_SEPARATOR_FILE syscall.Errno = 1799
+ ERROR_INVALID_PRIORITY syscall.Errno = 1800
+ ERROR_INVALID_PRINTER_NAME syscall.Errno = 1801
+ ERROR_PRINTER_ALREADY_EXISTS syscall.Errno = 1802
+ ERROR_INVALID_PRINTER_COMMAND syscall.Errno = 1803
+ ERROR_INVALID_DATATYPE syscall.Errno = 1804
+ ERROR_INVALID_ENVIRONMENT syscall.Errno = 1805
+ RPC_S_NO_MORE_BINDINGS syscall.Errno = 1806
+ ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT syscall.Errno = 1807
+ ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT syscall.Errno = 1808
+ ERROR_NOLOGON_SERVER_TRUST_ACCOUNT syscall.Errno = 1809
+ ERROR_DOMAIN_TRUST_INCONSISTENT syscall.Errno = 1810
+ ERROR_SERVER_HAS_OPEN_HANDLES syscall.Errno = 1811
+ ERROR_RESOURCE_DATA_NOT_FOUND syscall.Errno = 1812
+ ERROR_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 1813
+ ERROR_RESOURCE_NAME_NOT_FOUND syscall.Errno = 1814
+ ERROR_RESOURCE_LANG_NOT_FOUND syscall.Errno = 1815
+ ERROR_NOT_ENOUGH_QUOTA syscall.Errno = 1816
+ RPC_S_NO_INTERFACES syscall.Errno = 1817
+ RPC_S_CALL_CANCELLED syscall.Errno = 1818
+ RPC_S_BINDING_INCOMPLETE syscall.Errno = 1819
+ RPC_S_COMM_FAILURE syscall.Errno = 1820
+ RPC_S_UNSUPPORTED_AUTHN_LEVEL syscall.Errno = 1821
+ RPC_S_NO_PRINC_NAME syscall.Errno = 1822
+ RPC_S_NOT_RPC_ERROR syscall.Errno = 1823
+ RPC_S_UUID_LOCAL_ONLY syscall.Errno = 1824
+ RPC_S_SEC_PKG_ERROR syscall.Errno = 1825
+ RPC_S_NOT_CANCELLED syscall.Errno = 1826
+ RPC_X_INVALID_ES_ACTION syscall.Errno = 1827
+ RPC_X_WRONG_ES_VERSION syscall.Errno = 1828
+ RPC_X_WRONG_STUB_VERSION syscall.Errno = 1829
+ RPC_X_INVALID_PIPE_OBJECT syscall.Errno = 1830
+ RPC_X_WRONG_PIPE_ORDER syscall.Errno = 1831
+ RPC_X_WRONG_PIPE_VERSION syscall.Errno = 1832
+ RPC_S_COOKIE_AUTH_FAILED syscall.Errno = 1833
+ RPC_S_DO_NOT_DISTURB syscall.Errno = 1834
+ RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED syscall.Errno = 1835
+ RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH syscall.Errno = 1836
+ RPC_S_GROUP_MEMBER_NOT_FOUND syscall.Errno = 1898
+ EPT_S_CANT_CREATE syscall.Errno = 1899
+ RPC_S_INVALID_OBJECT syscall.Errno = 1900
+ ERROR_INVALID_TIME syscall.Errno = 1901
+ ERROR_INVALID_FORM_NAME syscall.Errno = 1902
+ ERROR_INVALID_FORM_SIZE syscall.Errno = 1903
+ ERROR_ALREADY_WAITING syscall.Errno = 1904
+ ERROR_PRINTER_DELETED syscall.Errno = 1905
+ ERROR_INVALID_PRINTER_STATE syscall.Errno = 1906
+ ERROR_PASSWORD_MUST_CHANGE syscall.Errno = 1907
+ ERROR_DOMAIN_CONTROLLER_NOT_FOUND syscall.Errno = 1908
+ ERROR_ACCOUNT_LOCKED_OUT syscall.Errno = 1909
+ OR_INVALID_OXID syscall.Errno = 1910
+ OR_INVALID_OID syscall.Errno = 1911
+ OR_INVALID_SET syscall.Errno = 1912
+ RPC_S_SEND_INCOMPLETE syscall.Errno = 1913
+ RPC_S_INVALID_ASYNC_HANDLE syscall.Errno = 1914
+ RPC_S_INVALID_ASYNC_CALL syscall.Errno = 1915
+ RPC_X_PIPE_CLOSED syscall.Errno = 1916
+ RPC_X_PIPE_DISCIPLINE_ERROR syscall.Errno = 1917
+ RPC_X_PIPE_EMPTY syscall.Errno = 1918
+ ERROR_NO_SITENAME syscall.Errno = 1919
+ ERROR_CANT_ACCESS_FILE syscall.Errno = 1920
+ ERROR_CANT_RESOLVE_FILENAME syscall.Errno = 1921
+ RPC_S_ENTRY_TYPE_MISMATCH syscall.Errno = 1922
+ RPC_S_NOT_ALL_OBJS_EXPORTED syscall.Errno = 1923
+ RPC_S_INTERFACE_NOT_EXPORTED syscall.Errno = 1924
+ RPC_S_PROFILE_NOT_ADDED syscall.Errno = 1925
+ RPC_S_PRF_ELT_NOT_ADDED syscall.Errno = 1926
+ RPC_S_PRF_ELT_NOT_REMOVED syscall.Errno = 1927
+ RPC_S_GRP_ELT_NOT_ADDED syscall.Errno = 1928
+ RPC_S_GRP_ELT_NOT_REMOVED syscall.Errno = 1929
+ ERROR_KM_DRIVER_BLOCKED syscall.Errno = 1930
+ ERROR_CONTEXT_EXPIRED syscall.Errno = 1931
+ ERROR_PER_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1932
+ ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1933
+ ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED syscall.Errno = 1934
+ ERROR_AUTHENTICATION_FIREWALL_FAILED syscall.Errno = 1935
+ ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED syscall.Errno = 1936
+ ERROR_NTLM_BLOCKED syscall.Errno = 1937
+ ERROR_PASSWORD_CHANGE_REQUIRED syscall.Errno = 1938
+ ERROR_LOST_MODE_LOGON_RESTRICTION syscall.Errno = 1939
+ ERROR_INVALID_PIXEL_FORMAT syscall.Errno = 2000
+ ERROR_BAD_DRIVER syscall.Errno = 2001
+ ERROR_INVALID_WINDOW_STYLE syscall.Errno = 2002
+ ERROR_METAFILE_NOT_SUPPORTED syscall.Errno = 2003
+ ERROR_TRANSFORM_NOT_SUPPORTED syscall.Errno = 2004
+ ERROR_CLIPPING_NOT_SUPPORTED syscall.Errno = 2005
+ ERROR_INVALID_CMM syscall.Errno = 2010
+ ERROR_INVALID_PROFILE syscall.Errno = 2011
+ ERROR_TAG_NOT_FOUND syscall.Errno = 2012
+ ERROR_TAG_NOT_PRESENT syscall.Errno = 2013
+ ERROR_DUPLICATE_TAG syscall.Errno = 2014
+ ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE syscall.Errno = 2015
+ ERROR_PROFILE_NOT_FOUND syscall.Errno = 2016
+ ERROR_INVALID_COLORSPACE syscall.Errno = 2017
+ ERROR_ICM_NOT_ENABLED syscall.Errno = 2018
+ ERROR_DELETING_ICM_XFORM syscall.Errno = 2019
+ ERROR_INVALID_TRANSFORM syscall.Errno = 2020
+ ERROR_COLORSPACE_MISMATCH syscall.Errno = 2021
+ ERROR_INVALID_COLORINDEX syscall.Errno = 2022
+ ERROR_PROFILE_DOES_NOT_MATCH_DEVICE syscall.Errno = 2023
+ ERROR_CONNECTED_OTHER_PASSWORD syscall.Errno = 2108
+ ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT syscall.Errno = 2109
+ ERROR_BAD_USERNAME syscall.Errno = 2202
+ ERROR_NOT_CONNECTED syscall.Errno = 2250
+ ERROR_OPEN_FILES syscall.Errno = 2401
+ ERROR_ACTIVE_CONNECTIONS syscall.Errno = 2402
+ ERROR_DEVICE_IN_USE syscall.Errno = 2404
+ ERROR_UNKNOWN_PRINT_MONITOR syscall.Errno = 3000
+ ERROR_PRINTER_DRIVER_IN_USE syscall.Errno = 3001
+ ERROR_SPOOL_FILE_NOT_FOUND syscall.Errno = 3002
+ ERROR_SPL_NO_STARTDOC syscall.Errno = 3003
+ ERROR_SPL_NO_ADDJOB syscall.Errno = 3004
+ ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED syscall.Errno = 3005
+ ERROR_PRINT_MONITOR_ALREADY_INSTALLED syscall.Errno = 3006
+ ERROR_INVALID_PRINT_MONITOR syscall.Errno = 3007
+ ERROR_PRINT_MONITOR_IN_USE syscall.Errno = 3008
+ ERROR_PRINTER_HAS_JOBS_QUEUED syscall.Errno = 3009
+ ERROR_SUCCESS_REBOOT_REQUIRED syscall.Errno = 3010
+ ERROR_SUCCESS_RESTART_REQUIRED syscall.Errno = 3011
+ ERROR_PRINTER_NOT_FOUND syscall.Errno = 3012
+ ERROR_PRINTER_DRIVER_WARNED syscall.Errno = 3013
+ ERROR_PRINTER_DRIVER_BLOCKED syscall.Errno = 3014
+ ERROR_PRINTER_DRIVER_PACKAGE_IN_USE syscall.Errno = 3015
+ ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND syscall.Errno = 3016
+ ERROR_FAIL_REBOOT_REQUIRED syscall.Errno = 3017
+ ERROR_FAIL_REBOOT_INITIATED syscall.Errno = 3018
+ ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED syscall.Errno = 3019
+ ERROR_PRINT_JOB_RESTART_REQUIRED syscall.Errno = 3020
+ ERROR_INVALID_PRINTER_DRIVER_MANIFEST syscall.Errno = 3021
+ ERROR_PRINTER_NOT_SHAREABLE syscall.Errno = 3022
+ ERROR_REQUEST_PAUSED syscall.Errno = 3050
+ ERROR_APPEXEC_CONDITION_NOT_SATISFIED syscall.Errno = 3060
+ ERROR_APPEXEC_HANDLE_INVALIDATED syscall.Errno = 3061
+ ERROR_APPEXEC_INVALID_HOST_GENERATION syscall.Errno = 3062
+ ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION syscall.Errno = 3063
+ ERROR_APPEXEC_INVALID_HOST_STATE syscall.Errno = 3064
+ ERROR_APPEXEC_NO_DONOR syscall.Errno = 3065
+ ERROR_APPEXEC_HOST_ID_MISMATCH syscall.Errno = 3066
+ ERROR_APPEXEC_UNKNOWN_USER syscall.Errno = 3067
+ ERROR_IO_REISSUE_AS_CACHED syscall.Errno = 3950
+ ERROR_WINS_INTERNAL syscall.Errno = 4000
+ ERROR_CAN_NOT_DEL_LOCAL_WINS syscall.Errno = 4001
+ ERROR_STATIC_INIT syscall.Errno = 4002
+ ERROR_INC_BACKUP syscall.Errno = 4003
+ ERROR_FULL_BACKUP syscall.Errno = 4004
+ ERROR_REC_NON_EXISTENT syscall.Errno = 4005
+ ERROR_RPL_NOT_ALLOWED syscall.Errno = 4006
+ PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED syscall.Errno = 4050
+ PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO syscall.Errno = 4051
+ PEERDIST_ERROR_MISSING_DATA syscall.Errno = 4052
+ PEERDIST_ERROR_NO_MORE syscall.Errno = 4053
+ PEERDIST_ERROR_NOT_INITIALIZED syscall.Errno = 4054
+ PEERDIST_ERROR_ALREADY_INITIALIZED syscall.Errno = 4055
+ PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 4056
+ PEERDIST_ERROR_INVALIDATED syscall.Errno = 4057
+ PEERDIST_ERROR_ALREADY_EXISTS syscall.Errno = 4058
+ PEERDIST_ERROR_OPERATION_NOTFOUND syscall.Errno = 4059
+ PEERDIST_ERROR_ALREADY_COMPLETED syscall.Errno = 4060
+ PEERDIST_ERROR_OUT_OF_BOUNDS syscall.Errno = 4061
+ PEERDIST_ERROR_VERSION_UNSUPPORTED syscall.Errno = 4062
+ PEERDIST_ERROR_INVALID_CONFIGURATION syscall.Errno = 4063
+ PEERDIST_ERROR_NOT_LICENSED syscall.Errno = 4064
+ PEERDIST_ERROR_SERVICE_UNAVAILABLE syscall.Errno = 4065
+ PEERDIST_ERROR_TRUST_FAILURE syscall.Errno = 4066
+ ERROR_DHCP_ADDRESS_CONFLICT syscall.Errno = 4100
+ ERROR_WMI_GUID_NOT_FOUND syscall.Errno = 4200
+ ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201
+ ERROR_WMI_ITEMID_NOT_FOUND syscall.Errno = 4202
+ ERROR_WMI_TRY_AGAIN syscall.Errno = 4203
+ ERROR_WMI_DP_NOT_FOUND syscall.Errno = 4204
+ ERROR_WMI_UNRESOLVED_INSTANCE_REF syscall.Errno = 4205
+ ERROR_WMI_ALREADY_ENABLED syscall.Errno = 4206
+ ERROR_WMI_GUID_DISCONNECTED syscall.Errno = 4207
+ ERROR_WMI_SERVER_UNAVAILABLE syscall.Errno = 4208
+ ERROR_WMI_DP_FAILED syscall.Errno = 4209
+ ERROR_WMI_INVALID_MOF syscall.Errno = 4210
+ ERROR_WMI_INVALID_REGINFO syscall.Errno = 4211
+ ERROR_WMI_ALREADY_DISABLED syscall.Errno = 4212
+ ERROR_WMI_READ_ONLY syscall.Errno = 4213
+ ERROR_WMI_SET_FAILURE syscall.Errno = 4214
+ ERROR_NOT_APPCONTAINER syscall.Errno = 4250
+ ERROR_APPCONTAINER_REQUIRED syscall.Errno = 4251
+ ERROR_NOT_SUPPORTED_IN_APPCONTAINER syscall.Errno = 4252
+ ERROR_INVALID_PACKAGE_SID_LENGTH syscall.Errno = 4253
+ ERROR_INVALID_MEDIA syscall.Errno = 4300
+ ERROR_INVALID_LIBRARY syscall.Errno = 4301
+ ERROR_INVALID_MEDIA_POOL syscall.Errno = 4302
+ ERROR_DRIVE_MEDIA_MISMATCH syscall.Errno = 4303
+ ERROR_MEDIA_OFFLINE syscall.Errno = 4304
+ ERROR_LIBRARY_OFFLINE syscall.Errno = 4305
+ ERROR_EMPTY syscall.Errno = 4306
+ ERROR_NOT_EMPTY syscall.Errno = 4307
+ ERROR_MEDIA_UNAVAILABLE syscall.Errno = 4308
+ ERROR_RESOURCE_DISABLED syscall.Errno = 4309
+ ERROR_INVALID_CLEANER syscall.Errno = 4310
+ ERROR_UNABLE_TO_CLEAN syscall.Errno = 4311
+ ERROR_OBJECT_NOT_FOUND syscall.Errno = 4312
+ ERROR_DATABASE_FAILURE syscall.Errno = 4313
+ ERROR_DATABASE_FULL syscall.Errno = 4314
+ ERROR_MEDIA_INCOMPATIBLE syscall.Errno = 4315
+ ERROR_RESOURCE_NOT_PRESENT syscall.Errno = 4316
+ ERROR_INVALID_OPERATION syscall.Errno = 4317
+ ERROR_MEDIA_NOT_AVAILABLE syscall.Errno = 4318
+ ERROR_DEVICE_NOT_AVAILABLE syscall.Errno = 4319
+ ERROR_REQUEST_REFUSED syscall.Errno = 4320
+ ERROR_INVALID_DRIVE_OBJECT syscall.Errno = 4321
+ ERROR_LIBRARY_FULL syscall.Errno = 4322
+ ERROR_MEDIUM_NOT_ACCESSIBLE syscall.Errno = 4323
+ ERROR_UNABLE_TO_LOAD_MEDIUM syscall.Errno = 4324
+ ERROR_UNABLE_TO_INVENTORY_DRIVE syscall.Errno = 4325
+ ERROR_UNABLE_TO_INVENTORY_SLOT syscall.Errno = 4326
+ ERROR_UNABLE_TO_INVENTORY_TRANSPORT syscall.Errno = 4327
+ ERROR_TRANSPORT_FULL syscall.Errno = 4328
+ ERROR_CONTROLLING_IEPORT syscall.Errno = 4329
+ ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA syscall.Errno = 4330
+ ERROR_CLEANER_SLOT_SET syscall.Errno = 4331
+ ERROR_CLEANER_SLOT_NOT_SET syscall.Errno = 4332
+ ERROR_CLEANER_CARTRIDGE_SPENT syscall.Errno = 4333
+ ERROR_UNEXPECTED_OMID syscall.Errno = 4334
+ ERROR_CANT_DELETE_LAST_ITEM syscall.Errno = 4335
+ ERROR_MESSAGE_EXCEEDS_MAX_SIZE syscall.Errno = 4336
+ ERROR_VOLUME_CONTAINS_SYS_FILES syscall.Errno = 4337
+ ERROR_INDIGENOUS_TYPE syscall.Errno = 4338
+ ERROR_NO_SUPPORTING_DRIVES syscall.Errno = 4339
+ ERROR_CLEANER_CARTRIDGE_INSTALLED syscall.Errno = 4340
+ ERROR_IEPORT_FULL syscall.Errno = 4341
+ ERROR_FILE_OFFLINE syscall.Errno = 4350
+ ERROR_REMOTE_STORAGE_NOT_ACTIVE syscall.Errno = 4351
+ ERROR_REMOTE_STORAGE_MEDIA_ERROR syscall.Errno = 4352
+ ERROR_NOT_A_REPARSE_POINT syscall.Errno = 4390
+ ERROR_REPARSE_ATTRIBUTE_CONFLICT syscall.Errno = 4391
+ ERROR_INVALID_REPARSE_DATA syscall.Errno = 4392
+ ERROR_REPARSE_TAG_INVALID syscall.Errno = 4393
+ ERROR_REPARSE_TAG_MISMATCH syscall.Errno = 4394
+ ERROR_REPARSE_POINT_ENCOUNTERED syscall.Errno = 4395
+ ERROR_APP_DATA_NOT_FOUND syscall.Errno = 4400
+ ERROR_APP_DATA_EXPIRED syscall.Errno = 4401
+ ERROR_APP_DATA_CORRUPT syscall.Errno = 4402
+ ERROR_APP_DATA_LIMIT_EXCEEDED syscall.Errno = 4403
+ ERROR_APP_DATA_REBOOT_REQUIRED syscall.Errno = 4404
+ ERROR_SECUREBOOT_ROLLBACK_DETECTED syscall.Errno = 4420
+ ERROR_SECUREBOOT_POLICY_VIOLATION syscall.Errno = 4421
+ ERROR_SECUREBOOT_INVALID_POLICY syscall.Errno = 4422
+ ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND syscall.Errno = 4423
+ ERROR_SECUREBOOT_POLICY_NOT_SIGNED syscall.Errno = 4424
+ ERROR_SECUREBOOT_NOT_ENABLED syscall.Errno = 4425
+ ERROR_SECUREBOOT_FILE_REPLACED syscall.Errno = 4426
+ ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED syscall.Errno = 4427
+ ERROR_SECUREBOOT_POLICY_UNKNOWN syscall.Errno = 4428
+ ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION syscall.Errno = 4429
+ ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH syscall.Errno = 4430
+ ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED syscall.Errno = 4431
+ ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH syscall.Errno = 4432
+ ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING syscall.Errno = 4433
+ ERROR_SECUREBOOT_NOT_BASE_POLICY syscall.Errno = 4434
+ ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY syscall.Errno = 4435
+ ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED syscall.Errno = 4440
+ ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED syscall.Errno = 4441
+ ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED syscall.Errno = 4442
+ ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED syscall.Errno = 4443
+ ERROR_ALREADY_HAS_STREAM_ID syscall.Errno = 4444
+ ERROR_SMR_GARBAGE_COLLECTION_REQUIRED syscall.Errno = 4445
+ ERROR_WOF_WIM_HEADER_CORRUPT syscall.Errno = 4446
+ ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT syscall.Errno = 4447
+ ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT syscall.Errno = 4448
+ ERROR_VOLUME_NOT_SIS_ENABLED syscall.Errno = 4500
+ ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED syscall.Errno = 4550
+ ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION syscall.Errno = 4551
+ ERROR_SYSTEM_INTEGRITY_INVALID_POLICY syscall.Errno = 4552
+ ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED syscall.Errno = 4553
+ ERROR_VSM_NOT_INITIALIZED syscall.Errno = 4560
+ ERROR_VSM_DMA_PROTECTION_NOT_IN_USE syscall.Errno = 4561
+ ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED syscall.Errno = 4570
+ ERROR_PLATFORM_MANIFEST_INVALID syscall.Errno = 4571
+ ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED syscall.Errno = 4572
+ ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED syscall.Errno = 4573
+ ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND syscall.Errno = 4574
+ ERROR_PLATFORM_MANIFEST_NOT_ACTIVE syscall.Errno = 4575
+ ERROR_PLATFORM_MANIFEST_NOT_SIGNED syscall.Errno = 4576
+ ERROR_DEPENDENT_RESOURCE_EXISTS syscall.Errno = 5001
+ ERROR_DEPENDENCY_NOT_FOUND syscall.Errno = 5002
+ ERROR_DEPENDENCY_ALREADY_EXISTS syscall.Errno = 5003
+ ERROR_RESOURCE_NOT_ONLINE syscall.Errno = 5004
+ ERROR_HOST_NODE_NOT_AVAILABLE syscall.Errno = 5005
+ ERROR_RESOURCE_NOT_AVAILABLE syscall.Errno = 5006
+ ERROR_RESOURCE_NOT_FOUND syscall.Errno = 5007
+ ERROR_SHUTDOWN_CLUSTER syscall.Errno = 5008
+ ERROR_CANT_EVICT_ACTIVE_NODE syscall.Errno = 5009
+ ERROR_OBJECT_ALREADY_EXISTS syscall.Errno = 5010
+ ERROR_OBJECT_IN_LIST syscall.Errno = 5011
+ ERROR_GROUP_NOT_AVAILABLE syscall.Errno = 5012
+ ERROR_GROUP_NOT_FOUND syscall.Errno = 5013
+ ERROR_GROUP_NOT_ONLINE syscall.Errno = 5014
+ ERROR_HOST_NODE_NOT_RESOURCE_OWNER syscall.Errno = 5015
+ ERROR_HOST_NODE_NOT_GROUP_OWNER syscall.Errno = 5016
+ ERROR_RESMON_CREATE_FAILED syscall.Errno = 5017
+ ERROR_RESMON_ONLINE_FAILED syscall.Errno = 5018
+ ERROR_RESOURCE_ONLINE syscall.Errno = 5019
+ ERROR_QUORUM_RESOURCE syscall.Errno = 5020
+ ERROR_NOT_QUORUM_CAPABLE syscall.Errno = 5021
+ ERROR_CLUSTER_SHUTTING_DOWN syscall.Errno = 5022
+ ERROR_INVALID_STATE syscall.Errno = 5023
+ ERROR_RESOURCE_PROPERTIES_STORED syscall.Errno = 5024
+ ERROR_NOT_QUORUM_CLASS syscall.Errno = 5025
+ ERROR_CORE_RESOURCE syscall.Errno = 5026
+ ERROR_QUORUM_RESOURCE_ONLINE_FAILED syscall.Errno = 5027
+ ERROR_QUORUMLOG_OPEN_FAILED syscall.Errno = 5028
+ ERROR_CLUSTERLOG_CORRUPT syscall.Errno = 5029
+ ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE syscall.Errno = 5030
+ ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE syscall.Errno = 5031
+ ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND syscall.Errno = 5032
+ ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE syscall.Errno = 5033
+ ERROR_QUORUM_OWNER_ALIVE syscall.Errno = 5034
+ ERROR_NETWORK_NOT_AVAILABLE syscall.Errno = 5035
+ ERROR_NODE_NOT_AVAILABLE syscall.Errno = 5036
+ ERROR_ALL_NODES_NOT_AVAILABLE syscall.Errno = 5037
+ ERROR_RESOURCE_FAILED syscall.Errno = 5038
+ ERROR_CLUSTER_INVALID_NODE syscall.Errno = 5039
+ ERROR_CLUSTER_NODE_EXISTS syscall.Errno = 5040
+ ERROR_CLUSTER_JOIN_IN_PROGRESS syscall.Errno = 5041
+ ERROR_CLUSTER_NODE_NOT_FOUND syscall.Errno = 5042
+ ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND syscall.Errno = 5043
+ ERROR_CLUSTER_NETWORK_EXISTS syscall.Errno = 5044
+ ERROR_CLUSTER_NETWORK_NOT_FOUND syscall.Errno = 5045
+ ERROR_CLUSTER_NETINTERFACE_EXISTS syscall.Errno = 5046
+ ERROR_CLUSTER_NETINTERFACE_NOT_FOUND syscall.Errno = 5047
+ ERROR_CLUSTER_INVALID_REQUEST syscall.Errno = 5048
+ ERROR_CLUSTER_INVALID_NETWORK_PROVIDER syscall.Errno = 5049
+ ERROR_CLUSTER_NODE_DOWN syscall.Errno = 5050
+ ERROR_CLUSTER_NODE_UNREACHABLE syscall.Errno = 5051
+ ERROR_CLUSTER_NODE_NOT_MEMBER syscall.Errno = 5052
+ ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS syscall.Errno = 5053
+ ERROR_CLUSTER_INVALID_NETWORK syscall.Errno = 5054
+ ERROR_CLUSTER_NODE_UP syscall.Errno = 5056
+ ERROR_CLUSTER_IPADDR_IN_USE syscall.Errno = 5057
+ ERROR_CLUSTER_NODE_NOT_PAUSED syscall.Errno = 5058
+ ERROR_CLUSTER_NO_SECURITY_CONTEXT syscall.Errno = 5059
+ ERROR_CLUSTER_NETWORK_NOT_INTERNAL syscall.Errno = 5060
+ ERROR_CLUSTER_NODE_ALREADY_UP syscall.Errno = 5061
+ ERROR_CLUSTER_NODE_ALREADY_DOWN syscall.Errno = 5062
+ ERROR_CLUSTER_NETWORK_ALREADY_ONLINE syscall.Errno = 5063
+ ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE syscall.Errno = 5064
+ ERROR_CLUSTER_NODE_ALREADY_MEMBER syscall.Errno = 5065
+ ERROR_CLUSTER_LAST_INTERNAL_NETWORK syscall.Errno = 5066
+ ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS syscall.Errno = 5067
+ ERROR_INVALID_OPERATION_ON_QUORUM syscall.Errno = 5068
+ ERROR_DEPENDENCY_NOT_ALLOWED syscall.Errno = 5069
+ ERROR_CLUSTER_NODE_PAUSED syscall.Errno = 5070
+ ERROR_NODE_CANT_HOST_RESOURCE syscall.Errno = 5071
+ ERROR_CLUSTER_NODE_NOT_READY syscall.Errno = 5072
+ ERROR_CLUSTER_NODE_SHUTTING_DOWN syscall.Errno = 5073
+ ERROR_CLUSTER_JOIN_ABORTED syscall.Errno = 5074
+ ERROR_CLUSTER_INCOMPATIBLE_VERSIONS syscall.Errno = 5075
+ ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED syscall.Errno = 5076
+ ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED syscall.Errno = 5077
+ ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 5078
+ ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED syscall.Errno = 5079
+ ERROR_CLUSTER_RESNAME_NOT_FOUND syscall.Errno = 5080
+ ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED syscall.Errno = 5081
+ ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST syscall.Errno = 5082
+ ERROR_CLUSTER_DATABASE_SEQMISMATCH syscall.Errno = 5083
+ ERROR_RESMON_INVALID_STATE syscall.Errno = 5084
+ ERROR_CLUSTER_GUM_NOT_LOCKER syscall.Errno = 5085
+ ERROR_QUORUM_DISK_NOT_FOUND syscall.Errno = 5086
+ ERROR_DATABASE_BACKUP_CORRUPT syscall.Errno = 5087
+ ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT syscall.Errno = 5088
+ ERROR_RESOURCE_PROPERTY_UNCHANGEABLE syscall.Errno = 5089
+ ERROR_NO_ADMIN_ACCESS_POINT syscall.Errno = 5090
+ ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE syscall.Errno = 5890
+ ERROR_CLUSTER_QUORUMLOG_NOT_FOUND syscall.Errno = 5891
+ ERROR_CLUSTER_MEMBERSHIP_HALT syscall.Errno = 5892
+ ERROR_CLUSTER_INSTANCE_ID_MISMATCH syscall.Errno = 5893
+ ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP syscall.Errno = 5894
+ ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH syscall.Errno = 5895
+ ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP syscall.Errno = 5896
+ ERROR_CLUSTER_PARAMETER_MISMATCH syscall.Errno = 5897
+ ERROR_NODE_CANNOT_BE_CLUSTERED syscall.Errno = 5898
+ ERROR_CLUSTER_WRONG_OS_VERSION syscall.Errno = 5899
+ ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME syscall.Errno = 5900
+ ERROR_CLUSCFG_ALREADY_COMMITTED syscall.Errno = 5901
+ ERROR_CLUSCFG_ROLLBACK_FAILED syscall.Errno = 5902
+ ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT syscall.Errno = 5903
+ ERROR_CLUSTER_OLD_VERSION syscall.Errno = 5904
+ ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME syscall.Errno = 5905
+ ERROR_CLUSTER_NO_NET_ADAPTERS syscall.Errno = 5906
+ ERROR_CLUSTER_POISONED syscall.Errno = 5907
+ ERROR_CLUSTER_GROUP_MOVING syscall.Errno = 5908
+ ERROR_CLUSTER_RESOURCE_TYPE_BUSY syscall.Errno = 5909
+ ERROR_RESOURCE_CALL_TIMED_OUT syscall.Errno = 5910
+ ERROR_INVALID_CLUSTER_IPV6_ADDRESS syscall.Errno = 5911
+ ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION syscall.Errno = 5912
+ ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS syscall.Errno = 5913
+ ERROR_CLUSTER_PARTIAL_SEND syscall.Errno = 5914
+ ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION syscall.Errno = 5915
+ ERROR_CLUSTER_INVALID_STRING_TERMINATION syscall.Errno = 5916
+ ERROR_CLUSTER_INVALID_STRING_FORMAT syscall.Errno = 5917
+ ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS syscall.Errno = 5918
+ ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS syscall.Errno = 5919
+ ERROR_CLUSTER_NULL_DATA syscall.Errno = 5920
+ ERROR_CLUSTER_PARTIAL_READ syscall.Errno = 5921
+ ERROR_CLUSTER_PARTIAL_WRITE syscall.Errno = 5922
+ ERROR_CLUSTER_CANT_DESERIALIZE_DATA syscall.Errno = 5923
+ ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT syscall.Errno = 5924
+ ERROR_CLUSTER_NO_QUORUM syscall.Errno = 5925
+ ERROR_CLUSTER_INVALID_IPV6_NETWORK syscall.Errno = 5926
+ ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK syscall.Errno = 5927
+ ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP syscall.Errno = 5928
+ ERROR_DEPENDENCY_TREE_TOO_COMPLEX syscall.Errno = 5929
+ ERROR_EXCEPTION_IN_RESOURCE_CALL syscall.Errno = 5930
+ ERROR_CLUSTER_RHS_FAILED_INITIALIZATION syscall.Errno = 5931
+ ERROR_CLUSTER_NOT_INSTALLED syscall.Errno = 5932
+ ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE syscall.Errno = 5933
+ ERROR_CLUSTER_MAX_NODES_IN_CLUSTER syscall.Errno = 5934
+ ERROR_CLUSTER_TOO_MANY_NODES syscall.Errno = 5935
+ ERROR_CLUSTER_OBJECT_ALREADY_USED syscall.Errno = 5936
+ ERROR_NONCORE_GROUPS_FOUND syscall.Errno = 5937
+ ERROR_FILE_SHARE_RESOURCE_CONFLICT syscall.Errno = 5938
+ ERROR_CLUSTER_EVICT_INVALID_REQUEST syscall.Errno = 5939
+ ERROR_CLUSTER_SINGLETON_RESOURCE syscall.Errno = 5940
+ ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE syscall.Errno = 5941
+ ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED syscall.Errno = 5942
+ ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR syscall.Errno = 5943
+ ERROR_CLUSTER_GROUP_BUSY syscall.Errno = 5944
+ ERROR_CLUSTER_NOT_SHARED_VOLUME syscall.Errno = 5945
+ ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR syscall.Errno = 5946
+ ERROR_CLUSTER_SHARED_VOLUMES_IN_USE syscall.Errno = 5947
+ ERROR_CLUSTER_USE_SHARED_VOLUMES_API syscall.Errno = 5948
+ ERROR_CLUSTER_BACKUP_IN_PROGRESS syscall.Errno = 5949
+ ERROR_NON_CSV_PATH syscall.Errno = 5950
+ ERROR_CSV_VOLUME_NOT_LOCAL syscall.Errno = 5951
+ ERROR_CLUSTER_WATCHDOG_TERMINATING syscall.Errno = 5952
+ ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES syscall.Errno = 5953
+ ERROR_CLUSTER_INVALID_NODE_WEIGHT syscall.Errno = 5954
+ ERROR_CLUSTER_RESOURCE_VETOED_CALL syscall.Errno = 5955
+ ERROR_RESMON_SYSTEM_RESOURCES_LACKING syscall.Errno = 5956
+ ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION syscall.Errno = 5957
+ ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE syscall.Errno = 5958
+ ERROR_CLUSTER_GROUP_QUEUED syscall.Errno = 5959
+ ERROR_CLUSTER_RESOURCE_LOCKED_STATUS syscall.Errno = 5960
+ ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED syscall.Errno = 5961
+ ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS syscall.Errno = 5962
+ ERROR_CLUSTER_DISK_NOT_CONNECTED syscall.Errno = 5963
+ ERROR_DISK_NOT_CSV_CAPABLE syscall.Errno = 5964
+ ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE syscall.Errno = 5965
+ ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED syscall.Errno = 5966
+ ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED syscall.Errno = 5967
+ ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES syscall.Errno = 5968
+ ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES syscall.Errno = 5969
+ ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE syscall.Errno = 5970
+ ERROR_CLUSTER_AFFINITY_CONFLICT syscall.Errno = 5971
+ ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE syscall.Errno = 5972
+ ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS syscall.Errno = 5973
+ ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED syscall.Errno = 5974
+ ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED syscall.Errno = 5975
+ ERROR_CLUSTER_UPGRADE_IN_PROGRESS syscall.Errno = 5976
+ ERROR_CLUSTER_UPGRADE_INCOMPLETE syscall.Errno = 5977
+ ERROR_CLUSTER_NODE_IN_GRACE_PERIOD syscall.Errno = 5978
+ ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT syscall.Errno = 5979
+ ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER syscall.Errno = 5980
+ ERROR_CLUSTER_RESOURCE_NOT_MONITORED syscall.Errno = 5981
+ ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED syscall.Errno = 5982
+ ERROR_CLUSTER_RESOURCE_IS_REPLICATED syscall.Errno = 5983
+ ERROR_CLUSTER_NODE_ISOLATED syscall.Errno = 5984
+ ERROR_CLUSTER_NODE_QUARANTINED syscall.Errno = 5985
+ ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED syscall.Errno = 5986
+ ERROR_CLUSTER_SPACE_DEGRADED syscall.Errno = 5987
+ ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED syscall.Errno = 5988
+ ERROR_CLUSTER_CSV_INVALID_HANDLE syscall.Errno = 5989
+ ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR syscall.Errno = 5990
+ ERROR_GROUPSET_NOT_AVAILABLE syscall.Errno = 5991
+ ERROR_GROUPSET_NOT_FOUND syscall.Errno = 5992
+ ERROR_GROUPSET_CANT_PROVIDE syscall.Errno = 5993
+ ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND syscall.Errno = 5994
+ ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY syscall.Errno = 5995
+ ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION syscall.Errno = 5996
+ ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS syscall.Errno = 5997
+ ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME syscall.Errno = 5998
+ ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE syscall.Errno = 5999
+ ERROR_ENCRYPTION_FAILED syscall.Errno = 6000
+ ERROR_DECRYPTION_FAILED syscall.Errno = 6001
+ ERROR_FILE_ENCRYPTED syscall.Errno = 6002
+ ERROR_NO_RECOVERY_POLICY syscall.Errno = 6003
+ ERROR_NO_EFS syscall.Errno = 6004
+ ERROR_WRONG_EFS syscall.Errno = 6005
+ ERROR_NO_USER_KEYS syscall.Errno = 6006
+ ERROR_FILE_NOT_ENCRYPTED syscall.Errno = 6007
+ ERROR_NOT_EXPORT_FORMAT syscall.Errno = 6008
+ ERROR_FILE_READ_ONLY syscall.Errno = 6009
+ ERROR_DIR_EFS_DISALLOWED syscall.Errno = 6010
+ ERROR_EFS_SERVER_NOT_TRUSTED syscall.Errno = 6011
+ ERROR_BAD_RECOVERY_POLICY syscall.Errno = 6012
+ ERROR_EFS_ALG_BLOB_TOO_BIG syscall.Errno = 6013
+ ERROR_VOLUME_NOT_SUPPORT_EFS syscall.Errno = 6014
+ ERROR_EFS_DISABLED syscall.Errno = 6015
+ ERROR_EFS_VERSION_NOT_SUPPORT syscall.Errno = 6016
+ ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE syscall.Errno = 6017
+ ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER syscall.Errno = 6018
+ ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE syscall.Errno = 6019
+ ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE syscall.Errno = 6020
+ ERROR_CS_ENCRYPTION_FILE_NOT_CSE syscall.Errno = 6021
+ ERROR_ENCRYPTION_POLICY_DENIES_OPERATION syscall.Errno = 6022
+ ERROR_NO_BROWSER_SERVERS_FOUND syscall.Errno = 6118
+ SCHED_E_SERVICE_NOT_LOCALSYSTEM syscall.Errno = 6200
+ ERROR_LOG_SECTOR_INVALID syscall.Errno = 6600
+ ERROR_LOG_SECTOR_PARITY_INVALID syscall.Errno = 6601
+ ERROR_LOG_SECTOR_REMAPPED syscall.Errno = 6602
+ ERROR_LOG_BLOCK_INCOMPLETE syscall.Errno = 6603
+ ERROR_LOG_INVALID_RANGE syscall.Errno = 6604
+ ERROR_LOG_BLOCKS_EXHAUSTED syscall.Errno = 6605
+ ERROR_LOG_READ_CONTEXT_INVALID syscall.Errno = 6606
+ ERROR_LOG_RESTART_INVALID syscall.Errno = 6607
+ ERROR_LOG_BLOCK_VERSION syscall.Errno = 6608
+ ERROR_LOG_BLOCK_INVALID syscall.Errno = 6609
+ ERROR_LOG_READ_MODE_INVALID syscall.Errno = 6610
+ ERROR_LOG_NO_RESTART syscall.Errno = 6611
+ ERROR_LOG_METADATA_CORRUPT syscall.Errno = 6612
+ ERROR_LOG_METADATA_INVALID syscall.Errno = 6613
+ ERROR_LOG_METADATA_INCONSISTENT syscall.Errno = 6614
+ ERROR_LOG_RESERVATION_INVALID syscall.Errno = 6615
+ ERROR_LOG_CANT_DELETE syscall.Errno = 6616
+ ERROR_LOG_CONTAINER_LIMIT_EXCEEDED syscall.Errno = 6617
+ ERROR_LOG_START_OF_LOG syscall.Errno = 6618
+ ERROR_LOG_POLICY_ALREADY_INSTALLED syscall.Errno = 6619
+ ERROR_LOG_POLICY_NOT_INSTALLED syscall.Errno = 6620
+ ERROR_LOG_POLICY_INVALID syscall.Errno = 6621
+ ERROR_LOG_POLICY_CONFLICT syscall.Errno = 6622
+ ERROR_LOG_PINNED_ARCHIVE_TAIL syscall.Errno = 6623
+ ERROR_LOG_RECORD_NONEXISTENT syscall.Errno = 6624
+ ERROR_LOG_RECORDS_RESERVED_INVALID syscall.Errno = 6625
+ ERROR_LOG_SPACE_RESERVED_INVALID syscall.Errno = 6626
+ ERROR_LOG_TAIL_INVALID syscall.Errno = 6627
+ ERROR_LOG_FULL syscall.Errno = 6628
+ ERROR_COULD_NOT_RESIZE_LOG syscall.Errno = 6629
+ ERROR_LOG_MULTIPLEXED syscall.Errno = 6630
+ ERROR_LOG_DEDICATED syscall.Errno = 6631
+ ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS syscall.Errno = 6632
+ ERROR_LOG_ARCHIVE_IN_PROGRESS syscall.Errno = 6633
+ ERROR_LOG_EPHEMERAL syscall.Errno = 6634
+ ERROR_LOG_NOT_ENOUGH_CONTAINERS syscall.Errno = 6635
+ ERROR_LOG_CLIENT_ALREADY_REGISTERED syscall.Errno = 6636
+ ERROR_LOG_CLIENT_NOT_REGISTERED syscall.Errno = 6637
+ ERROR_LOG_FULL_HANDLER_IN_PROGRESS syscall.Errno = 6638
+ ERROR_LOG_CONTAINER_READ_FAILED syscall.Errno = 6639
+ ERROR_LOG_CONTAINER_WRITE_FAILED syscall.Errno = 6640
+ ERROR_LOG_CONTAINER_OPEN_FAILED syscall.Errno = 6641
+ ERROR_LOG_CONTAINER_STATE_INVALID syscall.Errno = 6642
+ ERROR_LOG_STATE_INVALID syscall.Errno = 6643
+ ERROR_LOG_PINNED syscall.Errno = 6644
+ ERROR_LOG_METADATA_FLUSH_FAILED syscall.Errno = 6645
+ ERROR_LOG_INCONSISTENT_SECURITY syscall.Errno = 6646
+ ERROR_LOG_APPENDED_FLUSH_FAILED syscall.Errno = 6647
+ ERROR_LOG_PINNED_RESERVATION syscall.Errno = 6648
+ ERROR_INVALID_TRANSACTION syscall.Errno = 6700
+ ERROR_TRANSACTION_NOT_ACTIVE syscall.Errno = 6701
+ ERROR_TRANSACTION_REQUEST_NOT_VALID syscall.Errno = 6702
+ ERROR_TRANSACTION_NOT_REQUESTED syscall.Errno = 6703
+ ERROR_TRANSACTION_ALREADY_ABORTED syscall.Errno = 6704
+ ERROR_TRANSACTION_ALREADY_COMMITTED syscall.Errno = 6705
+ ERROR_TM_INITIALIZATION_FAILED syscall.Errno = 6706
+ ERROR_RESOURCEMANAGER_READ_ONLY syscall.Errno = 6707
+ ERROR_TRANSACTION_NOT_JOINED syscall.Errno = 6708
+ ERROR_TRANSACTION_SUPERIOR_EXISTS syscall.Errno = 6709
+ ERROR_CRM_PROTOCOL_ALREADY_EXISTS syscall.Errno = 6710
+ ERROR_TRANSACTION_PROPAGATION_FAILED syscall.Errno = 6711
+ ERROR_CRM_PROTOCOL_NOT_FOUND syscall.Errno = 6712
+ ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER syscall.Errno = 6713
+ ERROR_CURRENT_TRANSACTION_NOT_VALID syscall.Errno = 6714
+ ERROR_TRANSACTION_NOT_FOUND syscall.Errno = 6715
+ ERROR_RESOURCEMANAGER_NOT_FOUND syscall.Errno = 6716
+ ERROR_ENLISTMENT_NOT_FOUND syscall.Errno = 6717
+ ERROR_TRANSACTIONMANAGER_NOT_FOUND syscall.Errno = 6718
+ ERROR_TRANSACTIONMANAGER_NOT_ONLINE syscall.Errno = 6719
+ ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION syscall.Errno = 6720
+ ERROR_TRANSACTION_NOT_ROOT syscall.Errno = 6721
+ ERROR_TRANSACTION_OBJECT_EXPIRED syscall.Errno = 6722
+ ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED syscall.Errno = 6723
+ ERROR_TRANSACTION_RECORD_TOO_LONG syscall.Errno = 6724
+ ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED syscall.Errno = 6725
+ ERROR_TRANSACTION_INTEGRITY_VIOLATED syscall.Errno = 6726
+ ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH syscall.Errno = 6727
+ ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT syscall.Errno = 6728
+ ERROR_TRANSACTION_MUST_WRITETHROUGH syscall.Errno = 6729
+ ERROR_TRANSACTION_NO_SUPERIOR syscall.Errno = 6730
+ ERROR_HEURISTIC_DAMAGE_POSSIBLE syscall.Errno = 6731
+ ERROR_TRANSACTIONAL_CONFLICT syscall.Errno = 6800
+ ERROR_RM_NOT_ACTIVE syscall.Errno = 6801
+ ERROR_RM_METADATA_CORRUPT syscall.Errno = 6802
+ ERROR_DIRECTORY_NOT_RM syscall.Errno = 6803
+ ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE syscall.Errno = 6805
+ ERROR_LOG_RESIZE_INVALID_SIZE syscall.Errno = 6806
+ ERROR_OBJECT_NO_LONGER_EXISTS syscall.Errno = 6807
+ ERROR_STREAM_MINIVERSION_NOT_FOUND syscall.Errno = 6808
+ ERROR_STREAM_MINIVERSION_NOT_VALID syscall.Errno = 6809
+ ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION syscall.Errno = 6810
+ ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT syscall.Errno = 6811
+ ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS syscall.Errno = 6812
+ ERROR_REMOTE_FILE_VERSION_MISMATCH syscall.Errno = 6814
+ ERROR_HANDLE_NO_LONGER_VALID syscall.Errno = 6815
+ ERROR_NO_TXF_METADATA syscall.Errno = 6816
+ ERROR_LOG_CORRUPTION_DETECTED syscall.Errno = 6817
+ ERROR_CANT_RECOVER_WITH_HANDLE_OPEN syscall.Errno = 6818
+ ERROR_RM_DISCONNECTED syscall.Errno = 6819
+ ERROR_ENLISTMENT_NOT_SUPERIOR syscall.Errno = 6820
+ ERROR_RECOVERY_NOT_NEEDED syscall.Errno = 6821
+ ERROR_RM_ALREADY_STARTED syscall.Errno = 6822
+ ERROR_FILE_IDENTITY_NOT_PERSISTENT syscall.Errno = 6823
+ ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY syscall.Errno = 6824
+ ERROR_CANT_CROSS_RM_BOUNDARY syscall.Errno = 6825
+ ERROR_TXF_DIR_NOT_EMPTY syscall.Errno = 6826
+ ERROR_INDOUBT_TRANSACTIONS_EXIST syscall.Errno = 6827
+ ERROR_TM_VOLATILE syscall.Errno = 6828
+ ERROR_ROLLBACK_TIMER_EXPIRED syscall.Errno = 6829
+ ERROR_TXF_ATTRIBUTE_CORRUPT syscall.Errno = 6830
+ ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6831
+ ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED syscall.Errno = 6832
+ ERROR_LOG_GROWTH_FAILED syscall.Errno = 6833
+ ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE syscall.Errno = 6834
+ ERROR_TXF_METADATA_ALREADY_PRESENT syscall.Errno = 6835
+ ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET syscall.Errno = 6836
+ ERROR_TRANSACTION_REQUIRED_PROMOTION syscall.Errno = 6837
+ ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION syscall.Errno = 6838
+ ERROR_TRANSACTIONS_NOT_FROZEN syscall.Errno = 6839
+ ERROR_TRANSACTION_FREEZE_IN_PROGRESS syscall.Errno = 6840
+ ERROR_NOT_SNAPSHOT_VOLUME syscall.Errno = 6841
+ ERROR_NO_SAVEPOINT_WITH_OPEN_FILES syscall.Errno = 6842
+ ERROR_DATA_LOST_REPAIR syscall.Errno = 6843
+ ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6844
+ ERROR_TM_IDENTITY_MISMATCH syscall.Errno = 6845
+ ERROR_FLOATED_SECTION syscall.Errno = 6846
+ ERROR_CANNOT_ACCEPT_TRANSACTED_WORK syscall.Errno = 6847
+ ERROR_CANNOT_ABORT_TRANSACTIONS syscall.Errno = 6848
+ ERROR_BAD_CLUSTERS syscall.Errno = 6849
+ ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6850
+ ERROR_VOLUME_DIRTY syscall.Errno = 6851
+ ERROR_NO_LINK_TRACKING_IN_TRANSACTION syscall.Errno = 6852
+ ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION syscall.Errno = 6853
+ ERROR_EXPIRED_HANDLE syscall.Errno = 6854
+ ERROR_TRANSACTION_NOT_ENLISTED syscall.Errno = 6855
+ ERROR_CTX_WINSTATION_NAME_INVALID syscall.Errno = 7001
+ ERROR_CTX_INVALID_PD syscall.Errno = 7002
+ ERROR_CTX_PD_NOT_FOUND syscall.Errno = 7003
+ ERROR_CTX_WD_NOT_FOUND syscall.Errno = 7004
+ ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY syscall.Errno = 7005
+ ERROR_CTX_SERVICE_NAME_COLLISION syscall.Errno = 7006
+ ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007
+ ERROR_CTX_NO_OUTBUF syscall.Errno = 7008
+ ERROR_CTX_MODEM_INF_NOT_FOUND syscall.Errno = 7009
+ ERROR_CTX_INVALID_MODEMNAME syscall.Errno = 7010
+ ERROR_CTX_MODEM_RESPONSE_ERROR syscall.Errno = 7011
+ ERROR_CTX_MODEM_RESPONSE_TIMEOUT syscall.Errno = 7012
+ ERROR_CTX_MODEM_RESPONSE_NO_CARRIER syscall.Errno = 7013
+ ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE syscall.Errno = 7014
+ ERROR_CTX_MODEM_RESPONSE_BUSY syscall.Errno = 7015
+ ERROR_CTX_MODEM_RESPONSE_VOICE syscall.Errno = 7016
+ ERROR_CTX_TD_ERROR syscall.Errno = 7017
+ ERROR_CTX_WINSTATION_NOT_FOUND syscall.Errno = 7022
+ ERROR_CTX_WINSTATION_ALREADY_EXISTS syscall.Errno = 7023
+ ERROR_CTX_WINSTATION_BUSY syscall.Errno = 7024
+ ERROR_CTX_BAD_VIDEO_MODE syscall.Errno = 7025
+ ERROR_CTX_GRAPHICS_INVALID syscall.Errno = 7035
+ ERROR_CTX_LOGON_DISABLED syscall.Errno = 7037
+ ERROR_CTX_NOT_CONSOLE syscall.Errno = 7038
+ ERROR_CTX_CLIENT_QUERY_TIMEOUT syscall.Errno = 7040
+ ERROR_CTX_CONSOLE_DISCONNECT syscall.Errno = 7041
+ ERROR_CTX_CONSOLE_CONNECT syscall.Errno = 7042
+ ERROR_CTX_SHADOW_DENIED syscall.Errno = 7044
+ ERROR_CTX_WINSTATION_ACCESS_DENIED syscall.Errno = 7045
+ ERROR_CTX_INVALID_WD syscall.Errno = 7049
+ ERROR_CTX_SHADOW_INVALID syscall.Errno = 7050
+ ERROR_CTX_SHADOW_DISABLED syscall.Errno = 7051
+ ERROR_CTX_CLIENT_LICENSE_IN_USE syscall.Errno = 7052
+ ERROR_CTX_CLIENT_LICENSE_NOT_SET syscall.Errno = 7053
+ ERROR_CTX_LICENSE_NOT_AVAILABLE syscall.Errno = 7054
+ ERROR_CTX_LICENSE_CLIENT_INVALID syscall.Errno = 7055
+ ERROR_CTX_LICENSE_EXPIRED syscall.Errno = 7056
+ ERROR_CTX_SHADOW_NOT_RUNNING syscall.Errno = 7057
+ ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE syscall.Errno = 7058
+ ERROR_ACTIVATION_COUNT_EXCEEDED syscall.Errno = 7059
+ ERROR_CTX_WINSTATIONS_DISABLED syscall.Errno = 7060
+ ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED syscall.Errno = 7061
+ ERROR_CTX_SESSION_IN_USE syscall.Errno = 7062
+ ERROR_CTX_NO_FORCE_LOGOFF syscall.Errno = 7063
+ ERROR_CTX_ACCOUNT_RESTRICTION syscall.Errno = 7064
+ ERROR_RDP_PROTOCOL_ERROR syscall.Errno = 7065
+ ERROR_CTX_CDM_CONNECT syscall.Errno = 7066
+ ERROR_CTX_CDM_DISCONNECT syscall.Errno = 7067
+ ERROR_CTX_SECURITY_LAYER_ERROR syscall.Errno = 7068
+ ERROR_TS_INCOMPATIBLE_SESSIONS syscall.Errno = 7069
+ ERROR_TS_VIDEO_SUBSYSTEM_ERROR syscall.Errno = 7070
+ FRS_ERR_INVALID_API_SEQUENCE syscall.Errno = 8001
+ FRS_ERR_STARTING_SERVICE syscall.Errno = 8002
+ FRS_ERR_STOPPING_SERVICE syscall.Errno = 8003
+ FRS_ERR_INTERNAL_API syscall.Errno = 8004
+ FRS_ERR_INTERNAL syscall.Errno = 8005
+ FRS_ERR_SERVICE_COMM syscall.Errno = 8006
+ FRS_ERR_INSUFFICIENT_PRIV syscall.Errno = 8007
+ FRS_ERR_AUTHENTICATION syscall.Errno = 8008
+ FRS_ERR_PARENT_INSUFFICIENT_PRIV syscall.Errno = 8009
+ FRS_ERR_PARENT_AUTHENTICATION syscall.Errno = 8010
+ FRS_ERR_CHILD_TO_PARENT_COMM syscall.Errno = 8011
+ FRS_ERR_PARENT_TO_CHILD_COMM syscall.Errno = 8012
+ FRS_ERR_SYSVOL_POPULATE syscall.Errno = 8013
+ FRS_ERR_SYSVOL_POPULATE_TIMEOUT syscall.Errno = 8014
+ FRS_ERR_SYSVOL_IS_BUSY syscall.Errno = 8015
+ FRS_ERR_SYSVOL_DEMOTE syscall.Errno = 8016
+ FRS_ERR_INVALID_SERVICE_PARAMETER syscall.Errno = 8017
+ DS_S_SUCCESS = ERROR_SUCCESS
+ ERROR_DS_NOT_INSTALLED syscall.Errno = 8200
+ ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY syscall.Errno = 8201
+ ERROR_DS_NO_ATTRIBUTE_OR_VALUE syscall.Errno = 8202
+ ERROR_DS_INVALID_ATTRIBUTE_SYNTAX syscall.Errno = 8203
+ ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED syscall.Errno = 8204
+ ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS syscall.Errno = 8205
+ ERROR_DS_BUSY syscall.Errno = 8206
+ ERROR_DS_UNAVAILABLE syscall.Errno = 8207
+ ERROR_DS_NO_RIDS_ALLOCATED syscall.Errno = 8208
+ ERROR_DS_NO_MORE_RIDS syscall.Errno = 8209
+ ERROR_DS_INCORRECT_ROLE_OWNER syscall.Errno = 8210
+ ERROR_DS_RIDMGR_INIT_ERROR syscall.Errno = 8211
+ ERROR_DS_OBJ_CLASS_VIOLATION syscall.Errno = 8212
+ ERROR_DS_CANT_ON_NON_LEAF syscall.Errno = 8213
+ ERROR_DS_CANT_ON_RDN syscall.Errno = 8214
+ ERROR_DS_CANT_MOD_OBJ_CLASS syscall.Errno = 8215
+ ERROR_DS_CROSS_DOM_MOVE_ERROR syscall.Errno = 8216
+ ERROR_DS_GC_NOT_AVAILABLE syscall.Errno = 8217
+ ERROR_SHARED_POLICY syscall.Errno = 8218
+ ERROR_POLICY_OBJECT_NOT_FOUND syscall.Errno = 8219
+ ERROR_POLICY_ONLY_IN_DS syscall.Errno = 8220
+ ERROR_PROMOTION_ACTIVE syscall.Errno = 8221
+ ERROR_NO_PROMOTION_ACTIVE syscall.Errno = 8222
+ ERROR_DS_OPERATIONS_ERROR syscall.Errno = 8224
+ ERROR_DS_PROTOCOL_ERROR syscall.Errno = 8225
+ ERROR_DS_TIMELIMIT_EXCEEDED syscall.Errno = 8226
+ ERROR_DS_SIZELIMIT_EXCEEDED syscall.Errno = 8227
+ ERROR_DS_ADMIN_LIMIT_EXCEEDED syscall.Errno = 8228
+ ERROR_DS_COMPARE_FALSE syscall.Errno = 8229
+ ERROR_DS_COMPARE_TRUE syscall.Errno = 8230
+ ERROR_DS_AUTH_METHOD_NOT_SUPPORTED syscall.Errno = 8231
+ ERROR_DS_STRONG_AUTH_REQUIRED syscall.Errno = 8232
+ ERROR_DS_INAPPROPRIATE_AUTH syscall.Errno = 8233
+ ERROR_DS_AUTH_UNKNOWN syscall.Errno = 8234
+ ERROR_DS_REFERRAL syscall.Errno = 8235
+ ERROR_DS_UNAVAILABLE_CRIT_EXTENSION syscall.Errno = 8236
+ ERROR_DS_CONFIDENTIALITY_REQUIRED syscall.Errno = 8237
+ ERROR_DS_INAPPROPRIATE_MATCHING syscall.Errno = 8238
+ ERROR_DS_CONSTRAINT_VIOLATION syscall.Errno = 8239
+ ERROR_DS_NO_SUCH_OBJECT syscall.Errno = 8240
+ ERROR_DS_ALIAS_PROBLEM syscall.Errno = 8241
+ ERROR_DS_INVALID_DN_SYNTAX syscall.Errno = 8242
+ ERROR_DS_IS_LEAF syscall.Errno = 8243
+ ERROR_DS_ALIAS_DEREF_PROBLEM syscall.Errno = 8244
+ ERROR_DS_UNWILLING_TO_PERFORM syscall.Errno = 8245
+ ERROR_DS_LOOP_DETECT syscall.Errno = 8246
+ ERROR_DS_NAMING_VIOLATION syscall.Errno = 8247
+ ERROR_DS_OBJECT_RESULTS_TOO_LARGE syscall.Errno = 8248
+ ERROR_DS_AFFECTS_MULTIPLE_DSAS syscall.Errno = 8249
+ ERROR_DS_SERVER_DOWN syscall.Errno = 8250
+ ERROR_DS_LOCAL_ERROR syscall.Errno = 8251
+ ERROR_DS_ENCODING_ERROR syscall.Errno = 8252
+ ERROR_DS_DECODING_ERROR syscall.Errno = 8253
+ ERROR_DS_FILTER_UNKNOWN syscall.Errno = 8254
+ ERROR_DS_PARAM_ERROR syscall.Errno = 8255
+ ERROR_DS_NOT_SUPPORTED syscall.Errno = 8256
+ ERROR_DS_NO_RESULTS_RETURNED syscall.Errno = 8257
+ ERROR_DS_CONTROL_NOT_FOUND syscall.Errno = 8258
+ ERROR_DS_CLIENT_LOOP syscall.Errno = 8259
+ ERROR_DS_REFERRAL_LIMIT_EXCEEDED syscall.Errno = 8260
+ ERROR_DS_SORT_CONTROL_MISSING syscall.Errno = 8261
+ ERROR_DS_OFFSET_RANGE_ERROR syscall.Errno = 8262
+ ERROR_DS_RIDMGR_DISABLED syscall.Errno = 8263
+ ERROR_DS_ROOT_MUST_BE_NC syscall.Errno = 8301
+ ERROR_DS_ADD_REPLICA_INHIBITED syscall.Errno = 8302
+ ERROR_DS_ATT_NOT_DEF_IN_SCHEMA syscall.Errno = 8303
+ ERROR_DS_MAX_OBJ_SIZE_EXCEEDED syscall.Errno = 8304
+ ERROR_DS_OBJ_STRING_NAME_EXISTS syscall.Errno = 8305
+ ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA syscall.Errno = 8306
+ ERROR_DS_RDN_DOESNT_MATCH_SCHEMA syscall.Errno = 8307
+ ERROR_DS_NO_REQUESTED_ATTS_FOUND syscall.Errno = 8308
+ ERROR_DS_USER_BUFFER_TO_SMALL syscall.Errno = 8309
+ ERROR_DS_ATT_IS_NOT_ON_OBJ syscall.Errno = 8310
+ ERROR_DS_ILLEGAL_MOD_OPERATION syscall.Errno = 8311
+ ERROR_DS_OBJ_TOO_LARGE syscall.Errno = 8312
+ ERROR_DS_BAD_INSTANCE_TYPE syscall.Errno = 8313
+ ERROR_DS_MASTERDSA_REQUIRED syscall.Errno = 8314
+ ERROR_DS_OBJECT_CLASS_REQUIRED syscall.Errno = 8315
+ ERROR_DS_MISSING_REQUIRED_ATT syscall.Errno = 8316
+ ERROR_DS_ATT_NOT_DEF_FOR_CLASS syscall.Errno = 8317
+ ERROR_DS_ATT_ALREADY_EXISTS syscall.Errno = 8318
+ ERROR_DS_CANT_ADD_ATT_VALUES syscall.Errno = 8320
+ ERROR_DS_SINGLE_VALUE_CONSTRAINT syscall.Errno = 8321
+ ERROR_DS_RANGE_CONSTRAINT syscall.Errno = 8322
+ ERROR_DS_ATT_VAL_ALREADY_EXISTS syscall.Errno = 8323
+ ERROR_DS_CANT_REM_MISSING_ATT syscall.Errno = 8324
+ ERROR_DS_CANT_REM_MISSING_ATT_VAL syscall.Errno = 8325
+ ERROR_DS_ROOT_CANT_BE_SUBREF syscall.Errno = 8326
+ ERROR_DS_NO_CHAINING syscall.Errno = 8327
+ ERROR_DS_NO_CHAINED_EVAL syscall.Errno = 8328
+ ERROR_DS_NO_PARENT_OBJECT syscall.Errno = 8329
+ ERROR_DS_PARENT_IS_AN_ALIAS syscall.Errno = 8330
+ ERROR_DS_CANT_MIX_MASTER_AND_REPS syscall.Errno = 8331
+ ERROR_DS_CHILDREN_EXIST syscall.Errno = 8332
+ ERROR_DS_OBJ_NOT_FOUND syscall.Errno = 8333
+ ERROR_DS_ALIASED_OBJ_MISSING syscall.Errno = 8334
+ ERROR_DS_BAD_NAME_SYNTAX syscall.Errno = 8335
+ ERROR_DS_ALIAS_POINTS_TO_ALIAS syscall.Errno = 8336
+ ERROR_DS_CANT_DEREF_ALIAS syscall.Errno = 8337
+ ERROR_DS_OUT_OF_SCOPE syscall.Errno = 8338
+ ERROR_DS_OBJECT_BEING_REMOVED syscall.Errno = 8339
+ ERROR_DS_CANT_DELETE_DSA_OBJ syscall.Errno = 8340
+ ERROR_DS_GENERIC_ERROR syscall.Errno = 8341
+ ERROR_DS_DSA_MUST_BE_INT_MASTER syscall.Errno = 8342
+ ERROR_DS_CLASS_NOT_DSA syscall.Errno = 8343
+ ERROR_DS_INSUFF_ACCESS_RIGHTS syscall.Errno = 8344
+ ERROR_DS_ILLEGAL_SUPERIOR syscall.Errno = 8345
+ ERROR_DS_ATTRIBUTE_OWNED_BY_SAM syscall.Errno = 8346
+ ERROR_DS_NAME_TOO_MANY_PARTS syscall.Errno = 8347
+ ERROR_DS_NAME_TOO_LONG syscall.Errno = 8348
+ ERROR_DS_NAME_VALUE_TOO_LONG syscall.Errno = 8349
+ ERROR_DS_NAME_UNPARSEABLE syscall.Errno = 8350
+ ERROR_DS_NAME_TYPE_UNKNOWN syscall.Errno = 8351
+ ERROR_DS_NOT_AN_OBJECT syscall.Errno = 8352
+ ERROR_DS_SEC_DESC_TOO_SHORT syscall.Errno = 8353
+ ERROR_DS_SEC_DESC_INVALID syscall.Errno = 8354
+ ERROR_DS_NO_DELETED_NAME syscall.Errno = 8355
+ ERROR_DS_SUBREF_MUST_HAVE_PARENT syscall.Errno = 8356
+ ERROR_DS_NCNAME_MUST_BE_NC syscall.Errno = 8357
+ ERROR_DS_CANT_ADD_SYSTEM_ONLY syscall.Errno = 8358
+ ERROR_DS_CLASS_MUST_BE_CONCRETE syscall.Errno = 8359
+ ERROR_DS_INVALID_DMD syscall.Errno = 8360
+ ERROR_DS_OBJ_GUID_EXISTS syscall.Errno = 8361
+ ERROR_DS_NOT_ON_BACKLINK syscall.Errno = 8362
+ ERROR_DS_NO_CROSSREF_FOR_NC syscall.Errno = 8363
+ ERROR_DS_SHUTTING_DOWN syscall.Errno = 8364
+ ERROR_DS_UNKNOWN_OPERATION syscall.Errno = 8365
+ ERROR_DS_INVALID_ROLE_OWNER syscall.Errno = 8366
+ ERROR_DS_COULDNT_CONTACT_FSMO syscall.Errno = 8367
+ ERROR_DS_CROSS_NC_DN_RENAME syscall.Errno = 8368
+ ERROR_DS_CANT_MOD_SYSTEM_ONLY syscall.Errno = 8369
+ ERROR_DS_REPLICATOR_ONLY syscall.Errno = 8370
+ ERROR_DS_OBJ_CLASS_NOT_DEFINED syscall.Errno = 8371
+ ERROR_DS_OBJ_CLASS_NOT_SUBCLASS syscall.Errno = 8372
+ ERROR_DS_NAME_REFERENCE_INVALID syscall.Errno = 8373
+ ERROR_DS_CROSS_REF_EXISTS syscall.Errno = 8374
+ ERROR_DS_CANT_DEL_MASTER_CROSSREF syscall.Errno = 8375
+ ERROR_DS_SUBTREE_NOTIFY_NOT_NC_HEAD syscall.Errno = 8376
+ ERROR_DS_NOTIFY_FILTER_TOO_COMPLEX syscall.Errno = 8377
+ ERROR_DS_DUP_RDN syscall.Errno = 8378
+ ERROR_DS_DUP_OID syscall.Errno = 8379
+ ERROR_DS_DUP_MAPI_ID syscall.Errno = 8380
+ ERROR_DS_DUP_SCHEMA_ID_GUID syscall.Errno = 8381
+ ERROR_DS_DUP_LDAP_DISPLAY_NAME syscall.Errno = 8382
+ ERROR_DS_SEMANTIC_ATT_TEST syscall.Errno = 8383
+ ERROR_DS_SYNTAX_MISMATCH syscall.Errno = 8384
+ ERROR_DS_EXISTS_IN_MUST_HAVE syscall.Errno = 8385
+ ERROR_DS_EXISTS_IN_MAY_HAVE syscall.Errno = 8386
+ ERROR_DS_NONEXISTENT_MAY_HAVE syscall.Errno = 8387
+ ERROR_DS_NONEXISTENT_MUST_HAVE syscall.Errno = 8388
+ ERROR_DS_AUX_CLS_TEST_FAIL syscall.Errno = 8389
+ ERROR_DS_NONEXISTENT_POSS_SUP syscall.Errno = 8390
+ ERROR_DS_SUB_CLS_TEST_FAIL syscall.Errno = 8391
+ ERROR_DS_BAD_RDN_ATT_ID_SYNTAX syscall.Errno = 8392
+ ERROR_DS_EXISTS_IN_AUX_CLS syscall.Errno = 8393
+ ERROR_DS_EXISTS_IN_SUB_CLS syscall.Errno = 8394
+ ERROR_DS_EXISTS_IN_POSS_SUP syscall.Errno = 8395
+ ERROR_DS_RECALCSCHEMA_FAILED syscall.Errno = 8396
+ ERROR_DS_TREE_DELETE_NOT_FINISHED syscall.Errno = 8397
+ ERROR_DS_CANT_DELETE syscall.Errno = 8398
+ ERROR_DS_ATT_SCHEMA_REQ_ID syscall.Errno = 8399
+ ERROR_DS_BAD_ATT_SCHEMA_SYNTAX syscall.Errno = 8400
+ ERROR_DS_CANT_CACHE_ATT syscall.Errno = 8401
+ ERROR_DS_CANT_CACHE_CLASS syscall.Errno = 8402
+ ERROR_DS_CANT_REMOVE_ATT_CACHE syscall.Errno = 8403
+ ERROR_DS_CANT_REMOVE_CLASS_CACHE syscall.Errno = 8404
+ ERROR_DS_CANT_RETRIEVE_DN syscall.Errno = 8405
+ ERROR_DS_MISSING_SUPREF syscall.Errno = 8406
+ ERROR_DS_CANT_RETRIEVE_INSTANCE syscall.Errno = 8407
+ ERROR_DS_CODE_INCONSISTENCY syscall.Errno = 8408
+ ERROR_DS_DATABASE_ERROR syscall.Errno = 8409
+ ERROR_DS_GOVERNSID_MISSING syscall.Errno = 8410
+ ERROR_DS_MISSING_EXPECTED_ATT syscall.Errno = 8411
+ ERROR_DS_NCNAME_MISSING_CR_REF syscall.Errno = 8412
+ ERROR_DS_SECURITY_CHECKING_ERROR syscall.Errno = 8413
+ ERROR_DS_SCHEMA_NOT_LOADED syscall.Errno = 8414
+ ERROR_DS_SCHEMA_ALLOC_FAILED syscall.Errno = 8415
+ ERROR_DS_ATT_SCHEMA_REQ_SYNTAX syscall.Errno = 8416
+ ERROR_DS_GCVERIFY_ERROR syscall.Errno = 8417
+ ERROR_DS_DRA_SCHEMA_MISMATCH syscall.Errno = 8418
+ ERROR_DS_CANT_FIND_DSA_OBJ syscall.Errno = 8419
+ ERROR_DS_CANT_FIND_EXPECTED_NC syscall.Errno = 8420
+ ERROR_DS_CANT_FIND_NC_IN_CACHE syscall.Errno = 8421
+ ERROR_DS_CANT_RETRIEVE_CHILD syscall.Errno = 8422
+ ERROR_DS_SECURITY_ILLEGAL_MODIFY syscall.Errno = 8423
+ ERROR_DS_CANT_REPLACE_HIDDEN_REC syscall.Errno = 8424
+ ERROR_DS_BAD_HIERARCHY_FILE syscall.Errno = 8425
+ ERROR_DS_BUILD_HIERARCHY_TABLE_FAILED syscall.Errno = 8426
+ ERROR_DS_CONFIG_PARAM_MISSING syscall.Errno = 8427
+ ERROR_DS_COUNTING_AB_INDICES_FAILED syscall.Errno = 8428
+ ERROR_DS_HIERARCHY_TABLE_MALLOC_FAILED syscall.Errno = 8429
+ ERROR_DS_INTERNAL_FAILURE syscall.Errno = 8430
+ ERROR_DS_UNKNOWN_ERROR syscall.Errno = 8431
+ ERROR_DS_ROOT_REQUIRES_CLASS_TOP syscall.Errno = 8432
+ ERROR_DS_REFUSING_FSMO_ROLES syscall.Errno = 8433
+ ERROR_DS_MISSING_FSMO_SETTINGS syscall.Errno = 8434
+ ERROR_DS_UNABLE_TO_SURRENDER_ROLES syscall.Errno = 8435
+ ERROR_DS_DRA_GENERIC syscall.Errno = 8436
+ ERROR_DS_DRA_INVALID_PARAMETER syscall.Errno = 8437
+ ERROR_DS_DRA_BUSY syscall.Errno = 8438
+ ERROR_DS_DRA_BAD_DN syscall.Errno = 8439
+ ERROR_DS_DRA_BAD_NC syscall.Errno = 8440
+ ERROR_DS_DRA_DN_EXISTS syscall.Errno = 8441
+ ERROR_DS_DRA_INTERNAL_ERROR syscall.Errno = 8442
+ ERROR_DS_DRA_INCONSISTENT_DIT syscall.Errno = 8443
+ ERROR_DS_DRA_CONNECTION_FAILED syscall.Errno = 8444
+ ERROR_DS_DRA_BAD_INSTANCE_TYPE syscall.Errno = 8445
+ ERROR_DS_DRA_OUT_OF_MEM syscall.Errno = 8446
+ ERROR_DS_DRA_MAIL_PROBLEM syscall.Errno = 8447
+ ERROR_DS_DRA_REF_ALREADY_EXISTS syscall.Errno = 8448
+ ERROR_DS_DRA_REF_NOT_FOUND syscall.Errno = 8449
+ ERROR_DS_DRA_OBJ_IS_REP_SOURCE syscall.Errno = 8450
+ ERROR_DS_DRA_DB_ERROR syscall.Errno = 8451
+ ERROR_DS_DRA_NO_REPLICA syscall.Errno = 8452
+ ERROR_DS_DRA_ACCESS_DENIED syscall.Errno = 8453
+ ERROR_DS_DRA_NOT_SUPPORTED syscall.Errno = 8454
+ ERROR_DS_DRA_RPC_CANCELLED syscall.Errno = 8455
+ ERROR_DS_DRA_SOURCE_DISABLED syscall.Errno = 8456
+ ERROR_DS_DRA_SINK_DISABLED syscall.Errno = 8457
+ ERROR_DS_DRA_NAME_COLLISION syscall.Errno = 8458
+ ERROR_DS_DRA_SOURCE_REINSTALLED syscall.Errno = 8459
+ ERROR_DS_DRA_MISSING_PARENT syscall.Errno = 8460
+ ERROR_DS_DRA_PREEMPTED syscall.Errno = 8461
+ ERROR_DS_DRA_ABANDON_SYNC syscall.Errno = 8462
+ ERROR_DS_DRA_SHUTDOWN syscall.Errno = 8463
+ ERROR_DS_DRA_INCOMPATIBLE_PARTIAL_SET syscall.Errno = 8464
+ ERROR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA syscall.Errno = 8465
+ ERROR_DS_DRA_EXTN_CONNECTION_FAILED syscall.Errno = 8466
+ ERROR_DS_INSTALL_SCHEMA_MISMATCH syscall.Errno = 8467
+ ERROR_DS_DUP_LINK_ID syscall.Errno = 8468
+ ERROR_DS_NAME_ERROR_RESOLVING syscall.Errno = 8469
+ ERROR_DS_NAME_ERROR_NOT_FOUND syscall.Errno = 8470
+ ERROR_DS_NAME_ERROR_NOT_UNIQUE syscall.Errno = 8471
+ ERROR_DS_NAME_ERROR_NO_MAPPING syscall.Errno = 8472
+ ERROR_DS_NAME_ERROR_DOMAIN_ONLY syscall.Errno = 8473
+ ERROR_DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING syscall.Errno = 8474
+ ERROR_DS_CONSTRUCTED_ATT_MOD syscall.Errno = 8475
+ ERROR_DS_WRONG_OM_OBJ_CLASS syscall.Errno = 8476
+ ERROR_DS_DRA_REPL_PENDING syscall.Errno = 8477
+ ERROR_DS_DS_REQUIRED syscall.Errno = 8478
+ ERROR_DS_INVALID_LDAP_DISPLAY_NAME syscall.Errno = 8479
+ ERROR_DS_NON_BASE_SEARCH syscall.Errno = 8480
+ ERROR_DS_CANT_RETRIEVE_ATTS syscall.Errno = 8481
+ ERROR_DS_BACKLINK_WITHOUT_LINK syscall.Errno = 8482
+ ERROR_DS_EPOCH_MISMATCH syscall.Errno = 8483
+ ERROR_DS_SRC_NAME_MISMATCH syscall.Errno = 8484
+ ERROR_DS_SRC_AND_DST_NC_IDENTICAL syscall.Errno = 8485
+ ERROR_DS_DST_NC_MISMATCH syscall.Errno = 8486
+ ERROR_DS_NOT_AUTHORITIVE_FOR_DST_NC syscall.Errno = 8487
+ ERROR_DS_SRC_GUID_MISMATCH syscall.Errno = 8488
+ ERROR_DS_CANT_MOVE_DELETED_OBJECT syscall.Errno = 8489
+ ERROR_DS_PDC_OPERATION_IN_PROGRESS syscall.Errno = 8490
+ ERROR_DS_CROSS_DOMAIN_CLEANUP_REQD syscall.Errno = 8491
+ ERROR_DS_ILLEGAL_XDOM_MOVE_OPERATION syscall.Errno = 8492
+ ERROR_DS_CANT_WITH_ACCT_GROUP_MEMBERSHPS syscall.Errno = 8493
+ ERROR_DS_NC_MUST_HAVE_NC_PARENT syscall.Errno = 8494
+ ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE syscall.Errno = 8495
+ ERROR_DS_DST_DOMAIN_NOT_NATIVE syscall.Errno = 8496
+ ERROR_DS_MISSING_INFRASTRUCTURE_CONTAINER syscall.Errno = 8497
+ ERROR_DS_CANT_MOVE_ACCOUNT_GROUP syscall.Errno = 8498
+ ERROR_DS_CANT_MOVE_RESOURCE_GROUP syscall.Errno = 8499
+ ERROR_DS_INVALID_SEARCH_FLAG syscall.Errno = 8500
+ ERROR_DS_NO_TREE_DELETE_ABOVE_NC syscall.Errno = 8501
+ ERROR_DS_COULDNT_LOCK_TREE_FOR_DELETE syscall.Errno = 8502
+ ERROR_DS_COULDNT_IDENTIFY_OBJECTS_FOR_TREE_DELETE syscall.Errno = 8503
+ ERROR_DS_SAM_INIT_FAILURE syscall.Errno = 8504
+ ERROR_DS_SENSITIVE_GROUP_VIOLATION syscall.Errno = 8505
+ ERROR_DS_CANT_MOD_PRIMARYGROUPID syscall.Errno = 8506
+ ERROR_DS_ILLEGAL_BASE_SCHEMA_MOD syscall.Errno = 8507
+ ERROR_DS_NONSAFE_SCHEMA_CHANGE syscall.Errno = 8508
+ ERROR_DS_SCHEMA_UPDATE_DISALLOWED syscall.Errno = 8509
+ ERROR_DS_CANT_CREATE_UNDER_SCHEMA syscall.Errno = 8510
+ ERROR_DS_INSTALL_NO_SRC_SCH_VERSION syscall.Errno = 8511
+ ERROR_DS_INSTALL_NO_SCH_VERSION_IN_INIFILE syscall.Errno = 8512
+ ERROR_DS_INVALID_GROUP_TYPE syscall.Errno = 8513
+ ERROR_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8514
+ ERROR_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN syscall.Errno = 8515
+ ERROR_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8516
+ ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8517
+ ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8518
+ ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER syscall.Errno = 8519
+ ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER syscall.Errno = 8520
+ ERROR_DS_HAVE_PRIMARY_MEMBERS syscall.Errno = 8521
+ ERROR_DS_STRING_SD_CONVERSION_FAILED syscall.Errno = 8522
+ ERROR_DS_NAMING_MASTER_GC syscall.Errno = 8523
+ ERROR_DS_DNS_LOOKUP_FAILURE syscall.Errno = 8524
+ ERROR_DS_COULDNT_UPDATE_SPNS syscall.Errno = 8525
+ ERROR_DS_CANT_RETRIEVE_SD syscall.Errno = 8526
+ ERROR_DS_KEY_NOT_UNIQUE syscall.Errno = 8527
+ ERROR_DS_WRONG_LINKED_ATT_SYNTAX syscall.Errno = 8528
+ ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD syscall.Errno = 8529
+ ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY syscall.Errno = 8530
+ ERROR_DS_CANT_START syscall.Errno = 8531
+ ERROR_DS_INIT_FAILURE syscall.Errno = 8532
+ ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION syscall.Errno = 8533
+ ERROR_DS_SOURCE_DOMAIN_IN_FOREST syscall.Errno = 8534
+ ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST syscall.Errno = 8535
+ ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED syscall.Errno = 8536
+ ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN syscall.Errno = 8537
+ ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER syscall.Errno = 8538
+ ERROR_DS_SRC_SID_EXISTS_IN_FOREST syscall.Errno = 8539
+ ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH syscall.Errno = 8540
+ ERROR_SAM_INIT_FAILURE syscall.Errno = 8541
+ ERROR_DS_DRA_SCHEMA_INFO_SHIP syscall.Errno = 8542
+ ERROR_DS_DRA_SCHEMA_CONFLICT syscall.Errno = 8543
+ ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT syscall.Errno = 8544
+ ERROR_DS_DRA_OBJ_NC_MISMATCH syscall.Errno = 8545
+ ERROR_DS_NC_STILL_HAS_DSAS syscall.Errno = 8546
+ ERROR_DS_GC_REQUIRED syscall.Errno = 8547
+ ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY syscall.Errno = 8548
+ ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS syscall.Errno = 8549
+ ERROR_DS_CANT_ADD_TO_GC syscall.Errno = 8550
+ ERROR_DS_NO_CHECKPOINT_WITH_PDC syscall.Errno = 8551
+ ERROR_DS_SOURCE_AUDITING_NOT_ENABLED syscall.Errno = 8552
+ ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC syscall.Errno = 8553
+ ERROR_DS_INVALID_NAME_FOR_SPN syscall.Errno = 8554
+ ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS syscall.Errno = 8555
+ ERROR_DS_UNICODEPWD_NOT_IN_QUOTES syscall.Errno = 8556
+ ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED syscall.Errno = 8557
+ ERROR_DS_MUST_BE_RUN_ON_DST_DC syscall.Errno = 8558
+ ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER syscall.Errno = 8559
+ ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ syscall.Errno = 8560
+ ERROR_DS_INIT_FAILURE_CONSOLE syscall.Errno = 8561
+ ERROR_DS_SAM_INIT_FAILURE_CONSOLE syscall.Errno = 8562
+ ERROR_DS_FOREST_VERSION_TOO_HIGH syscall.Errno = 8563
+ ERROR_DS_DOMAIN_VERSION_TOO_HIGH syscall.Errno = 8564
+ ERROR_DS_FOREST_VERSION_TOO_LOW syscall.Errno = 8565
+ ERROR_DS_DOMAIN_VERSION_TOO_LOW syscall.Errno = 8566
+ ERROR_DS_INCOMPATIBLE_VERSION syscall.Errno = 8567
+ ERROR_DS_LOW_DSA_VERSION syscall.Errno = 8568
+ ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN syscall.Errno = 8569
+ ERROR_DS_NOT_SUPPORTED_SORT_ORDER syscall.Errno = 8570
+ ERROR_DS_NAME_NOT_UNIQUE syscall.Errno = 8571
+ ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4 syscall.Errno = 8572
+ ERROR_DS_OUT_OF_VERSION_STORE syscall.Errno = 8573
+ ERROR_DS_INCOMPATIBLE_CONTROLS_USED syscall.Errno = 8574
+ ERROR_DS_NO_REF_DOMAIN syscall.Errno = 8575
+ ERROR_DS_RESERVED_LINK_ID syscall.Errno = 8576
+ ERROR_DS_LINK_ID_NOT_AVAILABLE syscall.Errno = 8577
+ ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8578
+ ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE syscall.Errno = 8579
+ ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC syscall.Errno = 8580
+ ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG syscall.Errno = 8581
+ ERROR_DS_MODIFYDN_WRONG_GRANDPARENT syscall.Errno = 8582
+ ERROR_DS_NAME_ERROR_TRUST_REFERRAL syscall.Errno = 8583
+ ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER syscall.Errno = 8584
+ ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD syscall.Errno = 8585
+ ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2 syscall.Errno = 8586
+ ERROR_DS_THREAD_LIMIT_EXCEEDED syscall.Errno = 8587
+ ERROR_DS_NOT_CLOSEST syscall.Errno = 8588
+ ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF syscall.Errno = 8589
+ ERROR_DS_SINGLE_USER_MODE_FAILED syscall.Errno = 8590
+ ERROR_DS_NTDSCRIPT_SYNTAX_ERROR syscall.Errno = 8591
+ ERROR_DS_NTDSCRIPT_PROCESS_ERROR syscall.Errno = 8592
+ ERROR_DS_DIFFERENT_REPL_EPOCHS syscall.Errno = 8593
+ ERROR_DS_DRS_EXTENSIONS_CHANGED syscall.Errno = 8594
+ ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR syscall.Errno = 8595
+ ERROR_DS_NO_MSDS_INTID syscall.Errno = 8596
+ ERROR_DS_DUP_MSDS_INTID syscall.Errno = 8597
+ ERROR_DS_EXISTS_IN_RDNATTID syscall.Errno = 8598
+ ERROR_DS_AUTHORIZATION_FAILED syscall.Errno = 8599
+ ERROR_DS_INVALID_SCRIPT syscall.Errno = 8600
+ ERROR_DS_REMOTE_CROSSREF_OP_FAILED syscall.Errno = 8601
+ ERROR_DS_CROSS_REF_BUSY syscall.Errno = 8602
+ ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN syscall.Errno = 8603
+ ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC syscall.Errno = 8604
+ ERROR_DS_DUPLICATE_ID_FOUND syscall.Errno = 8605
+ ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT syscall.Errno = 8606
+ ERROR_DS_GROUP_CONVERSION_ERROR syscall.Errno = 8607
+ ERROR_DS_CANT_MOVE_APP_BASIC_GROUP syscall.Errno = 8608
+ ERROR_DS_CANT_MOVE_APP_QUERY_GROUP syscall.Errno = 8609
+ ERROR_DS_ROLE_NOT_VERIFIED syscall.Errno = 8610
+ ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL syscall.Errno = 8611
+ ERROR_DS_DOMAIN_RENAME_IN_PROGRESS syscall.Errno = 8612
+ ERROR_DS_EXISTING_AD_CHILD_NC syscall.Errno = 8613
+ ERROR_DS_REPL_LIFETIME_EXCEEDED syscall.Errno = 8614
+ ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER syscall.Errno = 8615
+ ERROR_DS_LDAP_SEND_QUEUE_FULL syscall.Errno = 8616
+ ERROR_DS_DRA_OUT_SCHEDULE_WINDOW syscall.Errno = 8617
+ ERROR_DS_POLICY_NOT_KNOWN syscall.Errno = 8618
+ ERROR_NO_SITE_SETTINGS_OBJECT syscall.Errno = 8619
+ ERROR_NO_SECRETS syscall.Errno = 8620
+ ERROR_NO_WRITABLE_DC_FOUND syscall.Errno = 8621
+ ERROR_DS_NO_SERVER_OBJECT syscall.Errno = 8622
+ ERROR_DS_NO_NTDSA_OBJECT syscall.Errno = 8623
+ ERROR_DS_NON_ASQ_SEARCH syscall.Errno = 8624
+ ERROR_DS_AUDIT_FAILURE syscall.Errno = 8625
+ ERROR_DS_INVALID_SEARCH_FLAG_SUBTREE syscall.Errno = 8626
+ ERROR_DS_INVALID_SEARCH_FLAG_TUPLE syscall.Errno = 8627
+ ERROR_DS_HIERARCHY_TABLE_TOO_DEEP syscall.Errno = 8628
+ ERROR_DS_DRA_CORRUPT_UTD_VECTOR syscall.Errno = 8629
+ ERROR_DS_DRA_SECRETS_DENIED syscall.Errno = 8630
+ ERROR_DS_RESERVED_MAPI_ID syscall.Errno = 8631
+ ERROR_DS_MAPI_ID_NOT_AVAILABLE syscall.Errno = 8632
+ ERROR_DS_DRA_MISSING_KRBTGT_SECRET syscall.Errno = 8633
+ ERROR_DS_DOMAIN_NAME_EXISTS_IN_FOREST syscall.Errno = 8634
+ ERROR_DS_FLAT_NAME_EXISTS_IN_FOREST syscall.Errno = 8635
+ ERROR_INVALID_USER_PRINCIPAL_NAME syscall.Errno = 8636
+ ERROR_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS syscall.Errno = 8637
+ ERROR_DS_OID_NOT_FOUND syscall.Errno = 8638
+ ERROR_DS_DRA_RECYCLED_TARGET syscall.Errno = 8639
+ ERROR_DS_DISALLOWED_NC_REDIRECT syscall.Errno = 8640
+ ERROR_DS_HIGH_ADLDS_FFL syscall.Errno = 8641
+ ERROR_DS_HIGH_DSA_VERSION syscall.Errno = 8642
+ ERROR_DS_LOW_ADLDS_FFL syscall.Errno = 8643
+ ERROR_DOMAIN_SID_SAME_AS_LOCAL_WORKSTATION syscall.Errno = 8644
+ ERROR_DS_UNDELETE_SAM_VALIDATION_FAILED syscall.Errno = 8645
+ ERROR_INCORRECT_ACCOUNT_TYPE syscall.Errno = 8646
+ ERROR_DS_SPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8647
+ ERROR_DS_UPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8648
+ ERROR_DS_MISSING_FOREST_TRUST syscall.Errno = 8649
+ ERROR_DS_VALUE_KEY_NOT_UNIQUE syscall.Errno = 8650
+ DNS_ERROR_RESPONSE_CODES_BASE syscall.Errno = 9000
+ DNS_ERROR_RCODE_NO_ERROR = ERROR_SUCCESS
+ DNS_ERROR_MASK syscall.Errno = 0x00002328
+ DNS_ERROR_RCODE_FORMAT_ERROR syscall.Errno = 9001
+ DNS_ERROR_RCODE_SERVER_FAILURE syscall.Errno = 9002
+ DNS_ERROR_RCODE_NAME_ERROR syscall.Errno = 9003
+ DNS_ERROR_RCODE_NOT_IMPLEMENTED syscall.Errno = 9004
+ DNS_ERROR_RCODE_REFUSED syscall.Errno = 9005
+ DNS_ERROR_RCODE_YXDOMAIN syscall.Errno = 9006
+ DNS_ERROR_RCODE_YXRRSET syscall.Errno = 9007
+ DNS_ERROR_RCODE_NXRRSET syscall.Errno = 9008
+ DNS_ERROR_RCODE_NOTAUTH syscall.Errno = 9009
+ DNS_ERROR_RCODE_NOTZONE syscall.Errno = 9010
+ DNS_ERROR_RCODE_BADSIG syscall.Errno = 9016
+ DNS_ERROR_RCODE_BADKEY syscall.Errno = 9017
+ DNS_ERROR_RCODE_BADTIME syscall.Errno = 9018
+ DNS_ERROR_RCODE_LAST = DNS_ERROR_RCODE_BADTIME
+ DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100
+ DNS_ERROR_KEYMASTER_REQUIRED syscall.Errno = 9101
+ DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE syscall.Errno = 9102
+ DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1 syscall.Errno = 9103
+ DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS syscall.Errno = 9104
+ DNS_ERROR_UNSUPPORTED_ALGORITHM syscall.Errno = 9105
+ DNS_ERROR_INVALID_KEY_SIZE syscall.Errno = 9106
+ DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE syscall.Errno = 9107
+ DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION syscall.Errno = 9108
+ DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR syscall.Errno = 9109
+ DNS_ERROR_UNEXPECTED_CNG_ERROR syscall.Errno = 9110
+ DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION syscall.Errno = 9111
+ DNS_ERROR_KSP_NOT_ACCESSIBLE syscall.Errno = 9112
+ DNS_ERROR_TOO_MANY_SKDS syscall.Errno = 9113
+ DNS_ERROR_INVALID_ROLLOVER_PERIOD syscall.Errno = 9114
+ DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET syscall.Errno = 9115
+ DNS_ERROR_ROLLOVER_IN_PROGRESS syscall.Errno = 9116
+ DNS_ERROR_STANDBY_KEY_NOT_PRESENT syscall.Errno = 9117
+ DNS_ERROR_NOT_ALLOWED_ON_ZSK syscall.Errno = 9118
+ DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD syscall.Errno = 9119
+ DNS_ERROR_ROLLOVER_ALREADY_QUEUED syscall.Errno = 9120
+ DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE syscall.Errno = 9121
+ DNS_ERROR_BAD_KEYMASTER syscall.Errno = 9122
+ DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD syscall.Errno = 9123
+ DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT syscall.Errno = 9124
+ DNS_ERROR_DNSSEC_IS_DISABLED syscall.Errno = 9125
+ DNS_ERROR_INVALID_XML syscall.Errno = 9126
+ DNS_ERROR_NO_VALID_TRUST_ANCHORS syscall.Errno = 9127
+ DNS_ERROR_ROLLOVER_NOT_POKEABLE syscall.Errno = 9128
+ DNS_ERROR_NSEC3_NAME_COLLISION syscall.Errno = 9129
+ DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1 syscall.Errno = 9130
+ DNS_ERROR_PACKET_FMT_BASE syscall.Errno = 9500
+ DNS_INFO_NO_RECORDS syscall.Errno = 9501
+ DNS_ERROR_BAD_PACKET syscall.Errno = 9502
+ DNS_ERROR_NO_PACKET syscall.Errno = 9503
+ DNS_ERROR_RCODE syscall.Errno = 9504
+ DNS_ERROR_UNSECURE_PACKET syscall.Errno = 9505
+ DNS_STATUS_PACKET_UNSECURE = DNS_ERROR_UNSECURE_PACKET
+ DNS_REQUEST_PENDING syscall.Errno = 9506
+ DNS_ERROR_NO_MEMORY = ERROR_OUTOFMEMORY
+ DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME
+ DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA
+ DNS_ERROR_GENERAL_API_BASE syscall.Errno = 9550
+ DNS_ERROR_INVALID_TYPE syscall.Errno = 9551
+ DNS_ERROR_INVALID_IP_ADDRESS syscall.Errno = 9552
+ DNS_ERROR_INVALID_PROPERTY syscall.Errno = 9553
+ DNS_ERROR_TRY_AGAIN_LATER syscall.Errno = 9554
+ DNS_ERROR_NOT_UNIQUE syscall.Errno = 9555
+ DNS_ERROR_NON_RFC_NAME syscall.Errno = 9556
+ DNS_STATUS_FQDN syscall.Errno = 9557
+ DNS_STATUS_DOTTED_NAME syscall.Errno = 9558
+ DNS_STATUS_SINGLE_PART_NAME syscall.Errno = 9559
+ DNS_ERROR_INVALID_NAME_CHAR syscall.Errno = 9560
+ DNS_ERROR_NUMERIC_NAME syscall.Errno = 9561
+ DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER syscall.Errno = 9562
+ DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION syscall.Errno = 9563
+ DNS_ERROR_CANNOT_FIND_ROOT_HINTS syscall.Errno = 9564
+ DNS_ERROR_INCONSISTENT_ROOT_HINTS syscall.Errno = 9565
+ DNS_ERROR_DWORD_VALUE_TOO_SMALL syscall.Errno = 9566
+ DNS_ERROR_DWORD_VALUE_TOO_LARGE syscall.Errno = 9567
+ DNS_ERROR_BACKGROUND_LOADING syscall.Errno = 9568
+ DNS_ERROR_NOT_ALLOWED_ON_RODC syscall.Errno = 9569
+ DNS_ERROR_NOT_ALLOWED_UNDER_DNAME syscall.Errno = 9570
+ DNS_ERROR_DELEGATION_REQUIRED syscall.Errno = 9571
+ DNS_ERROR_INVALID_POLICY_TABLE syscall.Errno = 9572
+ DNS_ERROR_ADDRESS_REQUIRED syscall.Errno = 9573
+ DNS_ERROR_ZONE_BASE syscall.Errno = 9600
+ DNS_ERROR_ZONE_DOES_NOT_EXIST syscall.Errno = 9601
+ DNS_ERROR_NO_ZONE_INFO syscall.Errno = 9602
+ DNS_ERROR_INVALID_ZONE_OPERATION syscall.Errno = 9603
+ DNS_ERROR_ZONE_CONFIGURATION_ERROR syscall.Errno = 9604
+ DNS_ERROR_ZONE_HAS_NO_SOA_RECORD syscall.Errno = 9605
+ DNS_ERROR_ZONE_HAS_NO_NS_RECORDS syscall.Errno = 9606
+ DNS_ERROR_ZONE_LOCKED syscall.Errno = 9607
+ DNS_ERROR_ZONE_CREATION_FAILED syscall.Errno = 9608
+ DNS_ERROR_ZONE_ALREADY_EXISTS syscall.Errno = 9609
+ DNS_ERROR_AUTOZONE_ALREADY_EXISTS syscall.Errno = 9610
+ DNS_ERROR_INVALID_ZONE_TYPE syscall.Errno = 9611
+ DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP syscall.Errno = 9612
+ DNS_ERROR_ZONE_NOT_SECONDARY syscall.Errno = 9613
+ DNS_ERROR_NEED_SECONDARY_ADDRESSES syscall.Errno = 9614
+ DNS_ERROR_WINS_INIT_FAILED syscall.Errno = 9615
+ DNS_ERROR_NEED_WINS_SERVERS syscall.Errno = 9616
+ DNS_ERROR_NBSTAT_INIT_FAILED syscall.Errno = 9617
+ DNS_ERROR_SOA_DELETE_INVALID syscall.Errno = 9618
+ DNS_ERROR_FORWARDER_ALREADY_EXISTS syscall.Errno = 9619
+ DNS_ERROR_ZONE_REQUIRES_MASTER_IP syscall.Errno = 9620
+ DNS_ERROR_ZONE_IS_SHUTDOWN syscall.Errno = 9621
+ DNS_ERROR_ZONE_LOCKED_FOR_SIGNING syscall.Errno = 9622
+ DNS_ERROR_DATAFILE_BASE syscall.Errno = 9650
+ DNS_ERROR_PRIMARY_REQUIRES_DATAFILE syscall.Errno = 9651
+ DNS_ERROR_INVALID_DATAFILE_NAME syscall.Errno = 9652
+ DNS_ERROR_DATAFILE_OPEN_FAILURE syscall.Errno = 9653
+ DNS_ERROR_FILE_WRITEBACK_FAILED syscall.Errno = 9654
+ DNS_ERROR_DATAFILE_PARSING syscall.Errno = 9655
+ DNS_ERROR_DATABASE_BASE syscall.Errno = 9700
+ DNS_ERROR_RECORD_DOES_NOT_EXIST syscall.Errno = 9701
+ DNS_ERROR_RECORD_FORMAT syscall.Errno = 9702
+ DNS_ERROR_NODE_CREATION_FAILED syscall.Errno = 9703
+ DNS_ERROR_UNKNOWN_RECORD_TYPE syscall.Errno = 9704
+ DNS_ERROR_RECORD_TIMED_OUT syscall.Errno = 9705
+ DNS_ERROR_NAME_NOT_IN_ZONE syscall.Errno = 9706
+ DNS_ERROR_CNAME_LOOP syscall.Errno = 9707
+ DNS_ERROR_NODE_IS_CNAME syscall.Errno = 9708
+ DNS_ERROR_CNAME_COLLISION syscall.Errno = 9709
+ DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT syscall.Errno = 9710
+ DNS_ERROR_RECORD_ALREADY_EXISTS syscall.Errno = 9711
+ DNS_ERROR_SECONDARY_DATA syscall.Errno = 9712
+ DNS_ERROR_NO_CREATE_CACHE_DATA syscall.Errno = 9713
+ DNS_ERROR_NAME_DOES_NOT_EXIST syscall.Errno = 9714
+ DNS_WARNING_PTR_CREATE_FAILED syscall.Errno = 9715
+ DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716
+ DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717
+ DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718
+ DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719
+ DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720
+ DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721
+ DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722
+ DNS_ERROR_OPERATION_BASE syscall.Errno = 9750
+ DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751
+ DNS_ERROR_AXFR syscall.Errno = 9752
+ DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753
+ DNS_ERROR_SECURE_BASE syscall.Errno = 9800
+ DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801
+ DNS_ERROR_SETUP_BASE syscall.Errno = 9850
+ DNS_ERROR_NO_TCPIP syscall.Errno = 9851
+ DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852
+ DNS_ERROR_DP_BASE syscall.Errno = 9900
+ DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901
+ DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902
+ DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903
+ DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904
+ DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905
+ DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906
+ DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911
+ DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912
+ DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913
+ DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914
+ DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915
+ DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916
+ DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917
+ DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921
+ DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922
+ DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923
+ DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924
+ DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925
+ DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951
+ DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952
+ DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953
+ DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954
+ DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955
+ DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956
+ DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957
+ DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958
+ DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959
+ DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960
+ DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961
+ DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962
+ DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963
+ DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971
+ DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972
+ DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973
+ DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974
+ DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975
+ DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976
+ DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977
+ DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978
+ DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979
+ DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980
+ DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981
+ DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982
+ DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983
+ DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984
+ DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985
+ DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986
+ DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987
+ DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988
+ DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989
+ DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990
+ DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991
+ DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992
+ DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993
+ DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994
+ DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995
+ DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996
+ WSABASEERR syscall.Errno = 10000
+ WSAEINTR syscall.Errno = 10004
+ WSAEBADF syscall.Errno = 10009
+ WSAEACCES syscall.Errno = 10013
+ WSAEFAULT syscall.Errno = 10014
+ WSAEINVAL syscall.Errno = 10022
+ WSAEMFILE syscall.Errno = 10024
+ WSAEWOULDBLOCK syscall.Errno = 10035
+ WSAEINPROGRESS syscall.Errno = 10036
+ WSAEALREADY syscall.Errno = 10037
+ WSAENOTSOCK syscall.Errno = 10038
+ WSAEDESTADDRREQ syscall.Errno = 10039
+ WSAEMSGSIZE syscall.Errno = 10040
+ WSAEPROTOTYPE syscall.Errno = 10041
+ WSAENOPROTOOPT syscall.Errno = 10042
+ WSAEPROTONOSUPPORT syscall.Errno = 10043
+ WSAESOCKTNOSUPPORT syscall.Errno = 10044
+ WSAEOPNOTSUPP syscall.Errno = 10045
+ WSAEPFNOSUPPORT syscall.Errno = 10046
+ WSAEAFNOSUPPORT syscall.Errno = 10047
+ WSAEADDRINUSE syscall.Errno = 10048
+ WSAEADDRNOTAVAIL syscall.Errno = 10049
+ WSAENETDOWN syscall.Errno = 10050
+ WSAENETUNREACH syscall.Errno = 10051
+ WSAENETRESET syscall.Errno = 10052
+ WSAECONNABORTED syscall.Errno = 10053
+ WSAECONNRESET syscall.Errno = 10054
+ WSAENOBUFS syscall.Errno = 10055
+ WSAEISCONN syscall.Errno = 10056
+ WSAENOTCONN syscall.Errno = 10057
+ WSAESHUTDOWN syscall.Errno = 10058
+ WSAETOOMANYREFS syscall.Errno = 10059
+ WSAETIMEDOUT syscall.Errno = 10060
+ WSAECONNREFUSED syscall.Errno = 10061
+ WSAELOOP syscall.Errno = 10062
+ WSAENAMETOOLONG syscall.Errno = 10063
+ WSAEHOSTDOWN syscall.Errno = 10064
+ WSAEHOSTUNREACH syscall.Errno = 10065
+ WSAENOTEMPTY syscall.Errno = 10066
+ WSAEPROCLIM syscall.Errno = 10067
+ WSAEUSERS syscall.Errno = 10068
+ WSAEDQUOT syscall.Errno = 10069
+ WSAESTALE syscall.Errno = 10070
+ WSAEREMOTE syscall.Errno = 10071
+ WSASYSNOTREADY syscall.Errno = 10091
+ WSAVERNOTSUPPORTED syscall.Errno = 10092
+ WSANOTINITIALISED syscall.Errno = 10093
+ WSAEDISCON syscall.Errno = 10101
+ WSAENOMORE syscall.Errno = 10102
+ WSAECANCELLED syscall.Errno = 10103
+ WSAEINVALIDPROCTABLE syscall.Errno = 10104
+ WSAEINVALIDPROVIDER syscall.Errno = 10105
+ WSAEPROVIDERFAILEDINIT syscall.Errno = 10106
+ WSASYSCALLFAILURE syscall.Errno = 10107
+ WSASERVICE_NOT_FOUND syscall.Errno = 10108
+ WSATYPE_NOT_FOUND syscall.Errno = 10109
+ WSA_E_NO_MORE syscall.Errno = 10110
+ WSA_E_CANCELLED syscall.Errno = 10111
+ WSAEREFUSED syscall.Errno = 10112
+ WSAHOST_NOT_FOUND syscall.Errno = 11001
+ WSATRY_AGAIN syscall.Errno = 11002
+ WSANO_RECOVERY syscall.Errno = 11003
+ WSANO_DATA syscall.Errno = 11004
+ WSA_QOS_RECEIVERS syscall.Errno = 11005
+ WSA_QOS_SENDERS syscall.Errno = 11006
+ WSA_QOS_NO_SENDERS syscall.Errno = 11007
+ WSA_QOS_NO_RECEIVERS syscall.Errno = 11008
+ WSA_QOS_REQUEST_CONFIRMED syscall.Errno = 11009
+ WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010
+ WSA_QOS_POLICY_FAILURE syscall.Errno = 11011
+ WSA_QOS_BAD_STYLE syscall.Errno = 11012
+ WSA_QOS_BAD_OBJECT syscall.Errno = 11013
+ WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014
+ WSA_QOS_GENERIC_ERROR syscall.Errno = 11015
+ WSA_QOS_ESERVICETYPE syscall.Errno = 11016
+ WSA_QOS_EFLOWSPEC syscall.Errno = 11017
+ WSA_QOS_EPROVSPECBUF syscall.Errno = 11018
+ WSA_QOS_EFILTERSTYLE syscall.Errno = 11019
+ WSA_QOS_EFILTERTYPE syscall.Errno = 11020
+ WSA_QOS_EFILTERCOUNT syscall.Errno = 11021
+ WSA_QOS_EOBJLENGTH syscall.Errno = 11022
+ WSA_QOS_EFLOWCOUNT syscall.Errno = 11023
+ WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024
+ WSA_QOS_EPOLICYOBJ syscall.Errno = 11025
+ WSA_QOS_EFLOWDESC syscall.Errno = 11026
+ WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027
+ WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028
+ WSA_QOS_ESDMODEOBJ syscall.Errno = 11029
+ WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030
+ WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031
+ WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032
+ WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033
+ ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000
+ ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001
+ ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002
+ ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003
+ ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004
+ ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005
+ ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006
+ ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007
+ ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008
+ ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009
+ ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010
+ ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011
+ ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012
+ ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013
+ ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014
+ ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015
+ ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016
+ ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017
+ ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018
+ ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019
+ ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020
+ ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021
+ ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022
+ ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023
+ WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024
+ WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025
+ ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800
+ ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801
+ ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802
+ ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803
+ ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804
+ ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805
+ ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806
+ ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807
+ ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808
+ ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809
+ ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810
+ ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811
+ ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812
+ ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813
+ ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814
+ ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815
+ ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816
+ ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817
+ ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818
+ ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819
+ ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820
+ ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821
+ ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822
+ ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823
+ ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824
+ ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825
+ ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826
+ ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827
+ ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828
+ ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829
+ ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830
+ ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831
+ ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832
+ ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833
+ ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834
+ ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835
+ ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836
+ ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 13837
+ ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838
+ ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839
+ ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840
+ ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841
+ ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842
+ ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843
+ ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844
+ ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845
+ ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846
+ ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847
+ ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848
+ ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849
+ ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850
+ ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851
+ ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852
+ ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853
+ ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854
+ ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855
+ ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856
+ ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857
+ ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858
+ ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859
+ ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860
+ ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861
+ ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862
+ ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863
+ ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864
+ ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865
+ ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866
+ ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867
+ ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868
+ ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869
+ ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870
+ ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871
+ ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872
+ ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873
+ ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874
+ ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875
+ ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876
+ ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877
+ ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878
+ ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879
+ ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880
+ ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881
+ ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882
+ ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883
+ ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884
+ ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885
+ ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886
+ ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887
+ ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888
+ ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889
+ ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890
+ ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891
+ ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892
+ ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893
+ ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894
+ ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895
+ ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896
+ ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897
+ ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898
+ ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899
+ ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900
+ ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901
+ ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902
+ ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903
+ ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904
+ ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905
+ ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906
+ ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907
+ ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908
+ ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909
+ ERROR_IPSEC_BAD_SPI syscall.Errno = 13910
+ ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911
+ ERROR_IPSEC_WRONG_SA syscall.Errno = 13912
+ ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913
+ ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914
+ ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915
+ ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916
+ ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917
+ ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918
+ ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925
+ ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926
+ ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927
+ ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928
+ ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929
+ ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930
+ ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931
+ ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932
+ ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000
+ ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001
+ ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002
+ ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003
+ ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004
+ ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005
+ ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006
+ ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007
+ ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008
+ ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009
+ ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010
+ ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011
+ ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012
+ ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013
+ ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014
+ ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015
+ ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016
+ ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017
+ ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018
+ ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019
+ ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020
+ ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021
+ ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022
+ ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023
+ ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024
+ ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025
+ ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026
+ ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027
+ ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028
+ ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029
+ ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030
+ ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031
+ ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032
+ ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033
+ ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034
+ ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035
+ ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036
+ ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037
+ ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038
+ ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039
+ ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040
+ ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041
+ ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042
+ ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043
+ ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044
+ ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045
+ ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046
+ ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047
+ ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048
+ ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049
+ ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050
+ ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051
+ ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052
+ ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053
+ ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054
+ ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055
+ ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056
+ ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057
+ ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058
+ ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059
+ ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060
+ ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061
+ ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062
+ ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063
+ ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064
+ ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065
+ ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066
+ ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067
+ ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068
+ ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069
+ ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070
+ ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071
+ ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072
+ ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073
+ ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074
+ ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075
+ ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076
+ ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077
+ ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078
+ ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079
+ ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080
+ ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081
+ ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082
+ ERROR_SXS_CORRUPTION syscall.Errno = 14083
+ ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084
+ ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085
+ ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086
+ ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087
+ ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088
+ ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089
+ ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090
+ ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091
+ ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092
+ ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093
+ ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094
+ ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN syscall.Errno = 14095
+ ERROR_UNMAPPED_SUBSTITUTION_STRING syscall.Errno = 14096
+ ERROR_SXS_ASSEMBLY_NOT_LOCKED syscall.Errno = 14097
+ ERROR_SXS_COMPONENT_STORE_CORRUPT syscall.Errno = 14098
+ ERROR_ADVANCED_INSTALLER_FAILED syscall.Errno = 14099
+ ERROR_XML_ENCODING_MISMATCH syscall.Errno = 14100
+ ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT syscall.Errno = 14101
+ ERROR_SXS_IDENTITIES_DIFFERENT syscall.Errno = 14102
+ ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT syscall.Errno = 14103
+ ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY syscall.Errno = 14104
+ ERROR_SXS_MANIFEST_TOO_BIG syscall.Errno = 14105
+ ERROR_SXS_SETTING_NOT_REGISTERED syscall.Errno = 14106
+ ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE syscall.Errno = 14107
+ ERROR_SMI_PRIMITIVE_INSTALLER_FAILED syscall.Errno = 14108
+ ERROR_GENERIC_COMMAND_FAILED syscall.Errno = 14109
+ ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110
+ ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000
+ ERROR_EVT_INVALID_QUERY syscall.Errno = 15001
+ ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND syscall.Errno = 15002
+ ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND syscall.Errno = 15003
+ ERROR_EVT_INVALID_PUBLISHER_NAME syscall.Errno = 15004
+ ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005
+ ERROR_EVT_CHANNEL_NOT_FOUND syscall.Errno = 15007
+ ERROR_EVT_MALFORMED_XML_TEXT syscall.Errno = 15008
+ ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL syscall.Errno = 15009
+ ERROR_EVT_CONFIGURATION_ERROR syscall.Errno = 15010
+ ERROR_EVT_QUERY_RESULT_STALE syscall.Errno = 15011
+ ERROR_EVT_QUERY_RESULT_INVALID_POSITION syscall.Errno = 15012
+ ERROR_EVT_NON_VALIDATING_MSXML syscall.Errno = 15013
+ ERROR_EVT_FILTER_ALREADYSCOPED syscall.Errno = 15014
+ ERROR_EVT_FILTER_NOTELTSET syscall.Errno = 15015
+ ERROR_EVT_FILTER_INVARG syscall.Errno = 15016
+ ERROR_EVT_FILTER_INVTEST syscall.Errno = 15017
+ ERROR_EVT_FILTER_INVTYPE syscall.Errno = 15018
+ ERROR_EVT_FILTER_PARSEERR syscall.Errno = 15019
+ ERROR_EVT_FILTER_UNSUPPORTEDOP syscall.Errno = 15020
+ ERROR_EVT_FILTER_UNEXPECTEDTOKEN syscall.Errno = 15021
+ ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL syscall.Errno = 15022
+ ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE syscall.Errno = 15023
+ ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE syscall.Errno = 15024
+ ERROR_EVT_CHANNEL_CANNOT_ACTIVATE syscall.Errno = 15025
+ ERROR_EVT_FILTER_TOO_COMPLEX syscall.Errno = 15026
+ ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027
+ ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028
+ ERROR_EVT_UNRESOLVED_VALUE_INSERT syscall.Errno = 15029
+ ERROR_EVT_UNRESOLVED_PARAMETER_INSERT syscall.Errno = 15030
+ ERROR_EVT_MAX_INSERTS_REACHED syscall.Errno = 15031
+ ERROR_EVT_EVENT_DEFINITION_NOT_FOUND syscall.Errno = 15032
+ ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND syscall.Errno = 15033
+ ERROR_EVT_VERSION_TOO_OLD syscall.Errno = 15034
+ ERROR_EVT_VERSION_TOO_NEW syscall.Errno = 15035
+ ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY syscall.Errno = 15036
+ ERROR_EVT_PUBLISHER_DISABLED syscall.Errno = 15037
+ ERROR_EVT_FILTER_OUT_OF_RANGE syscall.Errno = 15038
+ ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE syscall.Errno = 15080
+ ERROR_EC_LOG_DISABLED syscall.Errno = 15081
+ ERROR_EC_CIRCULAR_FORWARDING syscall.Errno = 15082
+ ERROR_EC_CREDSTORE_FULL syscall.Errno = 15083
+ ERROR_EC_CRED_NOT_FOUND syscall.Errno = 15084
+ ERROR_EC_NO_ACTIVE_CHANNEL syscall.Errno = 15085
+ ERROR_MUI_FILE_NOT_FOUND syscall.Errno = 15100
+ ERROR_MUI_INVALID_FILE syscall.Errno = 15101
+ ERROR_MUI_INVALID_RC_CONFIG syscall.Errno = 15102
+ ERROR_MUI_INVALID_LOCALE_NAME syscall.Errno = 15103
+ ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME syscall.Errno = 15104
+ ERROR_MUI_FILE_NOT_LOADED syscall.Errno = 15105
+ ERROR_RESOURCE_ENUM_USER_STOP syscall.Errno = 15106
+ ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED syscall.Errno = 15107
+ ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME syscall.Errno = 15108
+ ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE syscall.Errno = 15110
+ ERROR_MRM_INVALID_PRICONFIG syscall.Errno = 15111
+ ERROR_MRM_INVALID_FILE_TYPE syscall.Errno = 15112
+ ERROR_MRM_UNKNOWN_QUALIFIER syscall.Errno = 15113
+ ERROR_MRM_INVALID_QUALIFIER_VALUE syscall.Errno = 15114
+ ERROR_MRM_NO_CANDIDATE syscall.Errno = 15115
+ ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE syscall.Errno = 15116
+ ERROR_MRM_RESOURCE_TYPE_MISMATCH syscall.Errno = 15117
+ ERROR_MRM_DUPLICATE_MAP_NAME syscall.Errno = 15118
+ ERROR_MRM_DUPLICATE_ENTRY syscall.Errno = 15119
+ ERROR_MRM_INVALID_RESOURCE_IDENTIFIER syscall.Errno = 15120
+ ERROR_MRM_FILEPATH_TOO_LONG syscall.Errno = 15121
+ ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE syscall.Errno = 15122
+ ERROR_MRM_INVALID_PRI_FILE syscall.Errno = 15126
+ ERROR_MRM_NAMED_RESOURCE_NOT_FOUND syscall.Errno = 15127
+ ERROR_MRM_MAP_NOT_FOUND syscall.Errno = 15135
+ ERROR_MRM_UNSUPPORTED_PROFILE_TYPE syscall.Errno = 15136
+ ERROR_MRM_INVALID_QUALIFIER_OPERATOR syscall.Errno = 15137
+ ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE syscall.Errno = 15138
+ ERROR_MRM_AUTOMERGE_ENABLED syscall.Errno = 15139
+ ERROR_MRM_TOO_MANY_RESOURCES syscall.Errno = 15140
+ ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE syscall.Errno = 15141
+ ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE syscall.Errno = 15142
+ ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD syscall.Errno = 15143
+ ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST syscall.Errno = 15144
+ ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT syscall.Errno = 15145
+ ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE syscall.Errno = 15146
+ ERROR_MRM_GENERATION_COUNT_MISMATCH syscall.Errno = 15147
+ ERROR_PRI_MERGE_VERSION_MISMATCH syscall.Errno = 15148
+ ERROR_PRI_MERGE_MISSING_SCHEMA syscall.Errno = 15149
+ ERROR_PRI_MERGE_LOAD_FILE_FAILED syscall.Errno = 15150
+ ERROR_PRI_MERGE_ADD_FILE_FAILED syscall.Errno = 15151
+ ERROR_PRI_MERGE_WRITE_FILE_FAILED syscall.Errno = 15152
+ ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED syscall.Errno = 15153
+ ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED syscall.Errno = 15154
+ ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED syscall.Errno = 15155
+ ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED syscall.Errno = 15156
+ ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED syscall.Errno = 15157
+ ERROR_PRI_MERGE_INVALID_FILE_NAME syscall.Errno = 15158
+ ERROR_MRM_PACKAGE_NOT_FOUND syscall.Errno = 15159
+ ERROR_MCA_INVALID_CAPABILITIES_STRING syscall.Errno = 15200
+ ERROR_MCA_INVALID_VCP_VERSION syscall.Errno = 15201
+ ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION syscall.Errno = 15202
+ ERROR_MCA_MCCS_VERSION_MISMATCH syscall.Errno = 15203
+ ERROR_MCA_UNSUPPORTED_MCCS_VERSION syscall.Errno = 15204
+ ERROR_MCA_INTERNAL_ERROR syscall.Errno = 15205
+ ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED syscall.Errno = 15206
+ ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE syscall.Errno = 15207
+ ERROR_AMBIGUOUS_SYSTEM_DEVICE syscall.Errno = 15250
+ ERROR_SYSTEM_DEVICE_NOT_FOUND syscall.Errno = 15299
+ ERROR_HASH_NOT_SUPPORTED syscall.Errno = 15300
+ ERROR_HASH_NOT_PRESENT syscall.Errno = 15301
+ ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED syscall.Errno = 15321
+ ERROR_GPIO_CLIENT_INFORMATION_INVALID syscall.Errno = 15322
+ ERROR_GPIO_VERSION_NOT_SUPPORTED syscall.Errno = 15323
+ ERROR_GPIO_INVALID_REGISTRATION_PACKET syscall.Errno = 15324
+ ERROR_GPIO_OPERATION_DENIED syscall.Errno = 15325
+ ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE syscall.Errno = 15326
+ ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED syscall.Errno = 15327
+ ERROR_CANNOT_SWITCH_RUNLEVEL syscall.Errno = 15400
+ ERROR_INVALID_RUNLEVEL_SETTING syscall.Errno = 15401
+ ERROR_RUNLEVEL_SWITCH_TIMEOUT syscall.Errno = 15402
+ ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT syscall.Errno = 15403
+ ERROR_RUNLEVEL_SWITCH_IN_PROGRESS syscall.Errno = 15404
+ ERROR_SERVICES_FAILED_AUTOSTART syscall.Errno = 15405
+ ERROR_COM_TASK_STOP_PENDING syscall.Errno = 15501
+ ERROR_INSTALL_OPEN_PACKAGE_FAILED syscall.Errno = 15600
+ ERROR_INSTALL_PACKAGE_NOT_FOUND syscall.Errno = 15601
+ ERROR_INSTALL_INVALID_PACKAGE syscall.Errno = 15602
+ ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED syscall.Errno = 15603
+ ERROR_INSTALL_OUT_OF_DISK_SPACE syscall.Errno = 15604
+ ERROR_INSTALL_NETWORK_FAILURE syscall.Errno = 15605
+ ERROR_INSTALL_REGISTRATION_FAILURE syscall.Errno = 15606
+ ERROR_INSTALL_DEREGISTRATION_FAILURE syscall.Errno = 15607
+ ERROR_INSTALL_CANCEL syscall.Errno = 15608
+ ERROR_INSTALL_FAILED syscall.Errno = 15609
+ ERROR_REMOVE_FAILED syscall.Errno = 15610
+ ERROR_PACKAGE_ALREADY_EXISTS syscall.Errno = 15611
+ ERROR_NEEDS_REMEDIATION syscall.Errno = 15612
+ ERROR_INSTALL_PREREQUISITE_FAILED syscall.Errno = 15613
+ ERROR_PACKAGE_REPOSITORY_CORRUPTED syscall.Errno = 15614
+ ERROR_INSTALL_POLICY_FAILURE syscall.Errno = 15615
+ ERROR_PACKAGE_UPDATING syscall.Errno = 15616
+ ERROR_DEPLOYMENT_BLOCKED_BY_POLICY syscall.Errno = 15617
+ ERROR_PACKAGES_IN_USE syscall.Errno = 15618
+ ERROR_RECOVERY_FILE_CORRUPT syscall.Errno = 15619
+ ERROR_INVALID_STAGED_SIGNATURE syscall.Errno = 15620
+ ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED syscall.Errno = 15621
+ ERROR_INSTALL_PACKAGE_DOWNGRADE syscall.Errno = 15622
+ ERROR_SYSTEM_NEEDS_REMEDIATION syscall.Errno = 15623
+ ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN syscall.Errno = 15624
+ ERROR_RESILIENCY_FILE_CORRUPT syscall.Errno = 15625
+ ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING syscall.Errno = 15626
+ ERROR_PACKAGE_MOVE_FAILED syscall.Errno = 15627
+ ERROR_INSTALL_VOLUME_NOT_EMPTY syscall.Errno = 15628
+ ERROR_INSTALL_VOLUME_OFFLINE syscall.Errno = 15629
+ ERROR_INSTALL_VOLUME_CORRUPT syscall.Errno = 15630
+ ERROR_NEEDS_REGISTRATION syscall.Errno = 15631
+ ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE syscall.Errno = 15632
+ ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED syscall.Errno = 15633
+ ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE syscall.Errno = 15634
+ ERROR_PACKAGE_NOT_SUPPORTED_ON_FILESYSTEM syscall.Errno = 15635
+ ERROR_PACKAGE_MOVE_BLOCKED_BY_STREAMING syscall.Errno = 15636
+ ERROR_INSTALL_OPTIONAL_PACKAGE_APPLICATIONID_NOT_UNIQUE syscall.Errno = 15637
+ ERROR_PACKAGE_STAGING_ONHOLD syscall.Errno = 15638
+ ERROR_INSTALL_INVALID_RELATED_SET_UPDATE syscall.Errno = 15639
+ ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_FULLTRUST_CAPABILITY syscall.Errno = 15640
+ ERROR_DEPLOYMENT_BLOCKED_BY_USER_LOG_OFF syscall.Errno = 15641
+ ERROR_PROVISION_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_PROVISIONED syscall.Errno = 15642
+ ERROR_PACKAGES_REPUTATION_CHECK_FAILED syscall.Errno = 15643
+ ERROR_PACKAGES_REPUTATION_CHECK_TIMEDOUT syscall.Errno = 15644
+ ERROR_DEPLOYMENT_OPTION_NOT_SUPPORTED syscall.Errno = 15645
+ ERROR_APPINSTALLER_ACTIVATION_BLOCKED syscall.Errno = 15646
+ ERROR_REGISTRATION_FROM_REMOTE_DRIVE_NOT_SUPPORTED syscall.Errno = 15647
+ APPMODEL_ERROR_NO_PACKAGE syscall.Errno = 15700
+ APPMODEL_ERROR_PACKAGE_RUNTIME_CORRUPT syscall.Errno = 15701
+ APPMODEL_ERROR_PACKAGE_IDENTITY_CORRUPT syscall.Errno = 15702
+ APPMODEL_ERROR_NO_APPLICATION syscall.Errno = 15703
+ APPMODEL_ERROR_DYNAMIC_PROPERTY_READ_FAILED syscall.Errno = 15704
+ APPMODEL_ERROR_DYNAMIC_PROPERTY_INVALID syscall.Errno = 15705
+ APPMODEL_ERROR_PACKAGE_NOT_AVAILABLE syscall.Errno = 15706
+ ERROR_STATE_LOAD_STORE_FAILED syscall.Errno = 15800
+ ERROR_STATE_GET_VERSION_FAILED syscall.Errno = 15801
+ ERROR_STATE_SET_VERSION_FAILED syscall.Errno = 15802
+ ERROR_STATE_STRUCTURED_RESET_FAILED syscall.Errno = 15803
+ ERROR_STATE_OPEN_CONTAINER_FAILED syscall.Errno = 15804
+ ERROR_STATE_CREATE_CONTAINER_FAILED syscall.Errno = 15805
+ ERROR_STATE_DELETE_CONTAINER_FAILED syscall.Errno = 15806
+ ERROR_STATE_READ_SETTING_FAILED syscall.Errno = 15807
+ ERROR_STATE_WRITE_SETTING_FAILED syscall.Errno = 15808
+ ERROR_STATE_DELETE_SETTING_FAILED syscall.Errno = 15809
+ ERROR_STATE_QUERY_SETTING_FAILED syscall.Errno = 15810
+ ERROR_STATE_READ_COMPOSITE_SETTING_FAILED syscall.Errno = 15811
+ ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED syscall.Errno = 15812
+ ERROR_STATE_ENUMERATE_CONTAINER_FAILED syscall.Errno = 15813
+ ERROR_STATE_ENUMERATE_SETTINGS_FAILED syscall.Errno = 15814
+ ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15815
+ ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15816
+ ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15817
+ ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15818
+ ERROR_API_UNAVAILABLE syscall.Errno = 15841
+ STORE_ERROR_UNLICENSED syscall.Errno = 15861
+ STORE_ERROR_UNLICENSED_USER syscall.Errno = 15862
+ STORE_ERROR_PENDING_COM_TRANSACTION syscall.Errno = 15863
+ STORE_ERROR_LICENSE_REVOKED syscall.Errno = 15864
+ SEVERITY_SUCCESS syscall.Errno = 0
+ SEVERITY_ERROR syscall.Errno = 1
+ FACILITY_NT_BIT = 0x10000000
+ E_NOT_SET = ERROR_NOT_FOUND
+ E_NOT_VALID_STATE = ERROR_INVALID_STATE
+ E_NOT_SUFFICIENT_BUFFER = ERROR_INSUFFICIENT_BUFFER
+ E_TIME_CRITICAL_THREAD = ERROR_TIME_CRITICAL_THREAD
+ NOERROR syscall.Errno = 0
+ E_UNEXPECTED Handle = 0x8000FFFF
+ E_NOTIMPL Handle = 0x80004001
+ E_OUTOFMEMORY Handle = 0x8007000E
+ E_INVALIDARG Handle = 0x80070057
+ E_NOINTERFACE Handle = 0x80004002
+ E_POINTER Handle = 0x80004003
+ E_HANDLE Handle = 0x80070006
+ E_ABORT Handle = 0x80004004
+ E_FAIL Handle = 0x80004005
+ E_ACCESSDENIED Handle = 0x80070005
+ E_PENDING Handle = 0x8000000A
+ E_BOUNDS Handle = 0x8000000B
+ E_CHANGED_STATE Handle = 0x8000000C
+ E_ILLEGAL_STATE_CHANGE Handle = 0x8000000D
+ E_ILLEGAL_METHOD_CALL Handle = 0x8000000E
+ RO_E_METADATA_NAME_NOT_FOUND Handle = 0x8000000F
+ RO_E_METADATA_NAME_IS_NAMESPACE Handle = 0x80000010
+ RO_E_METADATA_INVALID_TYPE_FORMAT Handle = 0x80000011
+ RO_E_INVALID_METADATA_FILE Handle = 0x80000012
+ RO_E_CLOSED Handle = 0x80000013
+ RO_E_EXCLUSIVE_WRITE Handle = 0x80000014
+ RO_E_CHANGE_NOTIFICATION_IN_PROGRESS Handle = 0x80000015
+ RO_E_ERROR_STRING_NOT_FOUND Handle = 0x80000016
+ E_STRING_NOT_NULL_TERMINATED Handle = 0x80000017
+ E_ILLEGAL_DELEGATE_ASSIGNMENT Handle = 0x80000018
+ E_ASYNC_OPERATION_NOT_STARTED Handle = 0x80000019
+ E_APPLICATION_EXITING Handle = 0x8000001A
+ E_APPLICATION_VIEW_EXITING Handle = 0x8000001B
+ RO_E_MUST_BE_AGILE Handle = 0x8000001C
+ RO_E_UNSUPPORTED_FROM_MTA Handle = 0x8000001D
+ RO_E_COMMITTED Handle = 0x8000001E
+ RO_E_BLOCKED_CROSS_ASTA_CALL Handle = 0x8000001F
+ RO_E_CANNOT_ACTIVATE_FULL_TRUST_SERVER Handle = 0x80000020
+ RO_E_CANNOT_ACTIVATE_UNIVERSAL_APPLICATION_SERVER Handle = 0x80000021
+ CO_E_INIT_TLS Handle = 0x80004006
+ CO_E_INIT_SHARED_ALLOCATOR Handle = 0x80004007
+ CO_E_INIT_MEMORY_ALLOCATOR Handle = 0x80004008
+ CO_E_INIT_CLASS_CACHE Handle = 0x80004009
+ CO_E_INIT_RPC_CHANNEL Handle = 0x8000400A
+ CO_E_INIT_TLS_SET_CHANNEL_CONTROL Handle = 0x8000400B
+ CO_E_INIT_TLS_CHANNEL_CONTROL Handle = 0x8000400C
+ CO_E_INIT_UNACCEPTED_USER_ALLOCATOR Handle = 0x8000400D
+ CO_E_INIT_SCM_MUTEX_EXISTS Handle = 0x8000400E
+ CO_E_INIT_SCM_FILE_MAPPING_EXISTS Handle = 0x8000400F
+ CO_E_INIT_SCM_MAP_VIEW_OF_FILE Handle = 0x80004010
+ CO_E_INIT_SCM_EXEC_FAILURE Handle = 0x80004011
+ CO_E_INIT_ONLY_SINGLE_THREADED Handle = 0x80004012
+ CO_E_CANT_REMOTE Handle = 0x80004013
+ CO_E_BAD_SERVER_NAME Handle = 0x80004014
+ CO_E_WRONG_SERVER_IDENTITY Handle = 0x80004015
+ CO_E_OLE1DDE_DISABLED Handle = 0x80004016
+ CO_E_RUNAS_SYNTAX Handle = 0x80004017
+ CO_E_CREATEPROCESS_FAILURE Handle = 0x80004018
+ CO_E_RUNAS_CREATEPROCESS_FAILURE Handle = 0x80004019
+ CO_E_RUNAS_LOGON_FAILURE Handle = 0x8000401A
+ CO_E_LAUNCH_PERMSSION_DENIED Handle = 0x8000401B
+ CO_E_START_SERVICE_FAILURE Handle = 0x8000401C
+ CO_E_REMOTE_COMMUNICATION_FAILURE Handle = 0x8000401D
+ CO_E_SERVER_START_TIMEOUT Handle = 0x8000401E
+ CO_E_CLSREG_INCONSISTENT Handle = 0x8000401F
+ CO_E_IIDREG_INCONSISTENT Handle = 0x80004020
+ CO_E_NOT_SUPPORTED Handle = 0x80004021
+ CO_E_RELOAD_DLL Handle = 0x80004022
+ CO_E_MSI_ERROR Handle = 0x80004023
+ CO_E_ATTEMPT_TO_CREATE_OUTSIDE_CLIENT_CONTEXT Handle = 0x80004024
+ CO_E_SERVER_PAUSED Handle = 0x80004025
+ CO_E_SERVER_NOT_PAUSED Handle = 0x80004026
+ CO_E_CLASS_DISABLED Handle = 0x80004027
+ CO_E_CLRNOTAVAILABLE Handle = 0x80004028
+ CO_E_ASYNC_WORK_REJECTED Handle = 0x80004029
+ CO_E_SERVER_INIT_TIMEOUT Handle = 0x8000402A
+ CO_E_NO_SECCTX_IN_ACTIVATE Handle = 0x8000402B
+ CO_E_TRACKER_CONFIG Handle = 0x80004030
+ CO_E_THREADPOOL_CONFIG Handle = 0x80004031
+ CO_E_SXS_CONFIG Handle = 0x80004032
+ CO_E_MALFORMED_SPN Handle = 0x80004033
+ CO_E_UNREVOKED_REGISTRATION_ON_APARTMENT_SHUTDOWN Handle = 0x80004034
+ CO_E_PREMATURE_STUB_RUNDOWN Handle = 0x80004035
+ S_OK Handle = 0
+ S_FALSE Handle = 1
+ OLE_E_FIRST Handle = 0x80040000
+ OLE_E_LAST Handle = 0x800400FF
+ OLE_S_FIRST Handle = 0x00040000
+ OLE_S_LAST Handle = 0x000400FF
+ OLE_E_OLEVERB Handle = 0x80040000
+ OLE_E_ADVF Handle = 0x80040001
+ OLE_E_ENUM_NOMORE Handle = 0x80040002
+ OLE_E_ADVISENOTSUPPORTED Handle = 0x80040003
+ OLE_E_NOCONNECTION Handle = 0x80040004
+ OLE_E_NOTRUNNING Handle = 0x80040005
+ OLE_E_NOCACHE Handle = 0x80040006
+ OLE_E_BLANK Handle = 0x80040007
+ OLE_E_CLASSDIFF Handle = 0x80040008
+ OLE_E_CANT_GETMONIKER Handle = 0x80040009
+ OLE_E_CANT_BINDTOSOURCE Handle = 0x8004000A
+ OLE_E_STATIC Handle = 0x8004000B
+ OLE_E_PROMPTSAVECANCELLED Handle = 0x8004000C
+ OLE_E_INVALIDRECT Handle = 0x8004000D
+ OLE_E_WRONGCOMPOBJ Handle = 0x8004000E
+ OLE_E_INVALIDHWND Handle = 0x8004000F
+ OLE_E_NOT_INPLACEACTIVE Handle = 0x80040010
+ OLE_E_CANTCONVERT Handle = 0x80040011
+ OLE_E_NOSTORAGE Handle = 0x80040012
+ DV_E_FORMATETC Handle = 0x80040064
+ DV_E_DVTARGETDEVICE Handle = 0x80040065
+ DV_E_STGMEDIUM Handle = 0x80040066
+ DV_E_STATDATA Handle = 0x80040067
+ DV_E_LINDEX Handle = 0x80040068
+ DV_E_TYMED Handle = 0x80040069
+ DV_E_CLIPFORMAT Handle = 0x8004006A
+ DV_E_DVASPECT Handle = 0x8004006B
+ DV_E_DVTARGETDEVICE_SIZE Handle = 0x8004006C
+ DV_E_NOIVIEWOBJECT Handle = 0x8004006D
+ DRAGDROP_E_FIRST syscall.Errno = 0x80040100
+ DRAGDROP_E_LAST syscall.Errno = 0x8004010F
+ DRAGDROP_S_FIRST syscall.Errno = 0x00040100
+ DRAGDROP_S_LAST syscall.Errno = 0x0004010F
+ DRAGDROP_E_NOTREGISTERED Handle = 0x80040100
+ DRAGDROP_E_ALREADYREGISTERED Handle = 0x80040101
+ DRAGDROP_E_INVALIDHWND Handle = 0x80040102
+ DRAGDROP_E_CONCURRENT_DRAG_ATTEMPTED Handle = 0x80040103
+ CLASSFACTORY_E_FIRST syscall.Errno = 0x80040110
+ CLASSFACTORY_E_LAST syscall.Errno = 0x8004011F
+ CLASSFACTORY_S_FIRST syscall.Errno = 0x00040110
+ CLASSFACTORY_S_LAST syscall.Errno = 0x0004011F
+ CLASS_E_NOAGGREGATION Handle = 0x80040110
+ CLASS_E_CLASSNOTAVAILABLE Handle = 0x80040111
+ CLASS_E_NOTLICENSED Handle = 0x80040112
+ MARSHAL_E_FIRST syscall.Errno = 0x80040120
+ MARSHAL_E_LAST syscall.Errno = 0x8004012F
+ MARSHAL_S_FIRST syscall.Errno = 0x00040120
+ MARSHAL_S_LAST syscall.Errno = 0x0004012F
+ DATA_E_FIRST syscall.Errno = 0x80040130
+ DATA_E_LAST syscall.Errno = 0x8004013F
+ DATA_S_FIRST syscall.Errno = 0x00040130
+ DATA_S_LAST syscall.Errno = 0x0004013F
+ VIEW_E_FIRST syscall.Errno = 0x80040140
+ VIEW_E_LAST syscall.Errno = 0x8004014F
+ VIEW_S_FIRST syscall.Errno = 0x00040140
+ VIEW_S_LAST syscall.Errno = 0x0004014F
+ VIEW_E_DRAW Handle = 0x80040140
+ REGDB_E_FIRST syscall.Errno = 0x80040150
+ REGDB_E_LAST syscall.Errno = 0x8004015F
+ REGDB_S_FIRST syscall.Errno = 0x00040150
+ REGDB_S_LAST syscall.Errno = 0x0004015F
+ REGDB_E_READREGDB Handle = 0x80040150
+ REGDB_E_WRITEREGDB Handle = 0x80040151
+ REGDB_E_KEYMISSING Handle = 0x80040152
+ REGDB_E_INVALIDVALUE Handle = 0x80040153
+ REGDB_E_CLASSNOTREG Handle = 0x80040154
+ REGDB_E_IIDNOTREG Handle = 0x80040155
+ REGDB_E_BADTHREADINGMODEL Handle = 0x80040156
+ REGDB_E_PACKAGEPOLICYVIOLATION Handle = 0x80040157
+ CAT_E_FIRST syscall.Errno = 0x80040160
+ CAT_E_LAST syscall.Errno = 0x80040161
+ CAT_E_CATIDNOEXIST Handle = 0x80040160
+ CAT_E_NODESCRIPTION Handle = 0x80040161
+ CS_E_FIRST syscall.Errno = 0x80040164
+ CS_E_LAST syscall.Errno = 0x8004016F
+ CS_E_PACKAGE_NOTFOUND Handle = 0x80040164
+ CS_E_NOT_DELETABLE Handle = 0x80040165
+ CS_E_CLASS_NOTFOUND Handle = 0x80040166
+ CS_E_INVALID_VERSION Handle = 0x80040167
+ CS_E_NO_CLASSSTORE Handle = 0x80040168
+ CS_E_OBJECT_NOTFOUND Handle = 0x80040169
+ CS_E_OBJECT_ALREADY_EXISTS Handle = 0x8004016A
+ CS_E_INVALID_PATH Handle = 0x8004016B
+ CS_E_NETWORK_ERROR Handle = 0x8004016C
+ CS_E_ADMIN_LIMIT_EXCEEDED Handle = 0x8004016D
+ CS_E_SCHEMA_MISMATCH Handle = 0x8004016E
+ CS_E_INTERNAL_ERROR Handle = 0x8004016F
+ CACHE_E_FIRST syscall.Errno = 0x80040170
+ CACHE_E_LAST syscall.Errno = 0x8004017F
+ CACHE_S_FIRST syscall.Errno = 0x00040170
+ CACHE_S_LAST syscall.Errno = 0x0004017F
+ CACHE_E_NOCACHE_UPDATED Handle = 0x80040170
+ OLEOBJ_E_FIRST syscall.Errno = 0x80040180
+ OLEOBJ_E_LAST syscall.Errno = 0x8004018F
+ OLEOBJ_S_FIRST syscall.Errno = 0x00040180
+ OLEOBJ_S_LAST syscall.Errno = 0x0004018F
+ OLEOBJ_E_NOVERBS Handle = 0x80040180
+ OLEOBJ_E_INVALIDVERB Handle = 0x80040181
+ CLIENTSITE_E_FIRST syscall.Errno = 0x80040190
+ CLIENTSITE_E_LAST syscall.Errno = 0x8004019F
+ CLIENTSITE_S_FIRST syscall.Errno = 0x00040190
+ CLIENTSITE_S_LAST syscall.Errno = 0x0004019F
+ INPLACE_E_NOTUNDOABLE Handle = 0x800401A0
+ INPLACE_E_NOTOOLSPACE Handle = 0x800401A1
+ INPLACE_E_FIRST syscall.Errno = 0x800401A0
+ INPLACE_E_LAST syscall.Errno = 0x800401AF
+ INPLACE_S_FIRST syscall.Errno = 0x000401A0
+ INPLACE_S_LAST syscall.Errno = 0x000401AF
+ ENUM_E_FIRST syscall.Errno = 0x800401B0
+ ENUM_E_LAST syscall.Errno = 0x800401BF
+ ENUM_S_FIRST syscall.Errno = 0x000401B0
+ ENUM_S_LAST syscall.Errno = 0x000401BF
+ CONVERT10_E_FIRST syscall.Errno = 0x800401C0
+ CONVERT10_E_LAST syscall.Errno = 0x800401CF
+ CONVERT10_S_FIRST syscall.Errno = 0x000401C0
+ CONVERT10_S_LAST syscall.Errno = 0x000401CF
+ CONVERT10_E_OLESTREAM_GET Handle = 0x800401C0
+ CONVERT10_E_OLESTREAM_PUT Handle = 0x800401C1
+ CONVERT10_E_OLESTREAM_FMT Handle = 0x800401C2
+ CONVERT10_E_OLESTREAM_BITMAP_TO_DIB Handle = 0x800401C3
+ CONVERT10_E_STG_FMT Handle = 0x800401C4
+ CONVERT10_E_STG_NO_STD_STREAM Handle = 0x800401C5
+ CONVERT10_E_STG_DIB_TO_BITMAP Handle = 0x800401C6
+ CLIPBRD_E_FIRST syscall.Errno = 0x800401D0
+ CLIPBRD_E_LAST syscall.Errno = 0x800401DF
+ CLIPBRD_S_FIRST syscall.Errno = 0x000401D0
+ CLIPBRD_S_LAST syscall.Errno = 0x000401DF
+ CLIPBRD_E_CANT_OPEN Handle = 0x800401D0
+ CLIPBRD_E_CANT_EMPTY Handle = 0x800401D1
+ CLIPBRD_E_CANT_SET Handle = 0x800401D2
+ CLIPBRD_E_BAD_DATA Handle = 0x800401D3
+ CLIPBRD_E_CANT_CLOSE Handle = 0x800401D4
+ MK_E_FIRST syscall.Errno = 0x800401E0
+ MK_E_LAST syscall.Errno = 0x800401EF
+ MK_S_FIRST syscall.Errno = 0x000401E0
+ MK_S_LAST syscall.Errno = 0x000401EF
+ MK_E_CONNECTMANUALLY Handle = 0x800401E0
+ MK_E_EXCEEDEDDEADLINE Handle = 0x800401E1
+ MK_E_NEEDGENERIC Handle = 0x800401E2
+ MK_E_UNAVAILABLE Handle = 0x800401E3
+ MK_E_SYNTAX Handle = 0x800401E4
+ MK_E_NOOBJECT Handle = 0x800401E5
+ MK_E_INVALIDEXTENSION Handle = 0x800401E6
+ MK_E_INTERMEDIATEINTERFACENOTSUPPORTED Handle = 0x800401E7
+ MK_E_NOTBINDABLE Handle = 0x800401E8
+ MK_E_NOTBOUND Handle = 0x800401E9
+ MK_E_CANTOPENFILE Handle = 0x800401EA
+ MK_E_MUSTBOTHERUSER Handle = 0x800401EB
+ MK_E_NOINVERSE Handle = 0x800401EC
+ MK_E_NOSTORAGE Handle = 0x800401ED
+ MK_E_NOPREFIX Handle = 0x800401EE
+ MK_E_ENUMERATION_FAILED Handle = 0x800401EF
+ CO_E_FIRST syscall.Errno = 0x800401F0
+ CO_E_LAST syscall.Errno = 0x800401FF
+ CO_S_FIRST syscall.Errno = 0x000401F0
+ CO_S_LAST syscall.Errno = 0x000401FF
+ CO_E_NOTINITIALIZED Handle = 0x800401F0
+ CO_E_ALREADYINITIALIZED Handle = 0x800401F1
+ CO_E_CANTDETERMINECLASS Handle = 0x800401F2
+ CO_E_CLASSSTRING Handle = 0x800401F3
+ CO_E_IIDSTRING Handle = 0x800401F4
+ CO_E_APPNOTFOUND Handle = 0x800401F5
+ CO_E_APPSINGLEUSE Handle = 0x800401F6
+ CO_E_ERRORINAPP Handle = 0x800401F7
+ CO_E_DLLNOTFOUND Handle = 0x800401F8
+ CO_E_ERRORINDLL Handle = 0x800401F9
+ CO_E_WRONGOSFORAPP Handle = 0x800401FA
+ CO_E_OBJNOTREG Handle = 0x800401FB
+ CO_E_OBJISREG Handle = 0x800401FC
+ CO_E_OBJNOTCONNECTED Handle = 0x800401FD
+ CO_E_APPDIDNTREG Handle = 0x800401FE
+ CO_E_RELEASED Handle = 0x800401FF
+ EVENT_E_FIRST syscall.Errno = 0x80040200
+ EVENT_E_LAST syscall.Errno = 0x8004021F
+ EVENT_S_FIRST syscall.Errno = 0x00040200
+ EVENT_S_LAST syscall.Errno = 0x0004021F
+ EVENT_S_SOME_SUBSCRIBERS_FAILED Handle = 0x00040200
+ EVENT_E_ALL_SUBSCRIBERS_FAILED Handle = 0x80040201
+ EVENT_S_NOSUBSCRIBERS Handle = 0x00040202
+ EVENT_E_QUERYSYNTAX Handle = 0x80040203
+ EVENT_E_QUERYFIELD Handle = 0x80040204
+ EVENT_E_INTERNALEXCEPTION Handle = 0x80040205
+ EVENT_E_INTERNALERROR Handle = 0x80040206
+ EVENT_E_INVALID_PER_USER_SID Handle = 0x80040207
+ EVENT_E_USER_EXCEPTION Handle = 0x80040208
+ EVENT_E_TOO_MANY_METHODS Handle = 0x80040209
+ EVENT_E_MISSING_EVENTCLASS Handle = 0x8004020A
+ EVENT_E_NOT_ALL_REMOVED Handle = 0x8004020B
+ EVENT_E_COMPLUS_NOT_INSTALLED Handle = 0x8004020C
+ EVENT_E_CANT_MODIFY_OR_DELETE_UNCONFIGURED_OBJECT Handle = 0x8004020D
+ EVENT_E_CANT_MODIFY_OR_DELETE_CONFIGURED_OBJECT Handle = 0x8004020E
+ EVENT_E_INVALID_EVENT_CLASS_PARTITION Handle = 0x8004020F
+ EVENT_E_PER_USER_SID_NOT_LOGGED_ON Handle = 0x80040210
+ TPC_E_INVALID_PROPERTY Handle = 0x80040241
+ TPC_E_NO_DEFAULT_TABLET Handle = 0x80040212
+ TPC_E_UNKNOWN_PROPERTY Handle = 0x8004021B
+ TPC_E_INVALID_INPUT_RECT Handle = 0x80040219
+ TPC_E_INVALID_STROKE Handle = 0x80040222
+ TPC_E_INITIALIZE_FAIL Handle = 0x80040223
+ TPC_E_NOT_RELEVANT Handle = 0x80040232
+ TPC_E_INVALID_PACKET_DESCRIPTION Handle = 0x80040233
+ TPC_E_RECOGNIZER_NOT_REGISTERED Handle = 0x80040235
+ TPC_E_INVALID_RIGHTS Handle = 0x80040236
+ TPC_E_OUT_OF_ORDER_CALL Handle = 0x80040237
+ TPC_E_QUEUE_FULL Handle = 0x80040238
+ TPC_E_INVALID_CONFIGURATION Handle = 0x80040239
+ TPC_E_INVALID_DATA_FROM_RECOGNIZER Handle = 0x8004023A
+ TPC_S_TRUNCATED Handle = 0x00040252
+ TPC_S_INTERRUPTED Handle = 0x00040253
+ TPC_S_NO_DATA_TO_PROCESS Handle = 0x00040254
+ XACT_E_FIRST syscall.Errno = 0x8004D000
+ XACT_E_LAST syscall.Errno = 0x8004D02B
+ XACT_S_FIRST syscall.Errno = 0x0004D000
+ XACT_S_LAST syscall.Errno = 0x0004D010
+ XACT_E_ALREADYOTHERSINGLEPHASE Handle = 0x8004D000
+ XACT_E_CANTRETAIN Handle = 0x8004D001
+ XACT_E_COMMITFAILED Handle = 0x8004D002
+ XACT_E_COMMITPREVENTED Handle = 0x8004D003
+ XACT_E_HEURISTICABORT Handle = 0x8004D004
+ XACT_E_HEURISTICCOMMIT Handle = 0x8004D005
+ XACT_E_HEURISTICDAMAGE Handle = 0x8004D006
+ XACT_E_HEURISTICDANGER Handle = 0x8004D007
+ XACT_E_ISOLATIONLEVEL Handle = 0x8004D008
+ XACT_E_NOASYNC Handle = 0x8004D009
+ XACT_E_NOENLIST Handle = 0x8004D00A
+ XACT_E_NOISORETAIN Handle = 0x8004D00B
+ XACT_E_NORESOURCE Handle = 0x8004D00C
+ XACT_E_NOTCURRENT Handle = 0x8004D00D
+ XACT_E_NOTRANSACTION Handle = 0x8004D00E
+ XACT_E_NOTSUPPORTED Handle = 0x8004D00F
+ XACT_E_UNKNOWNRMGRID Handle = 0x8004D010
+ XACT_E_WRONGSTATE Handle = 0x8004D011
+ XACT_E_WRONGUOW Handle = 0x8004D012
+ XACT_E_XTIONEXISTS Handle = 0x8004D013
+ XACT_E_NOIMPORTOBJECT Handle = 0x8004D014
+ XACT_E_INVALIDCOOKIE Handle = 0x8004D015
+ XACT_E_INDOUBT Handle = 0x8004D016
+ XACT_E_NOTIMEOUT Handle = 0x8004D017
+ XACT_E_ALREADYINPROGRESS Handle = 0x8004D018
+ XACT_E_ABORTED Handle = 0x8004D019
+ XACT_E_LOGFULL Handle = 0x8004D01A
+ XACT_E_TMNOTAVAILABLE Handle = 0x8004D01B
+ XACT_E_CONNECTION_DOWN Handle = 0x8004D01C
+ XACT_E_CONNECTION_DENIED Handle = 0x8004D01D
+ XACT_E_REENLISTTIMEOUT Handle = 0x8004D01E
+ XACT_E_TIP_CONNECT_FAILED Handle = 0x8004D01F
+ XACT_E_TIP_PROTOCOL_ERROR Handle = 0x8004D020
+ XACT_E_TIP_PULL_FAILED Handle = 0x8004D021
+ XACT_E_DEST_TMNOTAVAILABLE Handle = 0x8004D022
+ XACT_E_TIP_DISABLED Handle = 0x8004D023
+ XACT_E_NETWORK_TX_DISABLED Handle = 0x8004D024
+ XACT_E_PARTNER_NETWORK_TX_DISABLED Handle = 0x8004D025
+ XACT_E_XA_TX_DISABLED Handle = 0x8004D026
+ XACT_E_UNABLE_TO_READ_DTC_CONFIG Handle = 0x8004D027
+ XACT_E_UNABLE_TO_LOAD_DTC_PROXY Handle = 0x8004D028
+ XACT_E_ABORTING Handle = 0x8004D029
+ XACT_E_PUSH_COMM_FAILURE Handle = 0x8004D02A
+ XACT_E_PULL_COMM_FAILURE Handle = 0x8004D02B
+ XACT_E_LU_TX_DISABLED Handle = 0x8004D02C
+ XACT_E_CLERKNOTFOUND Handle = 0x8004D080
+ XACT_E_CLERKEXISTS Handle = 0x8004D081
+ XACT_E_RECOVERYINPROGRESS Handle = 0x8004D082
+ XACT_E_TRANSACTIONCLOSED Handle = 0x8004D083
+ XACT_E_INVALIDLSN Handle = 0x8004D084
+ XACT_E_REPLAYREQUEST Handle = 0x8004D085
+ XACT_S_ASYNC Handle = 0x0004D000
+ XACT_S_DEFECT Handle = 0x0004D001
+ XACT_S_READONLY Handle = 0x0004D002
+ XACT_S_SOMENORETAIN Handle = 0x0004D003
+ XACT_S_OKINFORM Handle = 0x0004D004
+ XACT_S_MADECHANGESCONTENT Handle = 0x0004D005
+ XACT_S_MADECHANGESINFORM Handle = 0x0004D006
+ XACT_S_ALLNORETAIN Handle = 0x0004D007
+ XACT_S_ABORTING Handle = 0x0004D008
+ XACT_S_SINGLEPHASE Handle = 0x0004D009
+ XACT_S_LOCALLY_OK Handle = 0x0004D00A
+ XACT_S_LASTRESOURCEMANAGER Handle = 0x0004D010
+ CONTEXT_E_FIRST syscall.Errno = 0x8004E000
+ CONTEXT_E_LAST syscall.Errno = 0x8004E02F
+ CONTEXT_S_FIRST syscall.Errno = 0x0004E000
+ CONTEXT_S_LAST syscall.Errno = 0x0004E02F
+ CONTEXT_E_ABORTED Handle = 0x8004E002
+ CONTEXT_E_ABORTING Handle = 0x8004E003
+ CONTEXT_E_NOCONTEXT Handle = 0x8004E004
+ CONTEXT_E_WOULD_DEADLOCK Handle = 0x8004E005
+ CONTEXT_E_SYNCH_TIMEOUT Handle = 0x8004E006
+ CONTEXT_E_OLDREF Handle = 0x8004E007
+ CONTEXT_E_ROLENOTFOUND Handle = 0x8004E00C
+ CONTEXT_E_TMNOTAVAILABLE Handle = 0x8004E00F
+ CO_E_ACTIVATIONFAILED Handle = 0x8004E021
+ CO_E_ACTIVATIONFAILED_EVENTLOGGED Handle = 0x8004E022
+ CO_E_ACTIVATIONFAILED_CATALOGERROR Handle = 0x8004E023
+ CO_E_ACTIVATIONFAILED_TIMEOUT Handle = 0x8004E024
+ CO_E_INITIALIZATIONFAILED Handle = 0x8004E025
+ CONTEXT_E_NOJIT Handle = 0x8004E026
+ CONTEXT_E_NOTRANSACTION Handle = 0x8004E027
+ CO_E_THREADINGMODEL_CHANGED Handle = 0x8004E028
+ CO_E_NOIISINTRINSICS Handle = 0x8004E029
+ CO_E_NOCOOKIES Handle = 0x8004E02A
+ CO_E_DBERROR Handle = 0x8004E02B
+ CO_E_NOTPOOLED Handle = 0x8004E02C
+ CO_E_NOTCONSTRUCTED Handle = 0x8004E02D
+ CO_E_NOSYNCHRONIZATION Handle = 0x8004E02E
+ CO_E_ISOLEVELMISMATCH Handle = 0x8004E02F
+ CO_E_CALL_OUT_OF_TX_SCOPE_NOT_ALLOWED Handle = 0x8004E030
+ CO_E_EXIT_TRANSACTION_SCOPE_NOT_CALLED Handle = 0x8004E031
+ OLE_S_USEREG Handle = 0x00040000
+ OLE_S_STATIC Handle = 0x00040001
+ OLE_S_MAC_CLIPFORMAT Handle = 0x00040002
+ DRAGDROP_S_DROP Handle = 0x00040100
+ DRAGDROP_S_CANCEL Handle = 0x00040101
+ DRAGDROP_S_USEDEFAULTCURSORS Handle = 0x00040102
+ DATA_S_SAMEFORMATETC Handle = 0x00040130
+ VIEW_S_ALREADY_FROZEN Handle = 0x00040140
+ CACHE_S_FORMATETC_NOTSUPPORTED Handle = 0x00040170
+ CACHE_S_SAMECACHE Handle = 0x00040171
+ CACHE_S_SOMECACHES_NOTUPDATED Handle = 0x00040172
+ OLEOBJ_S_INVALIDVERB Handle = 0x00040180
+ OLEOBJ_S_CANNOT_DOVERB_NOW Handle = 0x00040181
+ OLEOBJ_S_INVALIDHWND Handle = 0x00040182
+ INPLACE_S_TRUNCATED Handle = 0x000401A0
+ CONVERT10_S_NO_PRESENTATION Handle = 0x000401C0
+ MK_S_REDUCED_TO_SELF Handle = 0x000401E2
+ MK_S_ME Handle = 0x000401E4
+ MK_S_HIM Handle = 0x000401E5
+ MK_S_US Handle = 0x000401E6
+ MK_S_MONIKERALREADYREGISTERED Handle = 0x000401E7
+ SCHED_S_TASK_READY Handle = 0x00041300
+ SCHED_S_TASK_RUNNING Handle = 0x00041301
+ SCHED_S_TASK_DISABLED Handle = 0x00041302
+ SCHED_S_TASK_HAS_NOT_RUN Handle = 0x00041303
+ SCHED_S_TASK_NO_MORE_RUNS Handle = 0x00041304
+ SCHED_S_TASK_NOT_SCHEDULED Handle = 0x00041305
+ SCHED_S_TASK_TERMINATED Handle = 0x00041306
+ SCHED_S_TASK_NO_VALID_TRIGGERS Handle = 0x00041307
+ SCHED_S_EVENT_TRIGGER Handle = 0x00041308
+ SCHED_E_TRIGGER_NOT_FOUND Handle = 0x80041309
+ SCHED_E_TASK_NOT_READY Handle = 0x8004130A
+ SCHED_E_TASK_NOT_RUNNING Handle = 0x8004130B
+ SCHED_E_SERVICE_NOT_INSTALLED Handle = 0x8004130C
+ SCHED_E_CANNOT_OPEN_TASK Handle = 0x8004130D
+ SCHED_E_INVALID_TASK Handle = 0x8004130E
+ SCHED_E_ACCOUNT_INFORMATION_NOT_SET Handle = 0x8004130F
+ SCHED_E_ACCOUNT_NAME_NOT_FOUND Handle = 0x80041310
+ SCHED_E_ACCOUNT_DBASE_CORRUPT Handle = 0x80041311
+ SCHED_E_NO_SECURITY_SERVICES Handle = 0x80041312
+ SCHED_E_UNKNOWN_OBJECT_VERSION Handle = 0x80041313
+ SCHED_E_UNSUPPORTED_ACCOUNT_OPTION Handle = 0x80041314
+ SCHED_E_SERVICE_NOT_RUNNING Handle = 0x80041315
+ SCHED_E_UNEXPECTEDNODE Handle = 0x80041316
+ SCHED_E_NAMESPACE Handle = 0x80041317
+ SCHED_E_INVALIDVALUE Handle = 0x80041318
+ SCHED_E_MISSINGNODE Handle = 0x80041319
+ SCHED_E_MALFORMEDXML Handle = 0x8004131A
+ SCHED_S_SOME_TRIGGERS_FAILED Handle = 0x0004131B
+ SCHED_S_BATCH_LOGON_PROBLEM Handle = 0x0004131C
+ SCHED_E_TOO_MANY_NODES Handle = 0x8004131D
+ SCHED_E_PAST_END_BOUNDARY Handle = 0x8004131E
+ SCHED_E_ALREADY_RUNNING Handle = 0x8004131F
+ SCHED_E_USER_NOT_LOGGED_ON Handle = 0x80041320
+ SCHED_E_INVALID_TASK_HASH Handle = 0x80041321
+ SCHED_E_SERVICE_NOT_AVAILABLE Handle = 0x80041322
+ SCHED_E_SERVICE_TOO_BUSY Handle = 0x80041323
+ SCHED_E_TASK_ATTEMPTED Handle = 0x80041324
+ SCHED_S_TASK_QUEUED Handle = 0x00041325
+ SCHED_E_TASK_DISABLED Handle = 0x80041326
+ SCHED_E_TASK_NOT_V1_COMPAT Handle = 0x80041327
+ SCHED_E_START_ON_DEMAND Handle = 0x80041328
+ SCHED_E_TASK_NOT_UBPM_COMPAT Handle = 0x80041329
+ SCHED_E_DEPRECATED_FEATURE_USED Handle = 0x80041330
+ CO_E_CLASS_CREATE_FAILED Handle = 0x80080001
+ CO_E_SCM_ERROR Handle = 0x80080002
+ CO_E_SCM_RPC_FAILURE Handle = 0x80080003
+ CO_E_BAD_PATH Handle = 0x80080004
+ CO_E_SERVER_EXEC_FAILURE Handle = 0x80080005
+ CO_E_OBJSRV_RPC_FAILURE Handle = 0x80080006
+ MK_E_NO_NORMALIZED Handle = 0x80080007
+ CO_E_SERVER_STOPPING Handle = 0x80080008
+ MEM_E_INVALID_ROOT Handle = 0x80080009
+ MEM_E_INVALID_LINK Handle = 0x80080010
+ MEM_E_INVALID_SIZE Handle = 0x80080011
+ CO_S_NOTALLINTERFACES Handle = 0x00080012
+ CO_S_MACHINENAMENOTFOUND Handle = 0x00080013
+ CO_E_MISSING_DISPLAYNAME Handle = 0x80080015
+ CO_E_RUNAS_VALUE_MUST_BE_AAA Handle = 0x80080016
+ CO_E_ELEVATION_DISABLED Handle = 0x80080017
+ APPX_E_PACKAGING_INTERNAL Handle = 0x80080200
+ APPX_E_INTERLEAVING_NOT_ALLOWED Handle = 0x80080201
+ APPX_E_RELATIONSHIPS_NOT_ALLOWED Handle = 0x80080202
+ APPX_E_MISSING_REQUIRED_FILE Handle = 0x80080203
+ APPX_E_INVALID_MANIFEST Handle = 0x80080204
+ APPX_E_INVALID_BLOCKMAP Handle = 0x80080205
+ APPX_E_CORRUPT_CONTENT Handle = 0x80080206
+ APPX_E_BLOCK_HASH_INVALID Handle = 0x80080207
+ APPX_E_REQUESTED_RANGE_TOO_LARGE Handle = 0x80080208
+ APPX_E_INVALID_SIP_CLIENT_DATA Handle = 0x80080209
+ APPX_E_INVALID_KEY_INFO Handle = 0x8008020A
+ APPX_E_INVALID_CONTENTGROUPMAP Handle = 0x8008020B
+ APPX_E_INVALID_APPINSTALLER Handle = 0x8008020C
+ APPX_E_DELTA_BASELINE_VERSION_MISMATCH Handle = 0x8008020D
+ APPX_E_DELTA_PACKAGE_MISSING_FILE Handle = 0x8008020E
+ APPX_E_INVALID_DELTA_PACKAGE Handle = 0x8008020F
+ APPX_E_DELTA_APPENDED_PACKAGE_NOT_ALLOWED Handle = 0x80080210
+ APPX_E_INVALID_PACKAGING_LAYOUT Handle = 0x80080211
+ APPX_E_INVALID_PACKAGESIGNCONFIG Handle = 0x80080212
+ APPX_E_RESOURCESPRI_NOT_ALLOWED Handle = 0x80080213
+ APPX_E_FILE_COMPRESSION_MISMATCH Handle = 0x80080214
+ APPX_E_INVALID_PAYLOAD_PACKAGE_EXTENSION Handle = 0x80080215
+ APPX_E_INVALID_ENCRYPTION_EXCLUSION_FILE_LIST Handle = 0x80080216
+ BT_E_SPURIOUS_ACTIVATION Handle = 0x80080300
+ DISP_E_UNKNOWNINTERFACE Handle = 0x80020001
+ DISP_E_MEMBERNOTFOUND Handle = 0x80020003
+ DISP_E_PARAMNOTFOUND Handle = 0x80020004
+ DISP_E_TYPEMISMATCH Handle = 0x80020005
+ DISP_E_UNKNOWNNAME Handle = 0x80020006
+ DISP_E_NONAMEDARGS Handle = 0x80020007
+ DISP_E_BADVARTYPE Handle = 0x80020008
+ DISP_E_EXCEPTION Handle = 0x80020009
+ DISP_E_OVERFLOW Handle = 0x8002000A
+ DISP_E_BADINDEX Handle = 0x8002000B
+ DISP_E_UNKNOWNLCID Handle = 0x8002000C
+ DISP_E_ARRAYISLOCKED Handle = 0x8002000D
+ DISP_E_BADPARAMCOUNT Handle = 0x8002000E
+ DISP_E_PARAMNOTOPTIONAL Handle = 0x8002000F
+ DISP_E_BADCALLEE Handle = 0x80020010
+ DISP_E_NOTACOLLECTION Handle = 0x80020011
+ DISP_E_DIVBYZERO Handle = 0x80020012
+ DISP_E_BUFFERTOOSMALL Handle = 0x80020013
+ TYPE_E_BUFFERTOOSMALL Handle = 0x80028016
+ TYPE_E_FIELDNOTFOUND Handle = 0x80028017
+ TYPE_E_INVDATAREAD Handle = 0x80028018
+ TYPE_E_UNSUPFORMAT Handle = 0x80028019
+ TYPE_E_REGISTRYACCESS Handle = 0x8002801C
+ TYPE_E_LIBNOTREGISTERED Handle = 0x8002801D
+ TYPE_E_UNDEFINEDTYPE Handle = 0x80028027
+ TYPE_E_QUALIFIEDNAMEDISALLOWED Handle = 0x80028028
+ TYPE_E_INVALIDSTATE Handle = 0x80028029
+ TYPE_E_WRONGTYPEKIND Handle = 0x8002802A
+ TYPE_E_ELEMENTNOTFOUND Handle = 0x8002802B
+ TYPE_E_AMBIGUOUSNAME Handle = 0x8002802C
+ TYPE_E_NAMECONFLICT Handle = 0x8002802D
+ TYPE_E_UNKNOWNLCID Handle = 0x8002802E
+ TYPE_E_DLLFUNCTIONNOTFOUND Handle = 0x8002802F
+ TYPE_E_BADMODULEKIND Handle = 0x800288BD
+ TYPE_E_SIZETOOBIG Handle = 0x800288C5
+ TYPE_E_DUPLICATEID Handle = 0x800288C6
+ TYPE_E_INVALIDID Handle = 0x800288CF
+ TYPE_E_TYPEMISMATCH Handle = 0x80028CA0
+ TYPE_E_OUTOFBOUNDS Handle = 0x80028CA1
+ TYPE_E_IOERROR Handle = 0x80028CA2
+ TYPE_E_CANTCREATETMPFILE Handle = 0x80028CA3
+ TYPE_E_CANTLOADLIBRARY Handle = 0x80029C4A
+ TYPE_E_INCONSISTENTPROPFUNCS Handle = 0x80029C83
+ TYPE_E_CIRCULARTYPE Handle = 0x80029C84
+ STG_E_INVALIDFUNCTION Handle = 0x80030001
+ STG_E_FILENOTFOUND Handle = 0x80030002
+ STG_E_PATHNOTFOUND Handle = 0x80030003
+ STG_E_TOOMANYOPENFILES Handle = 0x80030004
+ STG_E_ACCESSDENIED Handle = 0x80030005
+ STG_E_INVALIDHANDLE Handle = 0x80030006
+ STG_E_INSUFFICIENTMEMORY Handle = 0x80030008
+ STG_E_INVALIDPOINTER Handle = 0x80030009
+ STG_E_NOMOREFILES Handle = 0x80030012
+ STG_E_DISKISWRITEPROTECTED Handle = 0x80030013
+ STG_E_SEEKERROR Handle = 0x80030019
+ STG_E_WRITEFAULT Handle = 0x8003001D
+ STG_E_READFAULT Handle = 0x8003001E
+ STG_E_SHAREVIOLATION Handle = 0x80030020
+ STG_E_LOCKVIOLATION Handle = 0x80030021
+ STG_E_FILEALREADYEXISTS Handle = 0x80030050
+ STG_E_INVALIDPARAMETER Handle = 0x80030057
+ STG_E_MEDIUMFULL Handle = 0x80030070
+ STG_E_PROPSETMISMATCHED Handle = 0x800300F0
+ STG_E_ABNORMALAPIEXIT Handle = 0x800300FA
+ STG_E_INVALIDHEADER Handle = 0x800300FB
+ STG_E_INVALIDNAME Handle = 0x800300FC
+ STG_E_UNKNOWN Handle = 0x800300FD
+ STG_E_UNIMPLEMENTEDFUNCTION Handle = 0x800300FE
+ STG_E_INVALIDFLAG Handle = 0x800300FF
+ STG_E_INUSE Handle = 0x80030100
+ STG_E_NOTCURRENT Handle = 0x80030101
+ STG_E_REVERTED Handle = 0x80030102
+ STG_E_CANTSAVE Handle = 0x80030103
+ STG_E_OLDFORMAT Handle = 0x80030104
+ STG_E_OLDDLL Handle = 0x80030105
+ STG_E_SHAREREQUIRED Handle = 0x80030106
+ STG_E_NOTFILEBASEDSTORAGE Handle = 0x80030107
+ STG_E_EXTANTMARSHALLINGS Handle = 0x80030108
+ STG_E_DOCFILECORRUPT Handle = 0x80030109
+ STG_E_BADBASEADDRESS Handle = 0x80030110
+ STG_E_DOCFILETOOLARGE Handle = 0x80030111
+ STG_E_NOTSIMPLEFORMAT Handle = 0x80030112
+ STG_E_INCOMPLETE Handle = 0x80030201
+ STG_E_TERMINATED Handle = 0x80030202
+ STG_S_CONVERTED Handle = 0x00030200
+ STG_S_BLOCK Handle = 0x00030201
+ STG_S_RETRYNOW Handle = 0x00030202
+ STG_S_MONITORING Handle = 0x00030203
+ STG_S_MULTIPLEOPENS Handle = 0x00030204
+ STG_S_CONSOLIDATIONFAILED Handle = 0x00030205
+ STG_S_CANNOTCONSOLIDATE Handle = 0x00030206
+ STG_S_POWER_CYCLE_REQUIRED Handle = 0x00030207
+ STG_E_FIRMWARE_SLOT_INVALID Handle = 0x80030208
+ STG_E_FIRMWARE_IMAGE_INVALID Handle = 0x80030209
+ STG_E_DEVICE_UNRESPONSIVE Handle = 0x8003020A
+ STG_E_STATUS_COPY_PROTECTION_FAILURE Handle = 0x80030305
+ STG_E_CSS_AUTHENTICATION_FAILURE Handle = 0x80030306
+ STG_E_CSS_KEY_NOT_PRESENT Handle = 0x80030307
+ STG_E_CSS_KEY_NOT_ESTABLISHED Handle = 0x80030308
+ STG_E_CSS_SCRAMBLED_SECTOR Handle = 0x80030309
+ STG_E_CSS_REGION_MISMATCH Handle = 0x8003030A
+ STG_E_RESETS_EXHAUSTED Handle = 0x8003030B
+ RPC_E_CALL_REJECTED Handle = 0x80010001
+ RPC_E_CALL_CANCELED Handle = 0x80010002
+ RPC_E_CANTPOST_INSENDCALL Handle = 0x80010003
+ RPC_E_CANTCALLOUT_INASYNCCALL Handle = 0x80010004
+ RPC_E_CANTCALLOUT_INEXTERNALCALL Handle = 0x80010005
+ RPC_E_CONNECTION_TERMINATED Handle = 0x80010006
+ RPC_E_SERVER_DIED Handle = 0x80010007
+ RPC_E_CLIENT_DIED Handle = 0x80010008
+ RPC_E_INVALID_DATAPACKET Handle = 0x80010009
+ RPC_E_CANTTRANSMIT_CALL Handle = 0x8001000A
+ RPC_E_CLIENT_CANTMARSHAL_DATA Handle = 0x8001000B
+ RPC_E_CLIENT_CANTUNMARSHAL_DATA Handle = 0x8001000C
+ RPC_E_SERVER_CANTMARSHAL_DATA Handle = 0x8001000D
+ RPC_E_SERVER_CANTUNMARSHAL_DATA Handle = 0x8001000E
+ RPC_E_INVALID_DATA Handle = 0x8001000F
+ RPC_E_INVALID_PARAMETER Handle = 0x80010010
+ RPC_E_CANTCALLOUT_AGAIN Handle = 0x80010011
+ RPC_E_SERVER_DIED_DNE Handle = 0x80010012
+ RPC_E_SYS_CALL_FAILED Handle = 0x80010100
+ RPC_E_OUT_OF_RESOURCES Handle = 0x80010101
+ RPC_E_ATTEMPTED_MULTITHREAD Handle = 0x80010102
+ RPC_E_NOT_REGISTERED Handle = 0x80010103
+ RPC_E_FAULT Handle = 0x80010104
+ RPC_E_SERVERFAULT Handle = 0x80010105
+ RPC_E_CHANGED_MODE Handle = 0x80010106
+ RPC_E_INVALIDMETHOD Handle = 0x80010107
+ RPC_E_DISCONNECTED Handle = 0x80010108
+ RPC_E_RETRY Handle = 0x80010109
+ RPC_E_SERVERCALL_RETRYLATER Handle = 0x8001010A
+ RPC_E_SERVERCALL_REJECTED Handle = 0x8001010B
+ RPC_E_INVALID_CALLDATA Handle = 0x8001010C
+ RPC_E_CANTCALLOUT_ININPUTSYNCCALL Handle = 0x8001010D
+ RPC_E_WRONG_THREAD Handle = 0x8001010E
+ RPC_E_THREAD_NOT_INIT Handle = 0x8001010F
+ RPC_E_VERSION_MISMATCH Handle = 0x80010110
+ RPC_E_INVALID_HEADER Handle = 0x80010111
+ RPC_E_INVALID_EXTENSION Handle = 0x80010112
+ RPC_E_INVALID_IPID Handle = 0x80010113
+ RPC_E_INVALID_OBJECT Handle = 0x80010114
+ RPC_S_CALLPENDING Handle = 0x80010115
+ RPC_S_WAITONTIMER Handle = 0x80010116
+ RPC_E_CALL_COMPLETE Handle = 0x80010117
+ RPC_E_UNSECURE_CALL Handle = 0x80010118
+ RPC_E_TOO_LATE Handle = 0x80010119
+ RPC_E_NO_GOOD_SECURITY_PACKAGES Handle = 0x8001011A
+ RPC_E_ACCESS_DENIED Handle = 0x8001011B
+ RPC_E_REMOTE_DISABLED Handle = 0x8001011C
+ RPC_E_INVALID_OBJREF Handle = 0x8001011D
+ RPC_E_NO_CONTEXT Handle = 0x8001011E
+ RPC_E_TIMEOUT Handle = 0x8001011F
+ RPC_E_NO_SYNC Handle = 0x80010120
+ RPC_E_FULLSIC_REQUIRED Handle = 0x80010121
+ RPC_E_INVALID_STD_NAME Handle = 0x80010122
+ CO_E_FAILEDTOIMPERSONATE Handle = 0x80010123
+ CO_E_FAILEDTOGETSECCTX Handle = 0x80010124
+ CO_E_FAILEDTOOPENTHREADTOKEN Handle = 0x80010125
+ CO_E_FAILEDTOGETTOKENINFO Handle = 0x80010126
+ CO_E_TRUSTEEDOESNTMATCHCLIENT Handle = 0x80010127
+ CO_E_FAILEDTOQUERYCLIENTBLANKET Handle = 0x80010128
+ CO_E_FAILEDTOSETDACL Handle = 0x80010129
+ CO_E_ACCESSCHECKFAILED Handle = 0x8001012A
+ CO_E_NETACCESSAPIFAILED Handle = 0x8001012B
+ CO_E_WRONGTRUSTEENAMESYNTAX Handle = 0x8001012C
+ CO_E_INVALIDSID Handle = 0x8001012D
+ CO_E_CONVERSIONFAILED Handle = 0x8001012E
+ CO_E_NOMATCHINGSIDFOUND Handle = 0x8001012F
+ CO_E_LOOKUPACCSIDFAILED Handle = 0x80010130
+ CO_E_NOMATCHINGNAMEFOUND Handle = 0x80010131
+ CO_E_LOOKUPACCNAMEFAILED Handle = 0x80010132
+ CO_E_SETSERLHNDLFAILED Handle = 0x80010133
+ CO_E_FAILEDTOGETWINDIR Handle = 0x80010134
+ CO_E_PATHTOOLONG Handle = 0x80010135
+ CO_E_FAILEDTOGENUUID Handle = 0x80010136
+ CO_E_FAILEDTOCREATEFILE Handle = 0x80010137
+ CO_E_FAILEDTOCLOSEHANDLE Handle = 0x80010138
+ CO_E_EXCEEDSYSACLLIMIT Handle = 0x80010139
+ CO_E_ACESINWRONGORDER Handle = 0x8001013A
+ CO_E_INCOMPATIBLESTREAMVERSION Handle = 0x8001013B
+ CO_E_FAILEDTOOPENPROCESSTOKEN Handle = 0x8001013C
+ CO_E_DECODEFAILED Handle = 0x8001013D
+ CO_E_ACNOTINITIALIZED Handle = 0x8001013F
+ CO_E_CANCEL_DISABLED Handle = 0x80010140
+ RPC_E_UNEXPECTED Handle = 0x8001FFFF
+ ERROR_AUDITING_DISABLED Handle = 0xC0090001
+ ERROR_ALL_SIDS_FILTERED Handle = 0xC0090002
+ ERROR_BIZRULES_NOT_ENABLED Handle = 0xC0090003
+ NTE_BAD_UID Handle = 0x80090001
+ NTE_BAD_HASH Handle = 0x80090002
+ NTE_BAD_KEY Handle = 0x80090003
+ NTE_BAD_LEN Handle = 0x80090004
+ NTE_BAD_DATA Handle = 0x80090005
+ NTE_BAD_SIGNATURE Handle = 0x80090006
+ NTE_BAD_VER Handle = 0x80090007
+ NTE_BAD_ALGID Handle = 0x80090008
+ NTE_BAD_FLAGS Handle = 0x80090009
+ NTE_BAD_TYPE Handle = 0x8009000A
+ NTE_BAD_KEY_STATE Handle = 0x8009000B
+ NTE_BAD_HASH_STATE Handle = 0x8009000C
+ NTE_NO_KEY Handle = 0x8009000D
+ NTE_NO_MEMORY Handle = 0x8009000E
+ NTE_EXISTS Handle = 0x8009000F
+ NTE_PERM Handle = 0x80090010
+ NTE_NOT_FOUND Handle = 0x80090011
+ NTE_DOUBLE_ENCRYPT Handle = 0x80090012
+ NTE_BAD_PROVIDER Handle = 0x80090013
+ NTE_BAD_PROV_TYPE Handle = 0x80090014
+ NTE_BAD_PUBLIC_KEY Handle = 0x80090015
+ NTE_BAD_KEYSET Handle = 0x80090016
+ NTE_PROV_TYPE_NOT_DEF Handle = 0x80090017
+ NTE_PROV_TYPE_ENTRY_BAD Handle = 0x80090018
+ NTE_KEYSET_NOT_DEF Handle = 0x80090019
+ NTE_KEYSET_ENTRY_BAD Handle = 0x8009001A
+ NTE_PROV_TYPE_NO_MATCH Handle = 0x8009001B
+ NTE_SIGNATURE_FILE_BAD Handle = 0x8009001C
+ NTE_PROVIDER_DLL_FAIL Handle = 0x8009001D
+ NTE_PROV_DLL_NOT_FOUND Handle = 0x8009001E
+ NTE_BAD_KEYSET_PARAM Handle = 0x8009001F
+ NTE_FAIL Handle = 0x80090020
+ NTE_SYS_ERR Handle = 0x80090021
+ NTE_SILENT_CONTEXT Handle = 0x80090022
+ NTE_TOKEN_KEYSET_STORAGE_FULL Handle = 0x80090023
+ NTE_TEMPORARY_PROFILE Handle = 0x80090024
+ NTE_FIXEDPARAMETER Handle = 0x80090025
+ NTE_INVALID_HANDLE Handle = 0x80090026
+ NTE_INVALID_PARAMETER Handle = 0x80090027
+ NTE_BUFFER_TOO_SMALL Handle = 0x80090028
+ NTE_NOT_SUPPORTED Handle = 0x80090029
+ NTE_NO_MORE_ITEMS Handle = 0x8009002A
+ NTE_BUFFERS_OVERLAP Handle = 0x8009002B
+ NTE_DECRYPTION_FAILURE Handle = 0x8009002C
+ NTE_INTERNAL_ERROR Handle = 0x8009002D
+ NTE_UI_REQUIRED Handle = 0x8009002E
+ NTE_HMAC_NOT_SUPPORTED Handle = 0x8009002F
+ NTE_DEVICE_NOT_READY Handle = 0x80090030
+ NTE_AUTHENTICATION_IGNORED Handle = 0x80090031
+ NTE_VALIDATION_FAILED Handle = 0x80090032
+ NTE_INCORRECT_PASSWORD Handle = 0x80090033
+ NTE_ENCRYPTION_FAILURE Handle = 0x80090034
+ NTE_DEVICE_NOT_FOUND Handle = 0x80090035
+ NTE_USER_CANCELLED Handle = 0x80090036
+ NTE_PASSWORD_CHANGE_REQUIRED Handle = 0x80090037
+ NTE_NOT_ACTIVE_CONSOLE Handle = 0x80090038
+ SEC_E_INSUFFICIENT_MEMORY Handle = 0x80090300
+ SEC_E_INVALID_HANDLE Handle = 0x80090301
+ SEC_E_UNSUPPORTED_FUNCTION Handle = 0x80090302
+ SEC_E_TARGET_UNKNOWN Handle = 0x80090303
+ SEC_E_INTERNAL_ERROR Handle = 0x80090304
+ SEC_E_SECPKG_NOT_FOUND Handle = 0x80090305
+ SEC_E_NOT_OWNER Handle = 0x80090306
+ SEC_E_CANNOT_INSTALL Handle = 0x80090307
+ SEC_E_INVALID_TOKEN Handle = 0x80090308
+ SEC_E_CANNOT_PACK Handle = 0x80090309
+ SEC_E_QOP_NOT_SUPPORTED Handle = 0x8009030A
+ SEC_E_NO_IMPERSONATION Handle = 0x8009030B
+ SEC_E_LOGON_DENIED Handle = 0x8009030C
+ SEC_E_UNKNOWN_CREDENTIALS Handle = 0x8009030D
+ SEC_E_NO_CREDENTIALS Handle = 0x8009030E
+ SEC_E_MESSAGE_ALTERED Handle = 0x8009030F
+ SEC_E_OUT_OF_SEQUENCE Handle = 0x80090310
+ SEC_E_NO_AUTHENTICATING_AUTHORITY Handle = 0x80090311
+ SEC_I_CONTINUE_NEEDED Handle = 0x00090312
+ SEC_I_COMPLETE_NEEDED Handle = 0x00090313
+ SEC_I_COMPLETE_AND_CONTINUE Handle = 0x00090314
+ SEC_I_LOCAL_LOGON Handle = 0x00090315
+ SEC_E_BAD_PKGID Handle = 0x80090316
+ SEC_E_CONTEXT_EXPIRED Handle = 0x80090317
+ SEC_I_CONTEXT_EXPIRED Handle = 0x00090317
+ SEC_E_INCOMPLETE_MESSAGE Handle = 0x80090318
+ SEC_E_INCOMPLETE_CREDENTIALS Handle = 0x80090320
+ SEC_E_BUFFER_TOO_SMALL Handle = 0x80090321
+ SEC_I_INCOMPLETE_CREDENTIALS Handle = 0x00090320
+ SEC_I_RENEGOTIATE Handle = 0x00090321
+ SEC_E_WRONG_PRINCIPAL Handle = 0x80090322
+ SEC_I_NO_LSA_CONTEXT Handle = 0x00090323
+ SEC_E_TIME_SKEW Handle = 0x80090324
+ SEC_E_UNTRUSTED_ROOT Handle = 0x80090325
+ SEC_E_ILLEGAL_MESSAGE Handle = 0x80090326
+ SEC_E_CERT_UNKNOWN Handle = 0x80090327
+ SEC_E_CERT_EXPIRED Handle = 0x80090328
+ SEC_E_ENCRYPT_FAILURE Handle = 0x80090329
+ SEC_E_DECRYPT_FAILURE Handle = 0x80090330
+ SEC_E_ALGORITHM_MISMATCH Handle = 0x80090331
+ SEC_E_SECURITY_QOS_FAILED Handle = 0x80090332
+ SEC_E_UNFINISHED_CONTEXT_DELETED Handle = 0x80090333
+ SEC_E_NO_TGT_REPLY Handle = 0x80090334
+ SEC_E_NO_IP_ADDRESSES Handle = 0x80090335
+ SEC_E_WRONG_CREDENTIAL_HANDLE Handle = 0x80090336
+ SEC_E_CRYPTO_SYSTEM_INVALID Handle = 0x80090337
+ SEC_E_MAX_REFERRALS_EXCEEDED Handle = 0x80090338
+ SEC_E_MUST_BE_KDC Handle = 0x80090339
+ SEC_E_STRONG_CRYPTO_NOT_SUPPORTED Handle = 0x8009033A
+ SEC_E_TOO_MANY_PRINCIPALS Handle = 0x8009033B
+ SEC_E_NO_PA_DATA Handle = 0x8009033C
+ SEC_E_PKINIT_NAME_MISMATCH Handle = 0x8009033D
+ SEC_E_SMARTCARD_LOGON_REQUIRED Handle = 0x8009033E
+ SEC_E_SHUTDOWN_IN_PROGRESS Handle = 0x8009033F
+ SEC_E_KDC_INVALID_REQUEST Handle = 0x80090340
+ SEC_E_KDC_UNABLE_TO_REFER Handle = 0x80090341
+ SEC_E_KDC_UNKNOWN_ETYPE Handle = 0x80090342
+ SEC_E_UNSUPPORTED_PREAUTH Handle = 0x80090343
+ SEC_E_DELEGATION_REQUIRED Handle = 0x80090345
+ SEC_E_BAD_BINDINGS Handle = 0x80090346
+ SEC_E_MULTIPLE_ACCOUNTS Handle = 0x80090347
+ SEC_E_NO_KERB_KEY Handle = 0x80090348
+ SEC_E_CERT_WRONG_USAGE Handle = 0x80090349
+ SEC_E_DOWNGRADE_DETECTED Handle = 0x80090350
+ SEC_E_SMARTCARD_CERT_REVOKED Handle = 0x80090351
+ SEC_E_ISSUING_CA_UNTRUSTED Handle = 0x80090352
+ SEC_E_REVOCATION_OFFLINE_C Handle = 0x80090353
+ SEC_E_PKINIT_CLIENT_FAILURE Handle = 0x80090354
+ SEC_E_SMARTCARD_CERT_EXPIRED Handle = 0x80090355
+ SEC_E_NO_S4U_PROT_SUPPORT Handle = 0x80090356
+ SEC_E_CROSSREALM_DELEGATION_FAILURE Handle = 0x80090357
+ SEC_E_REVOCATION_OFFLINE_KDC Handle = 0x80090358
+ SEC_E_ISSUING_CA_UNTRUSTED_KDC Handle = 0x80090359
+ SEC_E_KDC_CERT_EXPIRED Handle = 0x8009035A
+ SEC_E_KDC_CERT_REVOKED Handle = 0x8009035B
+ SEC_I_SIGNATURE_NEEDED Handle = 0x0009035C
+ SEC_E_INVALID_PARAMETER Handle = 0x8009035D
+ SEC_E_DELEGATION_POLICY Handle = 0x8009035E
+ SEC_E_POLICY_NLTM_ONLY Handle = 0x8009035F
+ SEC_I_NO_RENEGOTIATION Handle = 0x00090360
+ SEC_E_NO_CONTEXT Handle = 0x80090361
+ SEC_E_PKU2U_CERT_FAILURE Handle = 0x80090362
+ SEC_E_MUTUAL_AUTH_FAILED Handle = 0x80090363
+ SEC_I_MESSAGE_FRAGMENT Handle = 0x00090364
+ SEC_E_ONLY_HTTPS_ALLOWED Handle = 0x80090365
+ SEC_I_CONTINUE_NEEDED_MESSAGE_OK Handle = 0x00090366
+ SEC_E_APPLICATION_PROTOCOL_MISMATCH Handle = 0x80090367
+ SEC_I_ASYNC_CALL_PENDING Handle = 0x00090368
+ SEC_E_INVALID_UPN_NAME Handle = 0x80090369
+ SEC_E_NO_SPM = SEC_E_INTERNAL_ERROR
+ SEC_E_NOT_SUPPORTED = SEC_E_UNSUPPORTED_FUNCTION
+ CRYPT_E_MSG_ERROR Handle = 0x80091001
+ CRYPT_E_UNKNOWN_ALGO Handle = 0x80091002
+ CRYPT_E_OID_FORMAT Handle = 0x80091003
+ CRYPT_E_INVALID_MSG_TYPE Handle = 0x80091004
+ CRYPT_E_UNEXPECTED_ENCODING Handle = 0x80091005
+ CRYPT_E_AUTH_ATTR_MISSING Handle = 0x80091006
+ CRYPT_E_HASH_VALUE Handle = 0x80091007
+ CRYPT_E_INVALID_INDEX Handle = 0x80091008
+ CRYPT_E_ALREADY_DECRYPTED Handle = 0x80091009
+ CRYPT_E_NOT_DECRYPTED Handle = 0x8009100A
+ CRYPT_E_RECIPIENT_NOT_FOUND Handle = 0x8009100B
+ CRYPT_E_CONTROL_TYPE Handle = 0x8009100C
+ CRYPT_E_ISSUER_SERIALNUMBER Handle = 0x8009100D
+ CRYPT_E_SIGNER_NOT_FOUND Handle = 0x8009100E
+ CRYPT_E_ATTRIBUTES_MISSING Handle = 0x8009100F
+ CRYPT_E_STREAM_MSG_NOT_READY Handle = 0x80091010
+ CRYPT_E_STREAM_INSUFFICIENT_DATA Handle = 0x80091011
+ CRYPT_I_NEW_PROTECTION_REQUIRED Handle = 0x00091012
+ CRYPT_E_BAD_LEN Handle = 0x80092001
+ CRYPT_E_BAD_ENCODE Handle = 0x80092002
+ CRYPT_E_FILE_ERROR Handle = 0x80092003
+ CRYPT_E_NOT_FOUND Handle = 0x80092004
+ CRYPT_E_EXISTS Handle = 0x80092005
+ CRYPT_E_NO_PROVIDER Handle = 0x80092006
+ CRYPT_E_SELF_SIGNED Handle = 0x80092007
+ CRYPT_E_DELETED_PREV Handle = 0x80092008
+ CRYPT_E_NO_MATCH Handle = 0x80092009
+ CRYPT_E_UNEXPECTED_MSG_TYPE Handle = 0x8009200A
+ CRYPT_E_NO_KEY_PROPERTY Handle = 0x8009200B
+ CRYPT_E_NO_DECRYPT_CERT Handle = 0x8009200C
+ CRYPT_E_BAD_MSG Handle = 0x8009200D
+ CRYPT_E_NO_SIGNER Handle = 0x8009200E
+ CRYPT_E_PENDING_CLOSE Handle = 0x8009200F
+ CRYPT_E_REVOKED Handle = 0x80092010
+ CRYPT_E_NO_REVOCATION_DLL Handle = 0x80092011
+ CRYPT_E_NO_REVOCATION_CHECK Handle = 0x80092012
+ CRYPT_E_REVOCATION_OFFLINE Handle = 0x80092013
+ CRYPT_E_NOT_IN_REVOCATION_DATABASE Handle = 0x80092014
+ CRYPT_E_INVALID_NUMERIC_STRING Handle = 0x80092020
+ CRYPT_E_INVALID_PRINTABLE_STRING Handle = 0x80092021
+ CRYPT_E_INVALID_IA5_STRING Handle = 0x80092022
+ CRYPT_E_INVALID_X500_STRING Handle = 0x80092023
+ CRYPT_E_NOT_CHAR_STRING Handle = 0x80092024
+ CRYPT_E_FILERESIZED Handle = 0x80092025
+ CRYPT_E_SECURITY_SETTINGS Handle = 0x80092026
+ CRYPT_E_NO_VERIFY_USAGE_DLL Handle = 0x80092027
+ CRYPT_E_NO_VERIFY_USAGE_CHECK Handle = 0x80092028
+ CRYPT_E_VERIFY_USAGE_OFFLINE Handle = 0x80092029
+ CRYPT_E_NOT_IN_CTL Handle = 0x8009202A
+ CRYPT_E_NO_TRUSTED_SIGNER Handle = 0x8009202B
+ CRYPT_E_MISSING_PUBKEY_PARA Handle = 0x8009202C
+ CRYPT_E_OBJECT_LOCATOR_OBJECT_NOT_FOUND Handle = 0x8009202D
+ CRYPT_E_OSS_ERROR Handle = 0x80093000
+ OSS_MORE_BUF Handle = 0x80093001
+ OSS_NEGATIVE_UINTEGER Handle = 0x80093002
+ OSS_PDU_RANGE Handle = 0x80093003
+ OSS_MORE_INPUT Handle = 0x80093004
+ OSS_DATA_ERROR Handle = 0x80093005
+ OSS_BAD_ARG Handle = 0x80093006
+ OSS_BAD_VERSION Handle = 0x80093007
+ OSS_OUT_MEMORY Handle = 0x80093008
+ OSS_PDU_MISMATCH Handle = 0x80093009
+ OSS_LIMITED Handle = 0x8009300A
+ OSS_BAD_PTR Handle = 0x8009300B
+ OSS_BAD_TIME Handle = 0x8009300C
+ OSS_INDEFINITE_NOT_SUPPORTED Handle = 0x8009300D
+ OSS_MEM_ERROR Handle = 0x8009300E
+ OSS_BAD_TABLE Handle = 0x8009300F
+ OSS_TOO_LONG Handle = 0x80093010
+ OSS_CONSTRAINT_VIOLATED Handle = 0x80093011
+ OSS_FATAL_ERROR Handle = 0x80093012
+ OSS_ACCESS_SERIALIZATION_ERROR Handle = 0x80093013
+ OSS_NULL_TBL Handle = 0x80093014
+ OSS_NULL_FCN Handle = 0x80093015
+ OSS_BAD_ENCRULES Handle = 0x80093016
+ OSS_UNAVAIL_ENCRULES Handle = 0x80093017
+ OSS_CANT_OPEN_TRACE_WINDOW Handle = 0x80093018
+ OSS_UNIMPLEMENTED Handle = 0x80093019
+ OSS_OID_DLL_NOT_LINKED Handle = 0x8009301A
+ OSS_CANT_OPEN_TRACE_FILE Handle = 0x8009301B
+ OSS_TRACE_FILE_ALREADY_OPEN Handle = 0x8009301C
+ OSS_TABLE_MISMATCH Handle = 0x8009301D
+ OSS_TYPE_NOT_SUPPORTED Handle = 0x8009301E
+ OSS_REAL_DLL_NOT_LINKED Handle = 0x8009301F
+ OSS_REAL_CODE_NOT_LINKED Handle = 0x80093020
+ OSS_OUT_OF_RANGE Handle = 0x80093021
+ OSS_COPIER_DLL_NOT_LINKED Handle = 0x80093022
+ OSS_CONSTRAINT_DLL_NOT_LINKED Handle = 0x80093023
+ OSS_COMPARATOR_DLL_NOT_LINKED Handle = 0x80093024
+ OSS_COMPARATOR_CODE_NOT_LINKED Handle = 0x80093025
+ OSS_MEM_MGR_DLL_NOT_LINKED Handle = 0x80093026
+ OSS_PDV_DLL_NOT_LINKED Handle = 0x80093027
+ OSS_PDV_CODE_NOT_LINKED Handle = 0x80093028
+ OSS_API_DLL_NOT_LINKED Handle = 0x80093029
+ OSS_BERDER_DLL_NOT_LINKED Handle = 0x8009302A
+ OSS_PER_DLL_NOT_LINKED Handle = 0x8009302B
+ OSS_OPEN_TYPE_ERROR Handle = 0x8009302C
+ OSS_MUTEX_NOT_CREATED Handle = 0x8009302D
+ OSS_CANT_CLOSE_TRACE_FILE Handle = 0x8009302E
+ CRYPT_E_ASN1_ERROR Handle = 0x80093100
+ CRYPT_E_ASN1_INTERNAL Handle = 0x80093101
+ CRYPT_E_ASN1_EOD Handle = 0x80093102
+ CRYPT_E_ASN1_CORRUPT Handle = 0x80093103
+ CRYPT_E_ASN1_LARGE Handle = 0x80093104
+ CRYPT_E_ASN1_CONSTRAINT Handle = 0x80093105
+ CRYPT_E_ASN1_MEMORY Handle = 0x80093106
+ CRYPT_E_ASN1_OVERFLOW Handle = 0x80093107
+ CRYPT_E_ASN1_BADPDU Handle = 0x80093108
+ CRYPT_E_ASN1_BADARGS Handle = 0x80093109
+ CRYPT_E_ASN1_BADREAL Handle = 0x8009310A
+ CRYPT_E_ASN1_BADTAG Handle = 0x8009310B
+ CRYPT_E_ASN1_CHOICE Handle = 0x8009310C
+ CRYPT_E_ASN1_RULE Handle = 0x8009310D
+ CRYPT_E_ASN1_UTF8 Handle = 0x8009310E
+ CRYPT_E_ASN1_PDU_TYPE Handle = 0x80093133
+ CRYPT_E_ASN1_NYI Handle = 0x80093134
+ CRYPT_E_ASN1_EXTENDED Handle = 0x80093201
+ CRYPT_E_ASN1_NOEOD Handle = 0x80093202
+ CERTSRV_E_BAD_REQUESTSUBJECT Handle = 0x80094001
+ CERTSRV_E_NO_REQUEST Handle = 0x80094002
+ CERTSRV_E_BAD_REQUESTSTATUS Handle = 0x80094003
+ CERTSRV_E_PROPERTY_EMPTY Handle = 0x80094004
+ CERTSRV_E_INVALID_CA_CERTIFICATE Handle = 0x80094005
+ CERTSRV_E_SERVER_SUSPENDED Handle = 0x80094006
+ CERTSRV_E_ENCODING_LENGTH Handle = 0x80094007
+ CERTSRV_E_ROLECONFLICT Handle = 0x80094008
+ CERTSRV_E_RESTRICTEDOFFICER Handle = 0x80094009
+ CERTSRV_E_KEY_ARCHIVAL_NOT_CONFIGURED Handle = 0x8009400A
+ CERTSRV_E_NO_VALID_KRA Handle = 0x8009400B
+ CERTSRV_E_BAD_REQUEST_KEY_ARCHIVAL Handle = 0x8009400C
+ CERTSRV_E_NO_CAADMIN_DEFINED Handle = 0x8009400D
+ CERTSRV_E_BAD_RENEWAL_CERT_ATTRIBUTE Handle = 0x8009400E
+ CERTSRV_E_NO_DB_SESSIONS Handle = 0x8009400F
+ CERTSRV_E_ALIGNMENT_FAULT Handle = 0x80094010
+ CERTSRV_E_ENROLL_DENIED Handle = 0x80094011
+ CERTSRV_E_TEMPLATE_DENIED Handle = 0x80094012
+ CERTSRV_E_DOWNLEVEL_DC_SSL_OR_UPGRADE Handle = 0x80094013
+ CERTSRV_E_ADMIN_DENIED_REQUEST Handle = 0x80094014
+ CERTSRV_E_NO_POLICY_SERVER Handle = 0x80094015
+ CERTSRV_E_WEAK_SIGNATURE_OR_KEY Handle = 0x80094016
+ CERTSRV_E_KEY_ATTESTATION_NOT_SUPPORTED Handle = 0x80094017
+ CERTSRV_E_ENCRYPTION_CERT_REQUIRED Handle = 0x80094018
+ CERTSRV_E_UNSUPPORTED_CERT_TYPE Handle = 0x80094800
+ CERTSRV_E_NO_CERT_TYPE Handle = 0x80094801
+ CERTSRV_E_TEMPLATE_CONFLICT Handle = 0x80094802
+ CERTSRV_E_SUBJECT_ALT_NAME_REQUIRED Handle = 0x80094803
+ CERTSRV_E_ARCHIVED_KEY_REQUIRED Handle = 0x80094804
+ CERTSRV_E_SMIME_REQUIRED Handle = 0x80094805
+ CERTSRV_E_BAD_RENEWAL_SUBJECT Handle = 0x80094806
+ CERTSRV_E_BAD_TEMPLATE_VERSION Handle = 0x80094807
+ CERTSRV_E_TEMPLATE_POLICY_REQUIRED Handle = 0x80094808
+ CERTSRV_E_SIGNATURE_POLICY_REQUIRED Handle = 0x80094809
+ CERTSRV_E_SIGNATURE_COUNT Handle = 0x8009480A
+ CERTSRV_E_SIGNATURE_REJECTED Handle = 0x8009480B
+ CERTSRV_E_ISSUANCE_POLICY_REQUIRED Handle = 0x8009480C
+ CERTSRV_E_SUBJECT_UPN_REQUIRED Handle = 0x8009480D
+ CERTSRV_E_SUBJECT_DIRECTORY_GUID_REQUIRED Handle = 0x8009480E
+ CERTSRV_E_SUBJECT_DNS_REQUIRED Handle = 0x8009480F
+ CERTSRV_E_ARCHIVED_KEY_UNEXPECTED Handle = 0x80094810
+ CERTSRV_E_KEY_LENGTH Handle = 0x80094811
+ CERTSRV_E_SUBJECT_EMAIL_REQUIRED Handle = 0x80094812
+ CERTSRV_E_UNKNOWN_CERT_TYPE Handle = 0x80094813
+ CERTSRV_E_CERT_TYPE_OVERLAP Handle = 0x80094814
+ CERTSRV_E_TOO_MANY_SIGNATURES Handle = 0x80094815
+ CERTSRV_E_RENEWAL_BAD_PUBLIC_KEY Handle = 0x80094816
+ CERTSRV_E_INVALID_EK Handle = 0x80094817
+ CERTSRV_E_INVALID_IDBINDING Handle = 0x80094818
+ CERTSRV_E_INVALID_ATTESTATION Handle = 0x80094819
+ CERTSRV_E_KEY_ATTESTATION Handle = 0x8009481A
+ CERTSRV_E_CORRUPT_KEY_ATTESTATION Handle = 0x8009481B
+ CERTSRV_E_EXPIRED_CHALLENGE Handle = 0x8009481C
+ CERTSRV_E_INVALID_RESPONSE Handle = 0x8009481D
+ CERTSRV_E_INVALID_REQUESTID Handle = 0x8009481E
+ CERTSRV_E_REQUEST_PRECERTIFICATE_MISMATCH Handle = 0x8009481F
+ CERTSRV_E_PENDING_CLIENT_RESPONSE Handle = 0x80094820
+ XENROLL_E_KEY_NOT_EXPORTABLE Handle = 0x80095000
+ XENROLL_E_CANNOT_ADD_ROOT_CERT Handle = 0x80095001
+ XENROLL_E_RESPONSE_KA_HASH_NOT_FOUND Handle = 0x80095002
+ XENROLL_E_RESPONSE_UNEXPECTED_KA_HASH Handle = 0x80095003
+ XENROLL_E_RESPONSE_KA_HASH_MISMATCH Handle = 0x80095004
+ XENROLL_E_KEYSPEC_SMIME_MISMATCH Handle = 0x80095005
+ TRUST_E_SYSTEM_ERROR Handle = 0x80096001
+ TRUST_E_NO_SIGNER_CERT Handle = 0x80096002
+ TRUST_E_COUNTER_SIGNER Handle = 0x80096003
+ TRUST_E_CERT_SIGNATURE Handle = 0x80096004
+ TRUST_E_TIME_STAMP Handle = 0x80096005
+ TRUST_E_BAD_DIGEST Handle = 0x80096010
+ TRUST_E_MALFORMED_SIGNATURE Handle = 0x80096011
+ TRUST_E_BASIC_CONSTRAINTS Handle = 0x80096019
+ TRUST_E_FINANCIAL_CRITERIA Handle = 0x8009601E
+ MSSIPOTF_E_OUTOFMEMRANGE Handle = 0x80097001
+ MSSIPOTF_E_CANTGETOBJECT Handle = 0x80097002
+ MSSIPOTF_E_NOHEADTABLE Handle = 0x80097003
+ MSSIPOTF_E_BAD_MAGICNUMBER Handle = 0x80097004
+ MSSIPOTF_E_BAD_OFFSET_TABLE Handle = 0x80097005
+ MSSIPOTF_E_TABLE_TAGORDER Handle = 0x80097006
+ MSSIPOTF_E_TABLE_LONGWORD Handle = 0x80097007
+ MSSIPOTF_E_BAD_FIRST_TABLE_PLACEMENT Handle = 0x80097008
+ MSSIPOTF_E_TABLES_OVERLAP Handle = 0x80097009
+ MSSIPOTF_E_TABLE_PADBYTES Handle = 0x8009700A
+ MSSIPOTF_E_FILETOOSMALL Handle = 0x8009700B
+ MSSIPOTF_E_TABLE_CHECKSUM Handle = 0x8009700C
+ MSSIPOTF_E_FILE_CHECKSUM Handle = 0x8009700D
+ MSSIPOTF_E_FAILED_POLICY Handle = 0x80097010
+ MSSIPOTF_E_FAILED_HINTS_CHECK Handle = 0x80097011
+ MSSIPOTF_E_NOT_OPENTYPE Handle = 0x80097012
+ MSSIPOTF_E_FILE Handle = 0x80097013
+ MSSIPOTF_E_CRYPT Handle = 0x80097014
+ MSSIPOTF_E_BADVERSION Handle = 0x80097015
+ MSSIPOTF_E_DSIG_STRUCTURE Handle = 0x80097016
+ MSSIPOTF_E_PCONST_CHECK Handle = 0x80097017
+ MSSIPOTF_E_STRUCTURE Handle = 0x80097018
+ ERROR_CRED_REQUIRES_CONFIRMATION Handle = 0x80097019
+ NTE_OP_OK syscall.Errno = 0
+ TRUST_E_PROVIDER_UNKNOWN Handle = 0x800B0001
+ TRUST_E_ACTION_UNKNOWN Handle = 0x800B0002
+ TRUST_E_SUBJECT_FORM_UNKNOWN Handle = 0x800B0003
+ TRUST_E_SUBJECT_NOT_TRUSTED Handle = 0x800B0004
+ DIGSIG_E_ENCODE Handle = 0x800B0005
+ DIGSIG_E_DECODE Handle = 0x800B0006
+ DIGSIG_E_EXTENSIBILITY Handle = 0x800B0007
+ DIGSIG_E_CRYPTO Handle = 0x800B0008
+ PERSIST_E_SIZEDEFINITE Handle = 0x800B0009
+ PERSIST_E_SIZEINDEFINITE Handle = 0x800B000A
+ PERSIST_E_NOTSELFSIZING Handle = 0x800B000B
+ TRUST_E_NOSIGNATURE Handle = 0x800B0100
+ CERT_E_EXPIRED Handle = 0x800B0101
+ CERT_E_VALIDITYPERIODNESTING Handle = 0x800B0102
+ CERT_E_ROLE Handle = 0x800B0103
+ CERT_E_PATHLENCONST Handle = 0x800B0104
+ CERT_E_CRITICAL Handle = 0x800B0105
+ CERT_E_PURPOSE Handle = 0x800B0106
+ CERT_E_ISSUERCHAINING Handle = 0x800B0107
+ CERT_E_MALFORMED Handle = 0x800B0108
+ CERT_E_UNTRUSTEDROOT Handle = 0x800B0109
+ CERT_E_CHAINING Handle = 0x800B010A
+ TRUST_E_FAIL Handle = 0x800B010B
+ CERT_E_REVOKED Handle = 0x800B010C
+ CERT_E_UNTRUSTEDTESTROOT Handle = 0x800B010D
+ CERT_E_REVOCATION_FAILURE Handle = 0x800B010E
+ CERT_E_CN_NO_MATCH Handle = 0x800B010F
+ CERT_E_WRONG_USAGE Handle = 0x800B0110
+ TRUST_E_EXPLICIT_DISTRUST Handle = 0x800B0111
+ CERT_E_UNTRUSTEDCA Handle = 0x800B0112
+ CERT_E_INVALID_POLICY Handle = 0x800B0113
+ CERT_E_INVALID_NAME Handle = 0x800B0114
+ SPAPI_E_EXPECTED_SECTION_NAME Handle = 0x800F0000
+ SPAPI_E_BAD_SECTION_NAME_LINE Handle = 0x800F0001
+ SPAPI_E_SECTION_NAME_TOO_LONG Handle = 0x800F0002
+ SPAPI_E_GENERAL_SYNTAX Handle = 0x800F0003
+ SPAPI_E_WRONG_INF_STYLE Handle = 0x800F0100
+ SPAPI_E_SECTION_NOT_FOUND Handle = 0x800F0101
+ SPAPI_E_LINE_NOT_FOUND Handle = 0x800F0102
+ SPAPI_E_NO_BACKUP Handle = 0x800F0103
+ SPAPI_E_NO_ASSOCIATED_CLASS Handle = 0x800F0200
+ SPAPI_E_CLASS_MISMATCH Handle = 0x800F0201
+ SPAPI_E_DUPLICATE_FOUND Handle = 0x800F0202
+ SPAPI_E_NO_DRIVER_SELECTED Handle = 0x800F0203
+ SPAPI_E_KEY_DOES_NOT_EXIST Handle = 0x800F0204
+ SPAPI_E_INVALID_DEVINST_NAME Handle = 0x800F0205
+ SPAPI_E_INVALID_CLASS Handle = 0x800F0206
+ SPAPI_E_DEVINST_ALREADY_EXISTS Handle = 0x800F0207
+ SPAPI_E_DEVINFO_NOT_REGISTERED Handle = 0x800F0208
+ SPAPI_E_INVALID_REG_PROPERTY Handle = 0x800F0209
+ SPAPI_E_NO_INF Handle = 0x800F020A
+ SPAPI_E_NO_SUCH_DEVINST Handle = 0x800F020B
+ SPAPI_E_CANT_LOAD_CLASS_ICON Handle = 0x800F020C
+ SPAPI_E_INVALID_CLASS_INSTALLER Handle = 0x800F020D
+ SPAPI_E_DI_DO_DEFAULT Handle = 0x800F020E
+ SPAPI_E_DI_NOFILECOPY Handle = 0x800F020F
+ SPAPI_E_INVALID_HWPROFILE Handle = 0x800F0210
+ SPAPI_E_NO_DEVICE_SELECTED Handle = 0x800F0211
+ SPAPI_E_DEVINFO_LIST_LOCKED Handle = 0x800F0212
+ SPAPI_E_DEVINFO_DATA_LOCKED Handle = 0x800F0213
+ SPAPI_E_DI_BAD_PATH Handle = 0x800F0214
+ SPAPI_E_NO_CLASSINSTALL_PARAMS Handle = 0x800F0215
+ SPAPI_E_FILEQUEUE_LOCKED Handle = 0x800F0216
+ SPAPI_E_BAD_SERVICE_INSTALLSECT Handle = 0x800F0217
+ SPAPI_E_NO_CLASS_DRIVER_LIST Handle = 0x800F0218
+ SPAPI_E_NO_ASSOCIATED_SERVICE Handle = 0x800F0219
+ SPAPI_E_NO_DEFAULT_DEVICE_INTERFACE Handle = 0x800F021A
+ SPAPI_E_DEVICE_INTERFACE_ACTIVE Handle = 0x800F021B
+ SPAPI_E_DEVICE_INTERFACE_REMOVED Handle = 0x800F021C
+ SPAPI_E_BAD_INTERFACE_INSTALLSECT Handle = 0x800F021D
+ SPAPI_E_NO_SUCH_INTERFACE_CLASS Handle = 0x800F021E
+ SPAPI_E_INVALID_REFERENCE_STRING Handle = 0x800F021F
+ SPAPI_E_INVALID_MACHINENAME Handle = 0x800F0220
+ SPAPI_E_REMOTE_COMM_FAILURE Handle = 0x800F0221
+ SPAPI_E_MACHINE_UNAVAILABLE Handle = 0x800F0222
+ SPAPI_E_NO_CONFIGMGR_SERVICES Handle = 0x800F0223
+ SPAPI_E_INVALID_PROPPAGE_PROVIDER Handle = 0x800F0224
+ SPAPI_E_NO_SUCH_DEVICE_INTERFACE Handle = 0x800F0225
+ SPAPI_E_DI_POSTPROCESSING_REQUIRED Handle = 0x800F0226
+ SPAPI_E_INVALID_COINSTALLER Handle = 0x800F0227
+ SPAPI_E_NO_COMPAT_DRIVERS Handle = 0x800F0228
+ SPAPI_E_NO_DEVICE_ICON Handle = 0x800F0229
+ SPAPI_E_INVALID_INF_LOGCONFIG Handle = 0x800F022A
+ SPAPI_E_DI_DONT_INSTALL Handle = 0x800F022B
+ SPAPI_E_INVALID_FILTER_DRIVER Handle = 0x800F022C
+ SPAPI_E_NON_WINDOWS_NT_DRIVER Handle = 0x800F022D
+ SPAPI_E_NON_WINDOWS_DRIVER Handle = 0x800F022E
+ SPAPI_E_NO_CATALOG_FOR_OEM_INF Handle = 0x800F022F
+ SPAPI_E_DEVINSTALL_QUEUE_NONNATIVE Handle = 0x800F0230
+ SPAPI_E_NOT_DISABLEABLE Handle = 0x800F0231
+ SPAPI_E_CANT_REMOVE_DEVINST Handle = 0x800F0232
+ SPAPI_E_INVALID_TARGET Handle = 0x800F0233
+ SPAPI_E_DRIVER_NONNATIVE Handle = 0x800F0234
+ SPAPI_E_IN_WOW64 Handle = 0x800F0235
+ SPAPI_E_SET_SYSTEM_RESTORE_POINT Handle = 0x800F0236
+ SPAPI_E_INCORRECTLY_COPIED_INF Handle = 0x800F0237
+ SPAPI_E_SCE_DISABLED Handle = 0x800F0238
+ SPAPI_E_UNKNOWN_EXCEPTION Handle = 0x800F0239
+ SPAPI_E_PNP_REGISTRY_ERROR Handle = 0x800F023A
+ SPAPI_E_REMOTE_REQUEST_UNSUPPORTED Handle = 0x800F023B
+ SPAPI_E_NOT_AN_INSTALLED_OEM_INF Handle = 0x800F023C
+ SPAPI_E_INF_IN_USE_BY_DEVICES Handle = 0x800F023D
+ SPAPI_E_DI_FUNCTION_OBSOLETE Handle = 0x800F023E
+ SPAPI_E_NO_AUTHENTICODE_CATALOG Handle = 0x800F023F
+ SPAPI_E_AUTHENTICODE_DISALLOWED Handle = 0x800F0240
+ SPAPI_E_AUTHENTICODE_TRUSTED_PUBLISHER Handle = 0x800F0241
+ SPAPI_E_AUTHENTICODE_TRUST_NOT_ESTABLISHED Handle = 0x800F0242
+ SPAPI_E_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Handle = 0x800F0243
+ SPAPI_E_SIGNATURE_OSATTRIBUTE_MISMATCH Handle = 0x800F0244
+ SPAPI_E_ONLY_VALIDATE_VIA_AUTHENTICODE Handle = 0x800F0245
+ SPAPI_E_DEVICE_INSTALLER_NOT_READY Handle = 0x800F0246
+ SPAPI_E_DRIVER_STORE_ADD_FAILED Handle = 0x800F0247
+ SPAPI_E_DEVICE_INSTALL_BLOCKED Handle = 0x800F0248
+ SPAPI_E_DRIVER_INSTALL_BLOCKED Handle = 0x800F0249
+ SPAPI_E_WRONG_INF_TYPE Handle = 0x800F024A
+ SPAPI_E_FILE_HASH_NOT_IN_CATALOG Handle = 0x800F024B
+ SPAPI_E_DRIVER_STORE_DELETE_FAILED Handle = 0x800F024C
+ SPAPI_E_UNRECOVERABLE_STACK_OVERFLOW Handle = 0x800F0300
+ SPAPI_E_ERROR_NOT_INSTALLED Handle = 0x800F1000
+ SCARD_S_SUCCESS = S_OK
+ SCARD_F_INTERNAL_ERROR Handle = 0x80100001
+ SCARD_E_CANCELLED Handle = 0x80100002
+ SCARD_E_INVALID_HANDLE Handle = 0x80100003
+ SCARD_E_INVALID_PARAMETER Handle = 0x80100004
+ SCARD_E_INVALID_TARGET Handle = 0x80100005
+ SCARD_E_NO_MEMORY Handle = 0x80100006
+ SCARD_F_WAITED_TOO_LONG Handle = 0x80100007
+ SCARD_E_INSUFFICIENT_BUFFER Handle = 0x80100008
+ SCARD_E_UNKNOWN_READER Handle = 0x80100009
+ SCARD_E_TIMEOUT Handle = 0x8010000A
+ SCARD_E_SHARING_VIOLATION Handle = 0x8010000B
+ SCARD_E_NO_SMARTCARD Handle = 0x8010000C
+ SCARD_E_UNKNOWN_CARD Handle = 0x8010000D
+ SCARD_E_CANT_DISPOSE Handle = 0x8010000E
+ SCARD_E_PROTO_MISMATCH Handle = 0x8010000F
+ SCARD_E_NOT_READY Handle = 0x80100010
+ SCARD_E_INVALID_VALUE Handle = 0x80100011
+ SCARD_E_SYSTEM_CANCELLED Handle = 0x80100012
+ SCARD_F_COMM_ERROR Handle = 0x80100013
+ SCARD_F_UNKNOWN_ERROR Handle = 0x80100014
+ SCARD_E_INVALID_ATR Handle = 0x80100015
+ SCARD_E_NOT_TRANSACTED Handle = 0x80100016
+ SCARD_E_READER_UNAVAILABLE Handle = 0x80100017
+ SCARD_P_SHUTDOWN Handle = 0x80100018
+ SCARD_E_PCI_TOO_SMALL Handle = 0x80100019
+ SCARD_E_READER_UNSUPPORTED Handle = 0x8010001A
+ SCARD_E_DUPLICATE_READER Handle = 0x8010001B
+ SCARD_E_CARD_UNSUPPORTED Handle = 0x8010001C
+ SCARD_E_NO_SERVICE Handle = 0x8010001D
+ SCARD_E_SERVICE_STOPPED Handle = 0x8010001E
+ SCARD_E_UNEXPECTED Handle = 0x8010001F
+ SCARD_E_ICC_INSTALLATION Handle = 0x80100020
+ SCARD_E_ICC_CREATEORDER Handle = 0x80100021
+ SCARD_E_UNSUPPORTED_FEATURE Handle = 0x80100022
+ SCARD_E_DIR_NOT_FOUND Handle = 0x80100023
+ SCARD_E_FILE_NOT_FOUND Handle = 0x80100024
+ SCARD_E_NO_DIR Handle = 0x80100025
+ SCARD_E_NO_FILE Handle = 0x80100026
+ SCARD_E_NO_ACCESS Handle = 0x80100027
+ SCARD_E_WRITE_TOO_MANY Handle = 0x80100028
+ SCARD_E_BAD_SEEK Handle = 0x80100029
+ SCARD_E_INVALID_CHV Handle = 0x8010002A
+ SCARD_E_UNKNOWN_RES_MNG Handle = 0x8010002B
+ SCARD_E_NO_SUCH_CERTIFICATE Handle = 0x8010002C
+ SCARD_E_CERTIFICATE_UNAVAILABLE Handle = 0x8010002D
+ SCARD_E_NO_READERS_AVAILABLE Handle = 0x8010002E
+ SCARD_E_COMM_DATA_LOST Handle = 0x8010002F
+ SCARD_E_NO_KEY_CONTAINER Handle = 0x80100030
+ SCARD_E_SERVER_TOO_BUSY Handle = 0x80100031
+ SCARD_E_PIN_CACHE_EXPIRED Handle = 0x80100032
+ SCARD_E_NO_PIN_CACHE Handle = 0x80100033
+ SCARD_E_READ_ONLY_CARD Handle = 0x80100034
+ SCARD_W_UNSUPPORTED_CARD Handle = 0x80100065
+ SCARD_W_UNRESPONSIVE_CARD Handle = 0x80100066
+ SCARD_W_UNPOWERED_CARD Handle = 0x80100067
+ SCARD_W_RESET_CARD Handle = 0x80100068
+ SCARD_W_REMOVED_CARD Handle = 0x80100069
+ SCARD_W_SECURITY_VIOLATION Handle = 0x8010006A
+ SCARD_W_WRONG_CHV Handle = 0x8010006B
+ SCARD_W_CHV_BLOCKED Handle = 0x8010006C
+ SCARD_W_EOF Handle = 0x8010006D
+ SCARD_W_CANCELLED_BY_USER Handle = 0x8010006E
+ SCARD_W_CARD_NOT_AUTHENTICATED Handle = 0x8010006F
+ SCARD_W_CACHE_ITEM_NOT_FOUND Handle = 0x80100070
+ SCARD_W_CACHE_ITEM_STALE Handle = 0x80100071
+ SCARD_W_CACHE_ITEM_TOO_BIG Handle = 0x80100072
+ COMADMIN_E_OBJECTERRORS Handle = 0x80110401
+ COMADMIN_E_OBJECTINVALID Handle = 0x80110402
+ COMADMIN_E_KEYMISSING Handle = 0x80110403
+ COMADMIN_E_ALREADYINSTALLED Handle = 0x80110404
+ COMADMIN_E_APP_FILE_WRITEFAIL Handle = 0x80110407
+ COMADMIN_E_APP_FILE_READFAIL Handle = 0x80110408
+ COMADMIN_E_APP_FILE_VERSION Handle = 0x80110409
+ COMADMIN_E_BADPATH Handle = 0x8011040A
+ COMADMIN_E_APPLICATIONEXISTS Handle = 0x8011040B
+ COMADMIN_E_ROLEEXISTS Handle = 0x8011040C
+ COMADMIN_E_CANTCOPYFILE Handle = 0x8011040D
+ COMADMIN_E_NOUSER Handle = 0x8011040F
+ COMADMIN_E_INVALIDUSERIDS Handle = 0x80110410
+ COMADMIN_E_NOREGISTRYCLSID Handle = 0x80110411
+ COMADMIN_E_BADREGISTRYPROGID Handle = 0x80110412
+ COMADMIN_E_AUTHENTICATIONLEVEL Handle = 0x80110413
+ COMADMIN_E_USERPASSWDNOTVALID Handle = 0x80110414
+ COMADMIN_E_CLSIDORIIDMISMATCH Handle = 0x80110418
+ COMADMIN_E_REMOTEINTERFACE Handle = 0x80110419
+ COMADMIN_E_DLLREGISTERSERVER Handle = 0x8011041A
+ COMADMIN_E_NOSERVERSHARE Handle = 0x8011041B
+ COMADMIN_E_DLLLOADFAILED Handle = 0x8011041D
+ COMADMIN_E_BADREGISTRYLIBID Handle = 0x8011041E
+ COMADMIN_E_APPDIRNOTFOUND Handle = 0x8011041F
+ COMADMIN_E_REGISTRARFAILED Handle = 0x80110423
+ COMADMIN_E_COMPFILE_DOESNOTEXIST Handle = 0x80110424
+ COMADMIN_E_COMPFILE_LOADDLLFAIL Handle = 0x80110425
+ COMADMIN_E_COMPFILE_GETCLASSOBJ Handle = 0x80110426
+ COMADMIN_E_COMPFILE_CLASSNOTAVAIL Handle = 0x80110427
+ COMADMIN_E_COMPFILE_BADTLB Handle = 0x80110428
+ COMADMIN_E_COMPFILE_NOTINSTALLABLE Handle = 0x80110429
+ COMADMIN_E_NOTCHANGEABLE Handle = 0x8011042A
+ COMADMIN_E_NOTDELETEABLE Handle = 0x8011042B
+ COMADMIN_E_SESSION Handle = 0x8011042C
+ COMADMIN_E_COMP_MOVE_LOCKED Handle = 0x8011042D
+ COMADMIN_E_COMP_MOVE_BAD_DEST Handle = 0x8011042E
+ COMADMIN_E_REGISTERTLB Handle = 0x80110430
+ COMADMIN_E_SYSTEMAPP Handle = 0x80110433
+ COMADMIN_E_COMPFILE_NOREGISTRAR Handle = 0x80110434
+ COMADMIN_E_COREQCOMPINSTALLED Handle = 0x80110435
+ COMADMIN_E_SERVICENOTINSTALLED Handle = 0x80110436
+ COMADMIN_E_PROPERTYSAVEFAILED Handle = 0x80110437
+ COMADMIN_E_OBJECTEXISTS Handle = 0x80110438
+ COMADMIN_E_COMPONENTEXISTS Handle = 0x80110439
+ COMADMIN_E_REGFILE_CORRUPT Handle = 0x8011043B
+ COMADMIN_E_PROPERTY_OVERFLOW Handle = 0x8011043C
+ COMADMIN_E_NOTINREGISTRY Handle = 0x8011043E
+ COMADMIN_E_OBJECTNOTPOOLABLE Handle = 0x8011043F
+ COMADMIN_E_APPLID_MATCHES_CLSID Handle = 0x80110446
+ COMADMIN_E_ROLE_DOES_NOT_EXIST Handle = 0x80110447
+ COMADMIN_E_START_APP_NEEDS_COMPONENTS Handle = 0x80110448
+ COMADMIN_E_REQUIRES_DIFFERENT_PLATFORM Handle = 0x80110449
+ COMADMIN_E_CAN_NOT_EXPORT_APP_PROXY Handle = 0x8011044A
+ COMADMIN_E_CAN_NOT_START_APP Handle = 0x8011044B
+ COMADMIN_E_CAN_NOT_EXPORT_SYS_APP Handle = 0x8011044C
+ COMADMIN_E_CANT_SUBSCRIBE_TO_COMPONENT Handle = 0x8011044D
+ COMADMIN_E_EVENTCLASS_CANT_BE_SUBSCRIBER Handle = 0x8011044E
+ COMADMIN_E_LIB_APP_PROXY_INCOMPATIBLE Handle = 0x8011044F
+ COMADMIN_E_BASE_PARTITION_ONLY Handle = 0x80110450
+ COMADMIN_E_START_APP_DISABLED Handle = 0x80110451
+ COMADMIN_E_CAT_DUPLICATE_PARTITION_NAME Handle = 0x80110457
+ COMADMIN_E_CAT_INVALID_PARTITION_NAME Handle = 0x80110458
+ COMADMIN_E_CAT_PARTITION_IN_USE Handle = 0x80110459
+ COMADMIN_E_FILE_PARTITION_DUPLICATE_FILES Handle = 0x8011045A
+ COMADMIN_E_CAT_IMPORTED_COMPONENTS_NOT_ALLOWED Handle = 0x8011045B
+ COMADMIN_E_AMBIGUOUS_APPLICATION_NAME Handle = 0x8011045C
+ COMADMIN_E_AMBIGUOUS_PARTITION_NAME Handle = 0x8011045D
+ COMADMIN_E_REGDB_NOTINITIALIZED Handle = 0x80110472
+ COMADMIN_E_REGDB_NOTOPEN Handle = 0x80110473
+ COMADMIN_E_REGDB_SYSTEMERR Handle = 0x80110474
+ COMADMIN_E_REGDB_ALREADYRUNNING Handle = 0x80110475
+ COMADMIN_E_MIG_VERSIONNOTSUPPORTED Handle = 0x80110480
+ COMADMIN_E_MIG_SCHEMANOTFOUND Handle = 0x80110481
+ COMADMIN_E_CAT_BITNESSMISMATCH Handle = 0x80110482
+ COMADMIN_E_CAT_UNACCEPTABLEBITNESS Handle = 0x80110483
+ COMADMIN_E_CAT_WRONGAPPBITNESS Handle = 0x80110484
+ COMADMIN_E_CAT_PAUSE_RESUME_NOT_SUPPORTED Handle = 0x80110485
+ COMADMIN_E_CAT_SERVERFAULT Handle = 0x80110486
+ COMQC_E_APPLICATION_NOT_QUEUED Handle = 0x80110600
+ COMQC_E_NO_QUEUEABLE_INTERFACES Handle = 0x80110601
+ COMQC_E_QUEUING_SERVICE_NOT_AVAILABLE Handle = 0x80110602
+ COMQC_E_NO_IPERSISTSTREAM Handle = 0x80110603
+ COMQC_E_BAD_MESSAGE Handle = 0x80110604
+ COMQC_E_UNAUTHENTICATED Handle = 0x80110605
+ COMQC_E_UNTRUSTED_ENQUEUER Handle = 0x80110606
+ MSDTC_E_DUPLICATE_RESOURCE Handle = 0x80110701
+ COMADMIN_E_OBJECT_PARENT_MISSING Handle = 0x80110808
+ COMADMIN_E_OBJECT_DOES_NOT_EXIST Handle = 0x80110809
+ COMADMIN_E_APP_NOT_RUNNING Handle = 0x8011080A
+ COMADMIN_E_INVALID_PARTITION Handle = 0x8011080B
+ COMADMIN_E_SVCAPP_NOT_POOLABLE_OR_RECYCLABLE Handle = 0x8011080D
+ COMADMIN_E_USER_IN_SET Handle = 0x8011080E
+ COMADMIN_E_CANTRECYCLELIBRARYAPPS Handle = 0x8011080F
+ COMADMIN_E_CANTRECYCLESERVICEAPPS Handle = 0x80110811
+ COMADMIN_E_PROCESSALREADYRECYCLED Handle = 0x80110812
+ COMADMIN_E_PAUSEDPROCESSMAYNOTBERECYCLED Handle = 0x80110813
+ COMADMIN_E_CANTMAKEINPROCSERVICE Handle = 0x80110814
+ COMADMIN_E_PROGIDINUSEBYCLSID Handle = 0x80110815
+ COMADMIN_E_DEFAULT_PARTITION_NOT_IN_SET Handle = 0x80110816
+ COMADMIN_E_RECYCLEDPROCESSMAYNOTBEPAUSED Handle = 0x80110817
+ COMADMIN_E_PARTITION_ACCESSDENIED Handle = 0x80110818
+ COMADMIN_E_PARTITION_MSI_ONLY Handle = 0x80110819
+ COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_1_0_FORMAT Handle = 0x8011081A
+ COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_NONBASE_PARTITIONS Handle = 0x8011081B
+ COMADMIN_E_COMP_MOVE_SOURCE Handle = 0x8011081C
+ COMADMIN_E_COMP_MOVE_DEST Handle = 0x8011081D
+ COMADMIN_E_COMP_MOVE_PRIVATE Handle = 0x8011081E
+ COMADMIN_E_BASEPARTITION_REQUIRED_IN_SET Handle = 0x8011081F
+ COMADMIN_E_CANNOT_ALIAS_EVENTCLASS Handle = 0x80110820
+ COMADMIN_E_PRIVATE_ACCESSDENIED Handle = 0x80110821
+ COMADMIN_E_SAFERINVALID Handle = 0x80110822
+ COMADMIN_E_REGISTRY_ACCESSDENIED Handle = 0x80110823
+ COMADMIN_E_PARTITIONS_DISABLED Handle = 0x80110824
+ WER_S_REPORT_DEBUG Handle = 0x001B0000
+ WER_S_REPORT_UPLOADED Handle = 0x001B0001
+ WER_S_REPORT_QUEUED Handle = 0x001B0002
+ WER_S_DISABLED Handle = 0x001B0003
+ WER_S_SUSPENDED_UPLOAD Handle = 0x001B0004
+ WER_S_DISABLED_QUEUE Handle = 0x001B0005
+ WER_S_DISABLED_ARCHIVE Handle = 0x001B0006
+ WER_S_REPORT_ASYNC Handle = 0x001B0007
+ WER_S_IGNORE_ASSERT_INSTANCE Handle = 0x001B0008
+ WER_S_IGNORE_ALL_ASSERTS Handle = 0x001B0009
+ WER_S_ASSERT_CONTINUE Handle = 0x001B000A
+ WER_S_THROTTLED Handle = 0x001B000B
+ WER_S_REPORT_UPLOADED_CAB Handle = 0x001B000C
+ WER_E_CRASH_FAILURE Handle = 0x801B8000
+ WER_E_CANCELED Handle = 0x801B8001
+ WER_E_NETWORK_FAILURE Handle = 0x801B8002
+ WER_E_NOT_INITIALIZED Handle = 0x801B8003
+ WER_E_ALREADY_REPORTING Handle = 0x801B8004
+ WER_E_DUMP_THROTTLED Handle = 0x801B8005
+ WER_E_INSUFFICIENT_CONSENT Handle = 0x801B8006
+ WER_E_TOO_HEAVY Handle = 0x801B8007
+ ERROR_FLT_IO_COMPLETE Handle = 0x001F0001
+ ERROR_FLT_NO_HANDLER_DEFINED Handle = 0x801F0001
+ ERROR_FLT_CONTEXT_ALREADY_DEFINED Handle = 0x801F0002
+ ERROR_FLT_INVALID_ASYNCHRONOUS_REQUEST Handle = 0x801F0003
+ ERROR_FLT_DISALLOW_FAST_IO Handle = 0x801F0004
+ ERROR_FLT_INVALID_NAME_REQUEST Handle = 0x801F0005
+ ERROR_FLT_NOT_SAFE_TO_POST_OPERATION Handle = 0x801F0006
+ ERROR_FLT_NOT_INITIALIZED Handle = 0x801F0007
+ ERROR_FLT_FILTER_NOT_READY Handle = 0x801F0008
+ ERROR_FLT_POST_OPERATION_CLEANUP Handle = 0x801F0009
+ ERROR_FLT_INTERNAL_ERROR Handle = 0x801F000A
+ ERROR_FLT_DELETING_OBJECT Handle = 0x801F000B
+ ERROR_FLT_MUST_BE_NONPAGED_POOL Handle = 0x801F000C
+ ERROR_FLT_DUPLICATE_ENTRY Handle = 0x801F000D
+ ERROR_FLT_CBDQ_DISABLED Handle = 0x801F000E
+ ERROR_FLT_DO_NOT_ATTACH Handle = 0x801F000F
+ ERROR_FLT_DO_NOT_DETACH Handle = 0x801F0010
+ ERROR_FLT_INSTANCE_ALTITUDE_COLLISION Handle = 0x801F0011
+ ERROR_FLT_INSTANCE_NAME_COLLISION Handle = 0x801F0012
+ ERROR_FLT_FILTER_NOT_FOUND Handle = 0x801F0013
+ ERROR_FLT_VOLUME_NOT_FOUND Handle = 0x801F0014
+ ERROR_FLT_INSTANCE_NOT_FOUND Handle = 0x801F0015
+ ERROR_FLT_CONTEXT_ALLOCATION_NOT_FOUND Handle = 0x801F0016
+ ERROR_FLT_INVALID_CONTEXT_REGISTRATION Handle = 0x801F0017
+ ERROR_FLT_NAME_CACHE_MISS Handle = 0x801F0018
+ ERROR_FLT_NO_DEVICE_OBJECT Handle = 0x801F0019
+ ERROR_FLT_VOLUME_ALREADY_MOUNTED Handle = 0x801F001A
+ ERROR_FLT_ALREADY_ENLISTED Handle = 0x801F001B
+ ERROR_FLT_CONTEXT_ALREADY_LINKED Handle = 0x801F001C
+ ERROR_FLT_NO_WAITER_FOR_REPLY Handle = 0x801F0020
+ ERROR_FLT_REGISTRATION_BUSY Handle = 0x801F0023
+ ERROR_HUNG_DISPLAY_DRIVER_THREAD Handle = 0x80260001
+ DWM_E_COMPOSITIONDISABLED Handle = 0x80263001
+ DWM_E_REMOTING_NOT_SUPPORTED Handle = 0x80263002
+ DWM_E_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x80263003
+ DWM_E_NOT_QUEUING_PRESENTS Handle = 0x80263004
+ DWM_E_ADAPTER_NOT_FOUND Handle = 0x80263005
+ DWM_S_GDI_REDIRECTION_SURFACE Handle = 0x00263005
+ DWM_E_TEXTURE_TOO_LARGE Handle = 0x80263007
+ DWM_S_GDI_REDIRECTION_SURFACE_BLT_VIA_GDI Handle = 0x00263008
+ ERROR_MONITOR_NO_DESCRIPTOR Handle = 0x00261001
+ ERROR_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT Handle = 0x00261002
+ ERROR_MONITOR_INVALID_DESCRIPTOR_CHECKSUM Handle = 0xC0261003
+ ERROR_MONITOR_INVALID_STANDARD_TIMING_BLOCK Handle = 0xC0261004
+ ERROR_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED Handle = 0xC0261005
+ ERROR_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK Handle = 0xC0261006
+ ERROR_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK Handle = 0xC0261007
+ ERROR_MONITOR_NO_MORE_DESCRIPTOR_DATA Handle = 0xC0261008
+ ERROR_MONITOR_INVALID_DETAILED_TIMING_BLOCK Handle = 0xC0261009
+ ERROR_MONITOR_INVALID_MANUFACTURE_DATE Handle = 0xC026100A
+ ERROR_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER Handle = 0xC0262000
+ ERROR_GRAPHICS_INSUFFICIENT_DMA_BUFFER Handle = 0xC0262001
+ ERROR_GRAPHICS_INVALID_DISPLAY_ADAPTER Handle = 0xC0262002
+ ERROR_GRAPHICS_ADAPTER_WAS_RESET Handle = 0xC0262003
+ ERROR_GRAPHICS_INVALID_DRIVER_MODEL Handle = 0xC0262004
+ ERROR_GRAPHICS_PRESENT_MODE_CHANGED Handle = 0xC0262005
+ ERROR_GRAPHICS_PRESENT_OCCLUDED Handle = 0xC0262006
+ ERROR_GRAPHICS_PRESENT_DENIED Handle = 0xC0262007
+ ERROR_GRAPHICS_CANNOTCOLORCONVERT Handle = 0xC0262008
+ ERROR_GRAPHICS_DRIVER_MISMATCH Handle = 0xC0262009
+ ERROR_GRAPHICS_PARTIAL_DATA_POPULATED Handle = 0x4026200A
+ ERROR_GRAPHICS_PRESENT_REDIRECTION_DISABLED Handle = 0xC026200B
+ ERROR_GRAPHICS_PRESENT_UNOCCLUDED Handle = 0xC026200C
+ ERROR_GRAPHICS_WINDOWDC_NOT_AVAILABLE Handle = 0xC026200D
+ ERROR_GRAPHICS_WINDOWLESS_PRESENT_DISABLED Handle = 0xC026200E
+ ERROR_GRAPHICS_PRESENT_INVALID_WINDOW Handle = 0xC026200F
+ ERROR_GRAPHICS_PRESENT_BUFFER_NOT_BOUND Handle = 0xC0262010
+ ERROR_GRAPHICS_VAIL_STATE_CHANGED Handle = 0xC0262011
+ ERROR_GRAPHICS_NO_VIDEO_MEMORY Handle = 0xC0262100
+ ERROR_GRAPHICS_CANT_LOCK_MEMORY Handle = 0xC0262101
+ ERROR_GRAPHICS_ALLOCATION_BUSY Handle = 0xC0262102
+ ERROR_GRAPHICS_TOO_MANY_REFERENCES Handle = 0xC0262103
+ ERROR_GRAPHICS_TRY_AGAIN_LATER Handle = 0xC0262104
+ ERROR_GRAPHICS_TRY_AGAIN_NOW Handle = 0xC0262105
+ ERROR_GRAPHICS_ALLOCATION_INVALID Handle = 0xC0262106
+ ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE Handle = 0xC0262107
+ ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED Handle = 0xC0262108
+ ERROR_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION Handle = 0xC0262109
+ ERROR_GRAPHICS_INVALID_ALLOCATION_USAGE Handle = 0xC0262110
+ ERROR_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION Handle = 0xC0262111
+ ERROR_GRAPHICS_ALLOCATION_CLOSED Handle = 0xC0262112
+ ERROR_GRAPHICS_INVALID_ALLOCATION_INSTANCE Handle = 0xC0262113
+ ERROR_GRAPHICS_INVALID_ALLOCATION_HANDLE Handle = 0xC0262114
+ ERROR_GRAPHICS_WRONG_ALLOCATION_DEVICE Handle = 0xC0262115
+ ERROR_GRAPHICS_ALLOCATION_CONTENT_LOST Handle = 0xC0262116
+ ERROR_GRAPHICS_GPU_EXCEPTION_ON_DEVICE Handle = 0xC0262200
+ ERROR_GRAPHICS_SKIP_ALLOCATION_PREPARATION Handle = 0x40262201
+ ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY Handle = 0xC0262300
+ ERROR_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED Handle = 0xC0262301
+ ERROR_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED Handle = 0xC0262302
+ ERROR_GRAPHICS_INVALID_VIDPN Handle = 0xC0262303
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE Handle = 0xC0262304
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET Handle = 0xC0262305
+ ERROR_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED Handle = 0xC0262306
+ ERROR_GRAPHICS_MODE_NOT_PINNED Handle = 0x00262307
+ ERROR_GRAPHICS_INVALID_VIDPN_SOURCEMODESET Handle = 0xC0262308
+ ERROR_GRAPHICS_INVALID_VIDPN_TARGETMODESET Handle = 0xC0262309
+ ERROR_GRAPHICS_INVALID_FREQUENCY Handle = 0xC026230A
+ ERROR_GRAPHICS_INVALID_ACTIVE_REGION Handle = 0xC026230B
+ ERROR_GRAPHICS_INVALID_TOTAL_REGION Handle = 0xC026230C
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE Handle = 0xC0262310
+ ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE Handle = 0xC0262311
+ ERROR_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET Handle = 0xC0262312
+ ERROR_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY Handle = 0xC0262313
+ ERROR_GRAPHICS_MODE_ALREADY_IN_MODESET Handle = 0xC0262314
+ ERROR_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET Handle = 0xC0262315
+ ERROR_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET Handle = 0xC0262316
+ ERROR_GRAPHICS_SOURCE_ALREADY_IN_SET Handle = 0xC0262317
+ ERROR_GRAPHICS_TARGET_ALREADY_IN_SET Handle = 0xC0262318
+ ERROR_GRAPHICS_INVALID_VIDPN_PRESENT_PATH Handle = 0xC0262319
+ ERROR_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY Handle = 0xC026231A
+ ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET Handle = 0xC026231B
+ ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE Handle = 0xC026231C
+ ERROR_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET Handle = 0xC026231D
+ ERROR_GRAPHICS_NO_PREFERRED_MODE Handle = 0x0026231E
+ ERROR_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET Handle = 0xC026231F
+ ERROR_GRAPHICS_STALE_MODESET Handle = 0xC0262320
+ ERROR_GRAPHICS_INVALID_MONITOR_SOURCEMODESET Handle = 0xC0262321
+ ERROR_GRAPHICS_INVALID_MONITOR_SOURCE_MODE Handle = 0xC0262322
+ ERROR_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN Handle = 0xC0262323
+ ERROR_GRAPHICS_MODE_ID_MUST_BE_UNIQUE Handle = 0xC0262324
+ ERROR_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION Handle = 0xC0262325
+ ERROR_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES Handle = 0xC0262326
+ ERROR_GRAPHICS_PATH_NOT_IN_TOPOLOGY Handle = 0xC0262327
+ ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE Handle = 0xC0262328
+ ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET Handle = 0xC0262329
+ ERROR_GRAPHICS_INVALID_MONITORDESCRIPTORSET Handle = 0xC026232A
+ ERROR_GRAPHICS_INVALID_MONITORDESCRIPTOR Handle = 0xC026232B
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET Handle = 0xC026232C
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET Handle = 0xC026232D
+ ERROR_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE Handle = 0xC026232E
+ ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE Handle = 0xC026232F
+ ERROR_GRAPHICS_RESOURCES_NOT_RELATED Handle = 0xC0262330
+ ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE Handle = 0xC0262331
+ ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE Handle = 0xC0262332
+ ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET Handle = 0xC0262333
+ ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER Handle = 0xC0262334
+ ERROR_GRAPHICS_NO_VIDPNMGR Handle = 0xC0262335
+ ERROR_GRAPHICS_NO_ACTIVE_VIDPN Handle = 0xC0262336
+ ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY Handle = 0xC0262337
+ ERROR_GRAPHICS_MONITOR_NOT_CONNECTED Handle = 0xC0262338
+ ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY Handle = 0xC0262339
+ ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE Handle = 0xC026233A
+ ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE Handle = 0xC026233B
+ ERROR_GRAPHICS_INVALID_STRIDE Handle = 0xC026233C
+ ERROR_GRAPHICS_INVALID_PIXELFORMAT Handle = 0xC026233D
+ ERROR_GRAPHICS_INVALID_COLORBASIS Handle = 0xC026233E
+ ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE Handle = 0xC026233F
+ ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY Handle = 0xC0262340
+ ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT Handle = 0xC0262341
+ ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0xC0262342
+ ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN Handle = 0xC0262343
+ ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL Handle = 0xC0262344
+ ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION Handle = 0xC0262345
+ ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED Handle = 0xC0262346
+ ERROR_GRAPHICS_INVALID_GAMMA_RAMP Handle = 0xC0262347
+ ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED Handle = 0xC0262348
+ ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED Handle = 0xC0262349
+ ERROR_GRAPHICS_MODE_NOT_IN_MODESET Handle = 0xC026234A
+ ERROR_GRAPHICS_DATASET_IS_EMPTY Handle = 0x0026234B
+ ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET Handle = 0x0026234C
+ ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON Handle = 0xC026234D
+ ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE Handle = 0xC026234E
+ ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE Handle = 0xC026234F
+ ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS Handle = 0xC0262350
+ ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED Handle = 0x00262351
+ ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING Handle = 0xC0262352
+ ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED Handle = 0xC0262353
+ ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS Handle = 0xC0262354
+ ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT Handle = 0xC0262355
+ ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM Handle = 0xC0262356
+ ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN Handle = 0xC0262357
+ ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT Handle = 0xC0262358
+ ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED Handle = 0xC0262359
+ ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION Handle = 0xC026235A
+ ERROR_GRAPHICS_INVALID_CLIENT_TYPE Handle = 0xC026235B
+ ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET Handle = 0xC026235C
+ ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED Handle = 0xC0262400
+ ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED Handle = 0xC0262401
+ ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS Handle = 0x4026242F
+ ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER Handle = 0xC0262430
+ ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED Handle = 0xC0262431
+ ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED Handle = 0xC0262432
+ ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY Handle = 0xC0262433
+ ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED Handle = 0xC0262434
+ ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON Handle = 0xC0262435
+ ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE Handle = 0xC0262436
+ ERROR_GRAPHICS_LEADLINK_START_DEFERRED Handle = 0x40262437
+ ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER Handle = 0xC0262438
+ ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY Handle = 0x40262439
+ ERROR_GRAPHICS_START_DEFERRED Handle = 0x4026243A
+ ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED Handle = 0xC026243B
+ ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS Handle = 0x4026243C
+ ERROR_GRAPHICS_OPM_NOT_SUPPORTED Handle = 0xC0262500
+ ERROR_GRAPHICS_COPP_NOT_SUPPORTED Handle = 0xC0262501
+ ERROR_GRAPHICS_UAB_NOT_SUPPORTED Handle = 0xC0262502
+ ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS Handle = 0xC0262503
+ ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST Handle = 0xC0262505
+ ERROR_GRAPHICS_OPM_INTERNAL_ERROR Handle = 0xC026250B
+ ERROR_GRAPHICS_OPM_INVALID_HANDLE Handle = 0xC026250C
+ ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH Handle = 0xC026250E
+ ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED Handle = 0xC026250F
+ ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED Handle = 0xC0262510
+ ERROR_GRAPHICS_PVP_HFS_FAILED Handle = 0xC0262511
+ ERROR_GRAPHICS_OPM_INVALID_SRM Handle = 0xC0262512
+ ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP Handle = 0xC0262513
+ ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP Handle = 0xC0262514
+ ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA Handle = 0xC0262515
+ ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET Handle = 0xC0262516
+ ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH Handle = 0xC0262517
+ ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE Handle = 0xC0262518
+ ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS Handle = 0xC026251A
+ ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC026251B
+ ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS Handle = 0xC026251C
+ ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST Handle = 0xC026251D
+ ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR Handle = 0xC026251E
+ ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS Handle = 0xC026251F
+ ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED Handle = 0xC0262520
+ ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST Handle = 0xC0262521
+ ERROR_GRAPHICS_I2C_NOT_SUPPORTED Handle = 0xC0262580
+ ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST Handle = 0xC0262581
+ ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA Handle = 0xC0262582
+ ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA Handle = 0xC0262583
+ ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED Handle = 0xC0262584
+ ERROR_GRAPHICS_DDCCI_INVALID_DATA Handle = 0xC0262585
+ ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE Handle = 0xC0262586
+ ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING Handle = 0xC0262587
+ ERROR_GRAPHICS_MCA_INTERNAL_ERROR Handle = 0xC0262588
+ ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND Handle = 0xC0262589
+ ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH Handle = 0xC026258A
+ ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM Handle = 0xC026258B
+ ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE Handle = 0xC026258C
+ ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS Handle = 0xC026258D
+ ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE Handle = 0xC02625D8
+ ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION Handle = 0xC02625D9
+ ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION Handle = 0xC02625DA
+ ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH Handle = 0xC02625DB
+ ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION Handle = 0xC02625DC
+ ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED Handle = 0xC02625DE
+ ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE Handle = 0xC02625DF
+ ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED Handle = 0xC02625E0
+ ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME Handle = 0xC02625E1
+ ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP Handle = 0xC02625E2
+ ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED Handle = 0xC02625E3
+ ERROR_GRAPHICS_INVALID_POINTER Handle = 0xC02625E4
+ ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE Handle = 0xC02625E5
+ ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL Handle = 0xC02625E6
+ ERROR_GRAPHICS_INTERNAL_ERROR Handle = 0xC02625E7
+ ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC02605E8
+ NAP_E_INVALID_PACKET Handle = 0x80270001
+ NAP_E_MISSING_SOH Handle = 0x80270002
+ NAP_E_CONFLICTING_ID Handle = 0x80270003
+ NAP_E_NO_CACHED_SOH Handle = 0x80270004
+ NAP_E_STILL_BOUND Handle = 0x80270005
+ NAP_E_NOT_REGISTERED Handle = 0x80270006
+ NAP_E_NOT_INITIALIZED Handle = 0x80270007
+ NAP_E_MISMATCHED_ID Handle = 0x80270008
+ NAP_E_NOT_PENDING Handle = 0x80270009
+ NAP_E_ID_NOT_FOUND Handle = 0x8027000A
+ NAP_E_MAXSIZE_TOO_SMALL Handle = 0x8027000B
+ NAP_E_SERVICE_NOT_RUNNING Handle = 0x8027000C
+ NAP_S_CERT_ALREADY_PRESENT Handle = 0x0027000D
+ NAP_E_ENTITY_DISABLED Handle = 0x8027000E
+ NAP_E_NETSH_GROUPPOLICY_ERROR Handle = 0x8027000F
+ NAP_E_TOO_MANY_CALLS Handle = 0x80270010
+ NAP_E_SHV_CONFIG_EXISTED Handle = 0x80270011
+ NAP_E_SHV_CONFIG_NOT_FOUND Handle = 0x80270012
+ NAP_E_SHV_TIMEOUT Handle = 0x80270013
+ TPM_E_ERROR_MASK Handle = 0x80280000
+ TPM_E_AUTHFAIL Handle = 0x80280001
+ TPM_E_BADINDEX Handle = 0x80280002
+ TPM_E_BAD_PARAMETER Handle = 0x80280003
+ TPM_E_AUDITFAILURE Handle = 0x80280004
+ TPM_E_CLEAR_DISABLED Handle = 0x80280005
+ TPM_E_DEACTIVATED Handle = 0x80280006
+ TPM_E_DISABLED Handle = 0x80280007
+ TPM_E_DISABLED_CMD Handle = 0x80280008
+ TPM_E_FAIL Handle = 0x80280009
+ TPM_E_BAD_ORDINAL Handle = 0x8028000A
+ TPM_E_INSTALL_DISABLED Handle = 0x8028000B
+ TPM_E_INVALID_KEYHANDLE Handle = 0x8028000C
+ TPM_E_KEYNOTFOUND Handle = 0x8028000D
+ TPM_E_INAPPROPRIATE_ENC Handle = 0x8028000E
+ TPM_E_MIGRATEFAIL Handle = 0x8028000F
+ TPM_E_INVALID_PCR_INFO Handle = 0x80280010
+ TPM_E_NOSPACE Handle = 0x80280011
+ TPM_E_NOSRK Handle = 0x80280012
+ TPM_E_NOTSEALED_BLOB Handle = 0x80280013
+ TPM_E_OWNER_SET Handle = 0x80280014
+ TPM_E_RESOURCES Handle = 0x80280015
+ TPM_E_SHORTRANDOM Handle = 0x80280016
+ TPM_E_SIZE Handle = 0x80280017
+ TPM_E_WRONGPCRVAL Handle = 0x80280018
+ TPM_E_BAD_PARAM_SIZE Handle = 0x80280019
+ TPM_E_SHA_THREAD Handle = 0x8028001A
+ TPM_E_SHA_ERROR Handle = 0x8028001B
+ TPM_E_FAILEDSELFTEST Handle = 0x8028001C
+ TPM_E_AUTH2FAIL Handle = 0x8028001D
+ TPM_E_BADTAG Handle = 0x8028001E
+ TPM_E_IOERROR Handle = 0x8028001F
+ TPM_E_ENCRYPT_ERROR Handle = 0x80280020
+ TPM_E_DECRYPT_ERROR Handle = 0x80280021
+ TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022
+ TPM_E_NO_ENDORSEMENT Handle = 0x80280023
+ TPM_E_INVALID_KEYUSAGE Handle = 0x80280024
+ TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025
+ TPM_E_INVALID_POSTINIT Handle = 0x80280026
+ TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027
+ TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028
+ TPM_E_BAD_MIGRATION Handle = 0x80280029
+ TPM_E_BAD_SCHEME Handle = 0x8028002A
+ TPM_E_BAD_DATASIZE Handle = 0x8028002B
+ TPM_E_BAD_MODE Handle = 0x8028002C
+ TPM_E_BAD_PRESENCE Handle = 0x8028002D
+ TPM_E_BAD_VERSION Handle = 0x8028002E
+ TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F
+ TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030
+ TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031
+ TPM_E_NOTRESETABLE Handle = 0x80280032
+ TPM_E_NOTLOCAL Handle = 0x80280033
+ TPM_E_BAD_TYPE Handle = 0x80280034
+ TPM_E_INVALID_RESOURCE Handle = 0x80280035
+ TPM_E_NOTFIPS Handle = 0x80280036
+ TPM_E_INVALID_FAMILY Handle = 0x80280037
+ TPM_E_NO_NV_PERMISSION Handle = 0x80280038
+ TPM_E_REQUIRES_SIGN Handle = 0x80280039
+ TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A
+ TPM_E_AUTH_CONFLICT Handle = 0x8028003B
+ TPM_E_AREA_LOCKED Handle = 0x8028003C
+ TPM_E_BAD_LOCALITY Handle = 0x8028003D
+ TPM_E_READ_ONLY Handle = 0x8028003E
+ TPM_E_PER_NOWRITE Handle = 0x8028003F
+ TPM_E_FAMILYCOUNT Handle = 0x80280040
+ TPM_E_WRITE_LOCKED Handle = 0x80280041
+ TPM_E_BAD_ATTRIBUTES Handle = 0x80280042
+ TPM_E_INVALID_STRUCTURE Handle = 0x80280043
+ TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044
+ TPM_E_BAD_COUNTER Handle = 0x80280045
+ TPM_E_NOT_FULLWRITE Handle = 0x80280046
+ TPM_E_CONTEXT_GAP Handle = 0x80280047
+ TPM_E_MAXNVWRITES Handle = 0x80280048
+ TPM_E_NOOPERATOR Handle = 0x80280049
+ TPM_E_RESOURCEMISSING Handle = 0x8028004A
+ TPM_E_DELEGATE_LOCK Handle = 0x8028004B
+ TPM_E_DELEGATE_FAMILY Handle = 0x8028004C
+ TPM_E_DELEGATE_ADMIN Handle = 0x8028004D
+ TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E
+ TPM_E_OWNER_CONTROL Handle = 0x8028004F
+ TPM_E_DAA_RESOURCES Handle = 0x80280050
+ TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051
+ TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052
+ TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053
+ TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054
+ TPM_E_DAA_STAGE Handle = 0x80280055
+ TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056
+ TPM_E_DAA_WRONG_W Handle = 0x80280057
+ TPM_E_BAD_HANDLE Handle = 0x80280058
+ TPM_E_BAD_DELEGATE Handle = 0x80280059
+ TPM_E_BADCONTEXT Handle = 0x8028005A
+ TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B
+ TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C
+ TPM_E_MA_DESTINATION Handle = 0x8028005D
+ TPM_E_MA_SOURCE Handle = 0x8028005E
+ TPM_E_MA_AUTHORITY Handle = 0x8028005F
+ TPM_E_PERMANENTEK Handle = 0x80280061
+ TPM_E_BAD_SIGNATURE Handle = 0x80280062
+ TPM_E_NOCONTEXTSPACE Handle = 0x80280063
+ TPM_20_E_ASYMMETRIC Handle = 0x80280081
+ TPM_20_E_ATTRIBUTES Handle = 0x80280082
+ TPM_20_E_HASH Handle = 0x80280083
+ TPM_20_E_VALUE Handle = 0x80280084
+ TPM_20_E_HIERARCHY Handle = 0x80280085
+ TPM_20_E_KEY_SIZE Handle = 0x80280087
+ TPM_20_E_MGF Handle = 0x80280088
+ TPM_20_E_MODE Handle = 0x80280089
+ TPM_20_E_TYPE Handle = 0x8028008A
+ TPM_20_E_HANDLE Handle = 0x8028008B
+ TPM_20_E_KDF Handle = 0x8028008C
+ TPM_20_E_RANGE Handle = 0x8028008D
+ TPM_20_E_AUTH_FAIL Handle = 0x8028008E
+ TPM_20_E_NONCE Handle = 0x8028008F
+ TPM_20_E_PP Handle = 0x80280090
+ TPM_20_E_SCHEME Handle = 0x80280092
+ TPM_20_E_SIZE Handle = 0x80280095
+ TPM_20_E_SYMMETRIC Handle = 0x80280096
+ TPM_20_E_TAG Handle = 0x80280097
+ TPM_20_E_SELECTOR Handle = 0x80280098
+ TPM_20_E_INSUFFICIENT Handle = 0x8028009A
+ TPM_20_E_SIGNATURE Handle = 0x8028009B
+ TPM_20_E_KEY Handle = 0x8028009C
+ TPM_20_E_POLICY_FAIL Handle = 0x8028009D
+ TPM_20_E_INTEGRITY Handle = 0x8028009F
+ TPM_20_E_TICKET Handle = 0x802800A0
+ TPM_20_E_RESERVED_BITS Handle = 0x802800A1
+ TPM_20_E_BAD_AUTH Handle = 0x802800A2
+ TPM_20_E_EXPIRED Handle = 0x802800A3
+ TPM_20_E_POLICY_CC Handle = 0x802800A4
+ TPM_20_E_BINDING Handle = 0x802800A5
+ TPM_20_E_CURVE Handle = 0x802800A6
+ TPM_20_E_ECC_POINT Handle = 0x802800A7
+ TPM_20_E_INITIALIZE Handle = 0x80280100
+ TPM_20_E_FAILURE Handle = 0x80280101
+ TPM_20_E_SEQUENCE Handle = 0x80280103
+ TPM_20_E_PRIVATE Handle = 0x8028010B
+ TPM_20_E_HMAC Handle = 0x80280119
+ TPM_20_E_DISABLED Handle = 0x80280120
+ TPM_20_E_EXCLUSIVE Handle = 0x80280121
+ TPM_20_E_ECC_CURVE Handle = 0x80280123
+ TPM_20_E_AUTH_TYPE Handle = 0x80280124
+ TPM_20_E_AUTH_MISSING Handle = 0x80280125
+ TPM_20_E_POLICY Handle = 0x80280126
+ TPM_20_E_PCR Handle = 0x80280127
+ TPM_20_E_PCR_CHANGED Handle = 0x80280128
+ TPM_20_E_UPGRADE Handle = 0x8028012D
+ TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E
+ TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F
+ TPM_20_E_REBOOT Handle = 0x80280130
+ TPM_20_E_UNBALANCED Handle = 0x80280131
+ TPM_20_E_COMMAND_SIZE Handle = 0x80280142
+ TPM_20_E_COMMAND_CODE Handle = 0x80280143
+ TPM_20_E_AUTHSIZE Handle = 0x80280144
+ TPM_20_E_AUTH_CONTEXT Handle = 0x80280145
+ TPM_20_E_NV_RANGE Handle = 0x80280146
+ TPM_20_E_NV_SIZE Handle = 0x80280147
+ TPM_20_E_NV_LOCKED Handle = 0x80280148
+ TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149
+ TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A
+ TPM_20_E_NV_SPACE Handle = 0x8028014B
+ TPM_20_E_NV_DEFINED Handle = 0x8028014C
+ TPM_20_E_BAD_CONTEXT Handle = 0x80280150
+ TPM_20_E_CPHASH Handle = 0x80280151
+ TPM_20_E_PARENT Handle = 0x80280152
+ TPM_20_E_NEEDS_TEST Handle = 0x80280153
+ TPM_20_E_NO_RESULT Handle = 0x80280154
+ TPM_20_E_SENSITIVE Handle = 0x80280155
+ TPM_E_COMMAND_BLOCKED Handle = 0x80280400
+ TPM_E_INVALID_HANDLE Handle = 0x80280401
+ TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402
+ TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403
+ TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404
+ TPM_E_RETRY Handle = 0x80280800
+ TPM_E_NEEDS_SELFTEST Handle = 0x80280801
+ TPM_E_DOING_SELFTEST Handle = 0x80280802
+ TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803
+ TPM_20_E_CONTEXT_GAP Handle = 0x80280901
+ TPM_20_E_OBJECT_MEMORY Handle = 0x80280902
+ TPM_20_E_SESSION_MEMORY Handle = 0x80280903
+ TPM_20_E_MEMORY Handle = 0x80280904
+ TPM_20_E_SESSION_HANDLES Handle = 0x80280905
+ TPM_20_E_OBJECT_HANDLES Handle = 0x80280906
+ TPM_20_E_LOCALITY Handle = 0x80280907
+ TPM_20_E_YIELDED Handle = 0x80280908
+ TPM_20_E_CANCELED Handle = 0x80280909
+ TPM_20_E_TESTING Handle = 0x8028090A
+ TPM_20_E_NV_RATE Handle = 0x80280920
+ TPM_20_E_LOCKOUT Handle = 0x80280921
+ TPM_20_E_RETRY Handle = 0x80280922
+ TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923
+ TBS_E_INTERNAL_ERROR Handle = 0x80284001
+ TBS_E_BAD_PARAMETER Handle = 0x80284002
+ TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003
+ TBS_E_INVALID_CONTEXT Handle = 0x80284004
+ TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005
+ TBS_E_IOERROR Handle = 0x80284006
+ TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007
+ TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008
+ TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009
+ TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A
+ TBS_E_SERVICE_START_PENDING Handle = 0x8028400B
+ TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C
+ TBS_E_COMMAND_CANCELED Handle = 0x8028400D
+ TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E
+ TBS_E_TPM_NOT_FOUND Handle = 0x8028400F
+ TBS_E_SERVICE_DISABLED Handle = 0x80284010
+ TBS_E_NO_EVENT_LOG Handle = 0x80284011
+ TBS_E_ACCESS_DENIED Handle = 0x80284012
+ TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013
+ TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014
+ TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015
+ TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016
+ TPMAPI_E_INVALID_STATE Handle = 0x80290100
+ TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101
+ TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102
+ TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103
+ TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104
+ TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105
+ TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106
+ TPMAPI_E_INTERNAL_ERROR Handle = 0x80290107
+ TPMAPI_E_ACCESS_DENIED Handle = 0x80290108
+ TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109
+ TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A
+ TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B
+ TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C
+ TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D
+ TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E
+ TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F
+ TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110
+ TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111
+ TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112
+ TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113
+ TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114
+ TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115
+ TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116
+ TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117
+ TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118
+ TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119
+ TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A
+ TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B
+ TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C
+ TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D
+ TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E
+ TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F
+ TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120
+ TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121
+ TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122
+ TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123
+ TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124
+ TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125
+ TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126
+ TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127
+ TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128
+ TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129
+ TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A
+ TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B
+ TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C
+ TBSIMP_E_BUFFER_TOO_SMALL Handle = 0x80290200
+ TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201
+ TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202
+ TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203
+ TBSIMP_E_TPM_ERROR Handle = 0x80290204
+ TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205
+ TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206
+ TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207
+ TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208
+ TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209
+ TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A
+ TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B
+ TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C
+ TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D
+ TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E
+ TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F
+ TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210
+ TBSIMP_E_COMMAND_FAILED Handle = 0x80290211
+ TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212
+ TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213
+ TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214
+ TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215
+ TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216
+ TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217
+ TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218
+ TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219
+ TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A
+ TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B
+ TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300
+ TPM_E_PPI_USER_ABORT Handle = 0x80290301
+ TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302
+ TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303
+ TPM_E_PPI_BLOCKED_IN_BIOS Handle = 0x80290304
+ TPM_E_PCP_ERROR_MASK Handle = 0x80290400
+ TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401
+ TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402
+ TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403
+ TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404
+ TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405
+ TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406
+ TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407
+ TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408
+ TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409
+ TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A
+ TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B
+ TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C
+ TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E
+ TPM_E_KEY_NOT_LOADED Handle = 0x8029040F
+ TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410
+ TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411
+ TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412
+ TPM_E_NOT_PCR_BOUND Handle = 0x80290413
+ TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414
+ TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415
+ TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416
+ TPM_E_SOFT_KEY_ERROR Handle = 0x80290417
+ TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418
+ TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419
+ TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A
+ TPM_E_LOCKED_OUT Handle = 0x8029041B
+ TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C
+ TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D
+ TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E
+ TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F
+ TPM_E_PCP_TICKET_MISSING Handle = 0x80290420
+ TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421
+ TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422
+ TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423
+ TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500
+ PLA_E_DCS_NOT_FOUND Handle = 0x80300002
+ PLA_E_DCS_IN_USE Handle = 0x803000AA
+ PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045
+ PLA_E_NO_MIN_DISK Handle = 0x80300070
+ PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7
+ PLA_S_PROPERTY_IGNORED Handle = 0x00300100
+ PLA_E_PROPERTY_CONFLICT Handle = 0x80300101
+ PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102
+ PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103
+ PLA_E_DCS_NOT_RUNNING Handle = 0x80300104
+ PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105
+ PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106
+ PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107
+ PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108
+ PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109
+ PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A
+ PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B
+ PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C
+ PLA_E_NO_DUPLICATES Handle = 0x8030010D
+ PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E
+ PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F
+ PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110
+ PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111
+ PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112
+ PLA_E_CABAPI_FAILURE Handle = 0x80300113
+ FVE_E_LOCKED_VOLUME Handle = 0x80310000
+ FVE_E_NOT_ENCRYPTED Handle = 0x80310001
+ FVE_E_NO_TPM_BIOS Handle = 0x80310002
+ FVE_E_NO_MBR_METRIC Handle = 0x80310003
+ FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004
+ FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005
+ FVE_E_WRONG_BOOTMGR Handle = 0x80310006
+ FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007
+ FVE_E_NOT_ACTIVATED Handle = 0x80310008
+ FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009
+ FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A
+ FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B
+ FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C
+ FVE_E_AD_NO_VALUES Handle = 0x8031000D
+ FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E
+ FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F
+ FVE_E_BAD_INFORMATION Handle = 0x80310010
+ FVE_E_TOO_SMALL Handle = 0x80310011
+ FVE_E_SYSTEM_VOLUME Handle = 0x80310012
+ FVE_E_FAILED_WRONG_FS Handle = 0x80310013
+ FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014
+ FVE_E_NOT_SUPPORTED Handle = 0x80310015
+ FVE_E_BAD_DATA Handle = 0x80310016
+ FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017
+ FVE_E_TPM_NOT_OWNED Handle = 0x80310018
+ FVE_E_NOT_DATA_VOLUME Handle = 0x80310019
+ FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A
+ FVE_E_CONV_READ Handle = 0x8031001B
+ FVE_E_CONV_WRITE Handle = 0x8031001C
+ FVE_E_KEY_REQUIRED Handle = 0x8031001D
+ FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E
+ FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F
+ FVE_E_OS_NOT_PROTECTED Handle = 0x80310020
+ FVE_E_PROTECTION_DISABLED Handle = 0x80310021
+ FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022
+ FVE_E_FOREIGN_VOLUME Handle = 0x80310023
+ FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024
+ FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025
+ FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026
+ FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027
+ FVE_E_NOT_OS_VOLUME Handle = 0x80310028
+ FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029
+ FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A
+ FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B
+ FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C
+ FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D
+ FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E
+ FVE_E_BOOTABLE_CDDVD Handle = 0x80310030
+ FVE_E_PROTECTOR_EXISTS Handle = 0x80310031
+ FVE_E_RELATIVE_PATH Handle = 0x80310032
+ FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033
+ FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034
+ FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035
+ FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036
+ FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037
+ FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038
+ FVE_E_NOT_DECRYPTED Handle = 0x80310039
+ FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A
+ FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B
+ FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C
+ FVE_E_KEYFILE_INVALID Handle = 0x8031003D
+ FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E
+ FVE_E_TPM_DISABLED Handle = 0x8031003F
+ FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040
+ FVE_E_TPM_INVALID_PCR Handle = 0x80310041
+ FVE_E_TPM_NO_VMK Handle = 0x80310042
+ FVE_E_PIN_INVALID Handle = 0x80310043
+ FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044
+ FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045
+ FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046
+ FVE_E_FS_NOT_EXTENDED Handle = 0x80310047
+ FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048
+ FVE_E_NO_LICENSE Handle = 0x80310049
+ FVE_E_NOT_ON_STACK Handle = 0x8031004A
+ FVE_E_FS_MOUNTED Handle = 0x8031004B
+ FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C
+ FVE_E_DRY_RUN_FAILED Handle = 0x8031004D
+ FVE_E_REBOOT_REQUIRED Handle = 0x8031004E
+ FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F
+ FVE_E_RAW_ACCESS Handle = 0x80310050
+ FVE_E_RAW_BLOCKED Handle = 0x80310051
+ FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052
+ FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053
+ FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054
+ FVE_E_MOR_FAILED Handle = 0x80310055
+ FVE_E_HIDDEN_VOLUME Handle = 0x80310056
+ FVE_E_TRANSIENT_STATE Handle = 0x80310057
+ FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058
+ FVE_E_VOLUME_HANDLE_OPEN Handle = 0x80310059
+ FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A
+ FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B
+ FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C
+ FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D
+ FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E
+ FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F
+ FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060
+ FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061
+ FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062
+ FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063
+ FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064
+ FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065
+ FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066
+ FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067
+ FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068
+ FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069
+ FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A
+ FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B
+ FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C
+ FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D
+ FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E
+ FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F
+ FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070
+ FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071
+ FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072
+ FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073
+ FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074
+ FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075
+ FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076
+ FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077
+ FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078
+ FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079
+ FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080
+ FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081
+ FVE_E_RECOVERY_PARTITION Handle = 0x80310082
+ FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083
+ FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084
+ FVE_E_NON_BITLOCKER_OID Handle = 0x80310085
+ FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086
+ FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087
+ FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088
+ FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089
+ FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090
+ FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091
+ FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092
+ FVE_E_NON_BITLOCKER_KU Handle = 0x80310093
+ FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094
+ FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095
+ FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096
+ FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097
+ FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098
+ FVE_E_ENH_PIN_INVALID Handle = 0x80310099
+ FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A
+ FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B
+ FVE_E_EFI_ONLY Handle = 0x8031009C
+ FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D
+ FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E
+ FVE_E_INVALID_NKP_CERT Handle = 0x8031009F
+ FVE_E_NO_EXISTING_PIN Handle = 0x803100A0
+ FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1
+ FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2
+ FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3
+ FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4
+ FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5
+ FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6
+ FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7
+ FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8
+ FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9
+ FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA
+ FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB
+ FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC
+ FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD
+ FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE
+ FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF
+ FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0
+ FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1
+ FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2
+ FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3
+ FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4
+ FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5
+ FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6
+ FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7
+ FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8
+ FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9
+ FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA
+ FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB
+ FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC
+ FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD
+ FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE
+ FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF
+ FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0
+ FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1
+ FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2
+ FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3
+ FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4
+ FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5
+ FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6
+ FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7
+ FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8
+ FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9
+ FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA
+ FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB
+ FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC
+ FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD
+ FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE
+ FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF
+ FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0
+ FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1
+ FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2
+ FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3
+ FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4
+ FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5
+ FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6
+ FVE_E_NOT_DE_VOLUME Handle = 0x803100D7
+ FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8
+ FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9
+ FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001
+ FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002
+ FWP_E_FILTER_NOT_FOUND Handle = 0x80320003
+ FWP_E_LAYER_NOT_FOUND Handle = 0x80320004
+ FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005
+ FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006
+ FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007
+ FWP_E_NOT_FOUND Handle = 0x80320008
+ FWP_E_ALREADY_EXISTS Handle = 0x80320009
+ FWP_E_IN_USE Handle = 0x8032000A
+ FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B
+ FWP_E_WRONG_SESSION Handle = 0x8032000C
+ FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D
+ FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E
+ FWP_E_TXN_ABORTED Handle = 0x8032000F
+ FWP_E_SESSION_ABORTED Handle = 0x80320010
+ FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011
+ FWP_E_TIMEOUT Handle = 0x80320012
+ FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013
+ FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014
+ FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015
+ FWP_E_LIFETIME_MISMATCH Handle = 0x80320016
+ FWP_E_BUILTIN_OBJECT Handle = 0x80320017
+ FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018
+ FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019
+ FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A
+ FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B
+ FWP_E_NULL_POINTER Handle = 0x8032001C
+ FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D
+ FWP_E_INVALID_FLAGS Handle = 0x8032001E
+ FWP_E_INVALID_NET_MASK Handle = 0x8032001F
+ FWP_E_INVALID_RANGE Handle = 0x80320020
+ FWP_E_INVALID_INTERVAL Handle = 0x80320021
+ FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022
+ FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023
+ FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024
+ FWP_E_INVALID_WEIGHT Handle = 0x80320025
+ FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026
+ FWP_E_TYPE_MISMATCH Handle = 0x80320027
+ FWP_E_OUT_OF_BOUNDS Handle = 0x80320028
+ FWP_E_RESERVED Handle = 0x80320029
+ FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A
+ FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B
+ FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C
+ FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D
+ FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E
+ FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F
+ FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030
+ FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031
+ FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032
+ FWP_E_NEVER_MATCH Handle = 0x80320033
+ FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034
+ FWP_E_INVALID_PARAMETER Handle = 0x80320035
+ FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036
+ FWP_E_CALLOUT_NOTIFICATION_FAILED Handle = 0x80320037
+ FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038
+ FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039
+ FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A
+ FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B
+ FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C
+ FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D
+ FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E
+ FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F
+ FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040
+ FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041
+ FWP_E_INVALID_DNS_NAME Handle = 0x80320042
+ FWP_E_STILL_ON Handle = 0x80320043
+ FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044
+ FWP_E_DROP_NOICMP Handle = 0x80320104
+ WS_S_ASYNC Handle = 0x003D0000
+ WS_S_END Handle = 0x003D0001
+ WS_E_INVALID_FORMAT Handle = 0x803D0000
+ WS_E_OBJECT_FAULTED Handle = 0x803D0001
+ WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002
+ WS_E_INVALID_OPERATION Handle = 0x803D0003
+ WS_E_OPERATION_ABORTED Handle = 0x803D0004
+ WS_E_ENDPOINT_ACCESS_DENIED Handle = 0x803D0005
+ WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006
+ WS_E_OPERATION_ABANDONED Handle = 0x803D0007
+ WS_E_QUOTA_EXCEEDED Handle = 0x803D0008
+ WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009
+ WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A
+ WS_E_ADDRESS_IN_USE Handle = 0x803D000B
+ WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C
+ WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D
+ WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E
+ WS_E_ENDPOINT_FAILURE Handle = 0x803D000F
+ WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010
+ WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011
+ WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012
+ WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013
+ WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014
+ WS_E_PROXY_FAILURE Handle = 0x803D0015
+ WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016
+ WS_E_NOT_SUPPORTED Handle = 0x803D0017
+ WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018
+ WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019
+ WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A
+ WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B
+ WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C
+ WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D
+ WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E
+ WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F
+ WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020
+ WS_E_OTHER Handle = 0x803D0021
+ WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022
+ WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023
+ ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002
+ ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004
+ ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005
+ ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006
+ ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007
+ ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008
+ ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009
+ ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A
+ ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B
+ ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C
+ ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D
+ ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB
+ ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F
+ ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011
+ ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014
+ ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015
+ ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016
+ ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017
+ ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018
+ ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019
+ ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A
+ ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B
+ ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C
+ ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D
+ ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E
+ ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F
+ ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022
+ ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010
+ ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A
+ ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B
+ ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C
+ ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D
+ ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E
+ ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F
+ ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030
+ ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031
+ ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED syscall.Errno = 0x80342000
+ ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001
+ ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002
+ ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003
+ ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004
+ ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005
+ ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006
+ ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007
+ ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008
+ ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001
+ ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F
+ ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012
+ ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013
+ ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002
+ ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003
+ ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004
+ ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005
+ ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006
+ ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007
+ ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008
+ ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009
+ ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A
+ ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B
+ ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C
+ ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D
+ ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E
+ ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011
+ ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012
+ ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013
+ ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014
+ ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015
+ ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016
+ ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017
+ ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018
+ ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019
+ ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A
+ ERROR_HV_NO_DATA syscall.Errno = 0xC035001B
+ ERROR_HV_INACTIVE syscall.Errno = 0xC035001C
+ ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D
+ ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E
+ ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033
+ ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038
+ ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C
+ ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D
+ ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E
+ ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F
+ ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041
+ ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050
+ ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051
+ ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055
+ ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057
+ ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058
+ ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059
+ ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060
+ ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F
+ ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070
+ ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071
+ ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072
+ ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073
+ ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000
+ ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001
+ ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002
+ ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003
+ ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004
+ ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005
+ ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006
+ ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007
+ ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008
+ ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009
+ ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A
+ ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B
+ ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C
+ ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D
+ ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E
+ ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F
+ ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010
+ ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011
+ ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012
+ ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013
+ ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014
+ ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015
+ ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016
+ ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017
+ ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018
+ ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019
+ ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A
+ ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B
+ ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C
+ ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D
+ ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E
+ ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F
+ ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020
+ ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021
+ ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022
+ ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023
+ ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024
+ ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025
+ ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026
+ ERROR_VID_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370027
+ ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028
+ ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029
+ ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A
+ ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100
+ ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101
+ ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102
+ ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103
+ ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104
+ ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105
+ ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106
+ ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107
+ ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108
+ ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109
+ ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A
+ ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B
+ ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C
+ ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 0xC037010D
+ ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E
+ ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F
+ ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110
+ ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111
+ ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112
+ ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113
+ HCS_E_TERMINATED_DURING_START Handle = 0x80370100
+ HCS_E_IMAGE_MISMATCH Handle = 0x80370101
+ HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102
+ HCS_E_INVALID_STATE Handle = 0x80370105
+ HCS_E_UNEXPECTED_EXIT Handle = 0x80370106
+ HCS_E_TERMINATED Handle = 0x80370107
+ HCS_E_CONNECT_FAILED Handle = 0x80370108
+ HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109
+ HCS_E_CONNECTION_CLOSED Handle = 0x8037010A
+ HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B
+ HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C
+ HCS_E_INVALID_JSON Handle = 0x8037010D
+ HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E
+ HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F
+ HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110
+ HCS_E_PROTOCOL_ERROR Handle = 0x80370111
+ HCS_E_INVALID_LAYER Handle = 0x80370112
+ HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113
+ HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114
+ HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115
+ HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116
+ HCS_E_OPERATION_PENDING Handle = 0x80370117
+ HCS_E_OPERATION_TIMEOUT Handle = 0x80370118
+ HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119
+ HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A
+ HCS_E_ACCESS_DENIED Handle = 0x8037011B
+ HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C
+ ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200
+ ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001
+ WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300
+ WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301
+ WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302
+ WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303
+ WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304
+ WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305
+ WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306
+ WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307
+ WHV_E_INVALID_VP_STATE Handle = 0x80370308
+ WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309
+ ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400
+ ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401
+ ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001
+ ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002
+ ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001
+ ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002
+ ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003
+ ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004
+ ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005
+ ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006
+ ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007
+ ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008
+ ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009
+ ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A
+ ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B
+ ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C
+ ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D
+ ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E
+ ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F
+ ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010
+ ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011
+ ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012
+ ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013
+ ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014
+ ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015
+ ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016
+ ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017
+ ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018
+ ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019
+ ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A
+ ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B
+ ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C
+ ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D
+ ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E
+ ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F
+ ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020
+ ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021
+ ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022
+ ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023
+ ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024
+ ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025
+ ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026
+ ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027
+ ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028
+ ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029
+ ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A
+ ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B
+ ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C
+ ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D
+ ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E
+ ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F
+ ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030
+ ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031
+ ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032
+ ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033
+ ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034
+ ERROR_VOLMGR_PACK_WITHOUT_QUORUM syscall.Errno = 0xC0380035
+ ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036
+ ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037
+ ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 0xC0380038
+ ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039
+ ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A
+ ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B
+ ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C
+ ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D
+ ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E
+ ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F
+ ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040
+ ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041
+ ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042
+ ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043
+ ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044
+ ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045
+ ERROR_VOLMGR_VOLUME_ID_INVALID syscall.Errno = 0xC0380046
+ ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047
+ ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048
+ ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049
+ ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A
+ ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B
+ ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C
+ ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D
+ ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E
+ ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F
+ ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050
+ ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051
+ ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052
+ ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053
+ ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054
+ ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055
+ ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056
+ ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057
+ ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058
+ ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059
+ ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A
+ ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno = 0xC038005B
+ ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C
+ ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001
+ ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002
+ ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003
+ ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001
+ ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002
+ ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003
+ ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004
+ ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005
+ ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006
+ ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007
+ ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008
+ ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009
+ ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A
+ ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B
+ ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C
+ ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D
+ ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E
+ ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F
+ ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010
+ ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011
+ ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012
+ ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013
+ ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014
+ ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015
+ ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016
+ ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017
+ ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018
+ ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019
+ ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A
+ ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B
+ ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C
+ ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D
+ ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E
+ ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F
+ ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020
+ ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021
+ ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022
+ ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023
+ ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024
+ ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025
+ ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026
+ ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027
+ ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028
+ ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029
+ ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A
+ ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030
+ ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001
+ HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001
+ HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002
+ HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003
+ HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004
+ HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005
+ HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006
+ HCN_E_PORT_NOT_FOUND Handle = 0x803B0007
+ HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008
+ HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009
+ HCN_E_INVALID_NETWORK Handle = 0x803B000A
+ HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B
+ HCN_E_INVALID_ENDPOINT Handle = 0x803B000C
+ HCN_E_INVALID_POLICY Handle = 0x803B000D
+ HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E
+ HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F
+ HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010
+ HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011
+ HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012
+ HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013
+ HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014
+ HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015
+ HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016
+ HCN_E_DEGRADED_OPERATION Handle = 0x803B0017
+ HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018
+ HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019
+ HCN_E_REGKEY_FAILURE Handle = 0x803B001A
+ HCN_E_INVALID_JSON Handle = 0x803B001B
+ HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C
+ HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D
+ HCN_E_INVALID_IP Handle = 0x803B001E
+ HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F
+ HCN_E_MANAGER_STOPPED Handle = 0x803B0020
+ GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021
+ GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022
+ GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023
+ GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024
+ GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025
+ GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026
+ GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027
+ GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028
+ GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029
+ SDIAG_E_CANCELLED syscall.Errno = 0x803C0100
+ SDIAG_E_SCRIPT syscall.Errno = 0x803C0101
+ SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102
+ SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103
+ SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104
+ SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105
+ SDIAG_E_DISABLED syscall.Errno = 0x803C0106
+ SDIAG_E_TRUST syscall.Errno = 0x803C0107
+ SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108
+ SDIAG_E_VERSION syscall.Errno = 0x803C0109
+ SDIAG_E_RESOURCE syscall.Errno = 0x803C010A
+ SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B
+ WPN_E_CHANNEL_CLOSED Handle = 0x803E0100
+ WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101
+ WPN_E_INVALID_APP Handle = 0x803E0102
+ WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103
+ WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104
+ WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105
+ WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106
+ WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107
+ WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108
+ WPN_E_CLOUD_DISABLED Handle = 0x803E0109
+ WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110
+ WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A
+ WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B
+ WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C
+ WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111
+ WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112
+ WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113
+ WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114
+ WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115
+ WPN_E_TAG_SIZE Handle = 0x803E0116
+ WPN_E_ACCESS_DENIED Handle = 0x803E0117
+ WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118
+ WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119
+ WPN_E_DEV_ID_SIZE Handle = 0x803E0120
+ WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A
+ WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B
+ WPN_E_OUT_OF_SESSION Handle = 0x803E0200
+ WPN_E_POWER_SAVE Handle = 0x803E0201
+ WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202
+ WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203
+ WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204
+ WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205
+ WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206
+ WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207
+ WPN_E_STORAGE_LOCKED Handle = 0x803E0208
+ WPN_E_GROUP_SIZE Handle = 0x803E0209
+ WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A
+ WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B
+ E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201
+ E_MBN_BAD_SIM Handle = 0x80548202
+ E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203
+ E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204
+ E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205
+ E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206
+ E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207
+ E_MBN_RADIO_POWER_OFF Handle = 0x80548208
+ E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209
+ E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A
+ E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B
+ E_MBN_INVALID_CACHE Handle = 0x8054820C
+ E_MBN_NOT_REGISTERED Handle = 0x8054820D
+ E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E
+ E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F
+ E_MBN_PIN_REQUIRED Handle = 0x80548210
+ E_MBN_PIN_DISABLED Handle = 0x80548211
+ E_MBN_FAILURE Handle = 0x80548212
+ E_MBN_INVALID_PROFILE Handle = 0x80548218
+ E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219
+ E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220
+ E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221
+ E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222
+ E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223
+ E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224
+ E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225
+ E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226
+ E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227
+ E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228
+ E_MBN_SMS_MEMORY_FULL Handle = 0x80548229
+ PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001
+ PEER_E_NOT_INITIALIZED Handle = 0x80630002
+ PEER_E_CANNOT_START_SERVICE Handle = 0x80630003
+ PEER_E_NOT_LICENSED Handle = 0x80630004
+ PEER_E_INVALID_GRAPH Handle = 0x80630010
+ PEER_E_DBNAME_CHANGED Handle = 0x80630011
+ PEER_E_DUPLICATE_GRAPH Handle = 0x80630012
+ PEER_E_GRAPH_NOT_READY Handle = 0x80630013
+ PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014
+ PEER_E_GRAPH_IN_USE Handle = 0x80630015
+ PEER_E_INVALID_DATABASE Handle = 0x80630016
+ PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017
+ PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103
+ PEER_E_CONNECT_SELF Handle = 0x80630106
+ PEER_E_ALREADY_LISTENING Handle = 0x80630107
+ PEER_E_NODE_NOT_FOUND Handle = 0x80630108
+ PEER_E_CONNECTION_FAILED Handle = 0x80630109
+ PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A
+ PEER_E_CONNECTION_REFUSED Handle = 0x8063010B
+ PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201
+ PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202
+ PEER_E_NO_KEY_ACCESS Handle = 0x80630203
+ PEER_E_GROUPS_EXIST Handle = 0x80630204
+ PEER_E_RECORD_NOT_FOUND Handle = 0x80630301
+ PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302
+ PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303
+ PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304
+ PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305
+ PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306
+ PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401
+ PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501
+ PEER_E_INVALID_SEARCH Handle = 0x80630601
+ PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602
+ PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701
+ PEER_E_CHAIN_TOO_LONG Handle = 0x80630703
+ PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705
+ PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706
+ PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801
+ PEER_E_NO_CLOUD Handle = 0x80631001
+ PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005
+ PEER_E_INVALID_RECORD Handle = 0x80632010
+ PEER_E_NOT_AUTHORIZED Handle = 0x80632020
+ PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021
+ PEER_E_DEFERRED_VALIDATION Handle = 0x80632030
+ PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040
+ PEER_E_INVALID_PEER_NAME Handle = 0x80632050
+ PEER_E_INVALID_CLASSIFIER Handle = 0x80632060
+ PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070
+ PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071
+ PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072
+ PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080
+ PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081
+ PEER_E_INVALID_CREDENTIAL Handle = 0x80632082
+ PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083
+ PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090
+ PEER_E_GROUP_NOT_READY Handle = 0x80632091
+ PEER_E_GROUP_IN_USE Handle = 0x80632092
+ PEER_E_INVALID_GROUP Handle = 0x80632093
+ PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094
+ PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095
+ PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096
+ PEER_E_IDENTITY_DELETED Handle = 0x806320A0
+ PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1
+ PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001
+ PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001
+ PEER_S_NO_EVENT_DATA Handle = 0x00630002
+ PEER_S_ALREADY_CONNECTED Handle = 0x00632000
+ PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000
+ PEER_S_NO_CONNECTIVITY Handle = 0x00630005
+ PEER_S_ALREADY_A_MEMBER Handle = 0x00630006
+ PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001
+ PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002
+ PEER_E_NO_MORE Handle = 0x80634003
+ PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005
+ PEER_E_INVITE_CANCELLED Handle = 0x80637000
+ PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001
+ PEER_E_NOT_SIGNED_IN Handle = 0x80637003
+ PEER_E_PRIVACY_DECLINED Handle = 0x80637004
+ PEER_E_TIMEOUT Handle = 0x80637005
+ PEER_E_INVALID_ADDRESS Handle = 0x80637007
+ PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008
+ PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009
+ PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A
+ PEER_E_FW_DECLINED Handle = 0x8063700B
+ UI_E_CREATE_FAILED Handle = 0x802A0001
+ UI_E_SHUTDOWN_CALLED Handle = 0x802A0002
+ UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003
+ UI_E_OBJECT_SEALED Handle = 0x802A0004
+ UI_E_VALUE_NOT_SET Handle = 0x802A0005
+ UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006
+ UI_E_INVALID_OUTPUT Handle = 0x802A0007
+ UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008
+ UI_E_DIFFERENT_OWNER Handle = 0x802A0009
+ UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A
+ UI_E_FP_OVERFLOW Handle = 0x802A000B
+ UI_E_WRONG_THREAD Handle = 0x802A000C
+ UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101
+ UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102
+ UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103
+ UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104
+ UI_E_LOOPS_OVERLAP Handle = 0x802A0105
+ UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106
+ UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107
+ UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108
+ UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109
+ UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A
+ UI_E_INVALID_DIMENSION Handle = 0x802A010B
+ UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C
+ UI_E_WINDOW_CLOSED Handle = 0x802A0201
+ E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001
+ E_BLUETOOTH_ATT_READ_NOT_PERMITTED Handle = 0x80650002
+ E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003
+ E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004
+ E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005
+ E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006
+ E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007
+ E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008
+ E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009
+ E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A
+ E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B
+ E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C
+ E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D
+ E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E
+ E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F
+ E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010
+ E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011
+ E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000
+ E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001
+ E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002
+ E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003
+ E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004
+ E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005
+ STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001
+ STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002
+ STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003
+ STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004
+ STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005
+ STATEREPOSITORY_E_BLOCKED Handle = 0x80670006
+ STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007
+ STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008
+ STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009
+ STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A
+ STATEREPOSITORY_E_TRANSACTION_REQUIRED Handle = 0x8067000B
+ STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C
+ STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D
+ STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E
+ STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F
+ STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010
+ STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011
+ STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012
+ STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013
+ STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014
+ ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001
+ ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001
+ ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002
+ ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003
+ ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004
+ ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006
+ ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007
+ ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008
+ ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009
+ ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A
+ ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B
+ ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C
+ ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D
+ ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E
+ ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F
+ ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010
+ ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011
+ ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 0x80E70012
+ ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013
+ ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014
+ ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001
+ ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002
+ ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001
+ ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002
+ ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003
+ ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004
+ ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005
+ ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006
+ ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007
+ ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008
+ ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009
+ ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A
+ ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000
+ ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001
+ ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002
+ ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003
+ ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004
+ ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005
+ ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006
+ ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007
+ ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008
+ ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009
+ ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A
+ DXGI_STATUS_OCCLUDED Handle = 0x087A0001
+ DXGI_STATUS_CLIPPED Handle = 0x087A0002
+ DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004
+ DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005
+ DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006
+ DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007
+ DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008
+ DXGI_ERROR_INVALID_CALL Handle = 0x887A0001
+ DXGI_ERROR_NOT_FOUND Handle = 0x887A0002
+ DXGI_ERROR_MORE_DATA Handle = 0x887A0003
+ DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004
+ DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005
+ DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006
+ DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007
+ DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A
+ DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B
+ DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C
+ DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020
+ DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021
+ DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022
+ DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023
+ DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024
+ DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026
+ DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027
+ DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028
+ DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029
+ DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A
+ DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B
+ DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C
+ DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D
+ DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E
+ DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030
+ DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031
+ DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032
+ DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009
+ DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A
+ DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025
+ DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F
+ DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033
+ DXGI_ERROR_CACHE_FULL Handle = 0x887A0034
+ DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035
+ DXGI_ERROR_ALREADY_EXISTS Handle = 0x887A0036
+ DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001
+ DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002
+ DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003
+ D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001
+ D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002
+ D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001
+ D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002
+ D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003
+ D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004
+ D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001
+ D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002
+ D2DERR_WRONG_STATE Handle = 0x88990001
+ D2DERR_NOT_INITIALIZED Handle = 0x88990002
+ D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003
+ D2DERR_SCANNER_FAILED Handle = 0x88990004
+ D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005
+ D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006
+ D2DERR_ZERO_VECTOR Handle = 0x88990007
+ D2DERR_INTERNAL_ERROR Handle = 0x88990008
+ D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009
+ D2DERR_INVALID_CALL Handle = 0x8899000A
+ D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B
+ D2DERR_RECREATE_TARGET Handle = 0x8899000C
+ D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D
+ D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E
+ D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F
+ D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010
+ D2DERR_BAD_NUMBER Handle = 0x88990011
+ D2DERR_WRONG_FACTORY Handle = 0x88990012
+ D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013
+ D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014
+ D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015
+ D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016
+ D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017
+ D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018
+ D2DERR_WIN32_ERROR Handle = 0x88990019
+ D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A
+ D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B
+ D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C
+ D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D
+ D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E
+ D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F
+ D2DERR_CYCLIC_GRAPH Handle = 0x88990020
+ D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021
+ D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022
+ D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023
+ D2DERR_INVALID_TARGET Handle = 0x88990024
+ D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025
+ D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026
+ D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027
+ D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028
+ D2DERR_INVALID_PROPERTY Handle = 0x88990029
+ D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A
+ D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B
+ D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C
+ D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D
+ D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E
+ DWRITE_E_FILEFORMAT Handle = 0x88985000
+ DWRITE_E_UNEXPECTED Handle = 0x88985001
+ DWRITE_E_NOFONT Handle = 0x88985002
+ DWRITE_E_FILENOTFOUND Handle = 0x88985003
+ DWRITE_E_FILEACCESS Handle = 0x88985004
+ DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005
+ DWRITE_E_ALREADYREGISTERED Handle = 0x88985006
+ DWRITE_E_CACHEFORMAT Handle = 0x88985007
+ DWRITE_E_CACHEVERSION Handle = 0x88985008
+ DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009
+ DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A
+ DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B
+ DWRITE_E_NOCOLOR Handle = 0x8898500C
+ DWRITE_E_REMOTEFONT Handle = 0x8898500D
+ DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E
+ DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F
+ DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010
+ WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04
+ WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05
+ WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07
+ WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B
+ WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C
+ WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D
+ WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40
+ WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41
+ WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42
+ WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43
+ WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44
+ WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45
+ WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46
+ WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48
+ WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49
+ WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50
+ WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51
+ WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52
+ WINCODEC_ERR_BADIMAGE Handle = 0x88982F60
+ WINCODEC_ERR_BADHEADER Handle = 0x88982F61
+ WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62
+ WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63
+ WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70
+ WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71
+ WINCODEC_ERR_STREAMREAD Handle = 0x88982F72
+ WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73
+ WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80
+ WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81
+ WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A
+ WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B
+ WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C
+ WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle = 0x88982F8D
+ WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E
+ WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F
+ WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90
+ WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91
+ WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92
+ WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93
+ WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94
+ WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95
+ WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96
+ MILERR_OBJECTBUSY Handle = 0x88980001
+ MILERR_INSUFFICIENTBUFFER Handle = 0x88980002
+ MILERR_WIN32ERROR Handle = 0x88980003
+ MILERR_SCANNER_FAILED Handle = 0x88980004
+ MILERR_SCREENACCESSDENIED Handle = 0x88980005
+ MILERR_DISPLAYSTATEINVALID Handle = 0x88980006
+ MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007
+ MILERR_ZEROVECTOR Handle = 0x88980008
+ MILERR_TERMINATED Handle = 0x88980009
+ MILERR_BADNUMBER Handle = 0x8898000A
+ MILERR_INTERNALERROR Handle = 0x88980080
+ MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084
+ MILERR_INVALIDCALL Handle = 0x88980085
+ MILERR_ALREADYLOCKED Handle = 0x88980086
+ MILERR_NOTLOCKED Handle = 0x88980087
+ MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088
+ MILERR_GLYPHBITMAPMISSED Handle = 0x88980089
+ MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A
+ MILERR_GENERIC_IGNORE Handle = 0x8898008B
+ MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C
+ MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D
+ MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E
+ MILERR_ALREADY_INITIALIZED Handle = 0x8898008F
+ MILERR_MISMATCHED_SIZE Handle = 0x88980090
+ MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091
+ MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092
+ MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093
+ MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094
+ MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095
+ MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096
+ MILERR_MROW_READLOCK_FAILED Handle = 0x88980097
+ MILERR_MROW_UPDATE_FAILED Handle = 0x88980098
+ MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099
+ MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A
+ MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B
+ MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D
+ MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E
+ MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F
+ MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0
+ MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1
+ UCEERR_INVALIDPACKETHEADER Handle = 0x88980400
+ UCEERR_UNKNOWNPACKET Handle = 0x88980401
+ UCEERR_ILLEGALPACKET Handle = 0x88980402
+ UCEERR_MALFORMEDPACKET Handle = 0x88980403
+ UCEERR_ILLEGALHANDLE Handle = 0x88980404
+ UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405
+ UCEERR_RENDERTHREADFAILURE Handle = 0x88980406
+ UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407
+ UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408
+ UCEERR_BLOCKSFULL Handle = 0x88980409
+ UCEERR_MEMORYFAILURE Handle = 0x8898040A
+ UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B
+ UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C
+ UCEERR_OUTOFHANDLES Handle = 0x8898040D
+ UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E
+ UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F
+ UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410
+ UCEERR_MISSINGENDCOMMAND Handle = 0x88980411
+ UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412
+ UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413
+ UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414
+ UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415
+ UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416
+ UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417
+ UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418
+ UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419
+ UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420
+ UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421
+ UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422
+ UCEERR_PARTITION_ZOMBIED Handle = 0x88980423
+ MILAVERR_NOCLOCK Handle = 0x88980500
+ MILAVERR_NOMEDIATYPE Handle = 0x88980501
+ MILAVERR_NOVIDEOMIXER Handle = 0x88980502
+ MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503
+ MILAVERR_NOREADYFRAMES Handle = 0x88980504
+ MILAVERR_MODULENOTLOADED Handle = 0x88980505
+ MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506
+ MILAVERR_INVALIDWMPVERSION Handle = 0x88980507
+ MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508
+ MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509
+ MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A
+ MILAVERR_SEEKFAILED Handle = 0x8898050B
+ MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C
+ MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D
+ MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E
+ MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E
+ MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F
+ MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610
+ MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611
+ MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612
+ MILEFFECTSERR_RESERVED Handle = 0x88980613
+ MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614
+ MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615
+ MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616
+ MILEFFECTSERR_EFFECTHASNOCHILDREN Handle = 0x88980617
+ MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618
+ MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619
+ MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A
+ MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B
+ DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700
+ DWMERR_THEME_FAILED Handle = 0x88980701
+ DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702
+ DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800
+ DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801
+ DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802
+ ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001
+ ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002
+ ONL_E_INVALID_APPLICATION Handle = 0x80860003
+ ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004
+ ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005
+ ONL_E_FORCESIGNIN Handle = 0x80860006
+ ONL_E_ACCOUNT_LOCKED Handle = 0x80860007
+ ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008
+ ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009
+ ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A
+ ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B
+ ONL_E_ACTION_REQUIRED Handle = 0x8086000C
+ ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D
+ ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E
+ ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F
+ ONL_E_REQUEST_THROTTLED Handle = 0x80860010
+ FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220
+ FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222
+ E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250
+ E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251
+ E_UAC_DISABLED Handle = 0x80270252
+ E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253
+ E_APPLICATION_NOT_REGISTERED Handle = 0x80270254
+ E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255
+ E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 0x80270256
+ E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257
+ S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258
+ S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259
+ E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A
+ E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B
+ E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C
+ E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D
+ E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260
+ E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261
+ E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262
+ E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263
+ E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264
+ E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265
+ E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001
+ E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002
+ E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003
+ E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004
+ E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005
+ E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006
+ E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002
+ E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003
+ E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004
+ E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005
+ E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006
+ E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007
+ E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001
+ E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002
+ E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 0x8802D003
+ E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004
+ E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005
+ E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006
+ E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007
+ E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008
+ E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009
+ E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A
+ E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B
+ EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001
+ EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002
+ EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003
+ EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004
+ EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005
+ EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006
+ EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007
+ EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008
+ EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009
+ EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A
+ EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B
+ EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C
+ EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D
+ WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001
+ WEB_E_INVALID_XML Handle = 0x83750002
+ WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003
+ WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004
+ WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005
+ WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006
+ WEB_E_INVALID_JSON_STRING Handle = 0x83750007
+ WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008
+ WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009
+ HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001
+ HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003
+ HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004
+ HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005
+ HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C
+ HTTP_E_STATUS_MOVED Handle = 0x8019012D
+ HTTP_E_STATUS_REDIRECT Handle = 0x8019012E
+ HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F
+ HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130
+ HTTP_E_STATUS_USE_PROXY Handle = 0x80190131
+ HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133
+ HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190
+ HTTP_E_STATUS_DENIED Handle = 0x80190191
+ HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192
+ HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193
+ HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194
+ HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195
+ HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196
+ HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197
+ HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198
+ HTTP_E_STATUS_CONFLICT Handle = 0x80190199
+ HTTP_E_STATUS_GONE Handle = 0x8019019A
+ HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B
+ HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C
+ HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D
+ HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E
+ HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F
+ HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0
+ HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1
+ HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4
+ HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5
+ HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6
+ HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7
+ HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8
+ HTTP_E_STATUS_VERSION_NOT_SUP Handle = 0x801901F9
+ E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001
+ E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002
+ E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003
+ E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004
+ E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005
+ INPUT_E_OUT_OF_ORDER Handle = 0x80400000
+ INPUT_E_REENTRANCY Handle = 0x80400001
+ INPUT_E_MULTIMODAL Handle = 0x80400002
+ INPUT_E_PACKET Handle = 0x80400003
+ INPUT_E_FRAME Handle = 0x80400004
+ INPUT_E_HISTORY Handle = 0x80400005
+ INPUT_E_DEVICE_INFO Handle = 0x80400006
+ INPUT_E_TRANSFORM Handle = 0x80400007
+ INPUT_E_DEVICE_PROPERTY Handle = 0x80400008
+ INET_E_INVALID_URL Handle = 0x800C0002
+ INET_E_NO_SESSION Handle = 0x800C0003
+ INET_E_CANNOT_CONNECT Handle = 0x800C0004
+ INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005
+ INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006
+ INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007
+ INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008
+ INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009
+ INET_E_NO_VALID_MEDIA Handle = 0x800C000A
+ INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B
+ INET_E_INVALID_REQUEST Handle = 0x800C000C
+ INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D
+ INET_E_SECURITY_PROBLEM Handle = 0x800C000E
+ INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F
+ INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010
+ INET_E_INVALID_CERTIFICATE Handle = 0x800C0019
+ INET_E_REDIRECT_FAILED Handle = 0x800C0014
+ INET_E_REDIRECT_TO_DIR Handle = 0x800C0015
+ ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001
+ ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002
+ ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003
+ ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004
+ ERROR_IO_PREEMPTED Handle = 0x89010001
+ JSCRIPT_E_CANTEXECUTE Handle = 0x89020001
+ WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x88010001
+ WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002
+ WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003
+ WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004
+ WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005
+ WEP_E_NO_LICENSE Handle = 0x88010006
+ WEP_E_OS_NOT_PROTECTED Handle = 0x88010007
+ WEP_E_UNEXPECTED_FAIL Handle = 0x88010008
+ WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009
+ ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000
+ ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00
+ ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01
+ ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02
+ ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03
+ ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04
+ ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05
+ ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06
+ ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07
+ ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08
+ ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09
+ ERROR_VHD_SHARED Handle = 0xC05CFF0A
+ ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B
+ ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C
+ ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000
+ ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001
+ WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1
+ WININET_E_TIMEOUT Handle = 0x80072EE2
+ WININET_E_EXTENDED_ERROR Handle = 0x80072EE3
+ WININET_E_INTERNAL_ERROR Handle = 0x80072EE4
+ WININET_E_INVALID_URL Handle = 0x80072EE5
+ WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6
+ WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7
+ WININET_E_PROTOCOL_NOT_FOUND Handle = 0x80072EE8
+ WININET_E_INVALID_OPTION Handle = 0x80072EE9
+ WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA
+ WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB
+ WININET_E_SHUTDOWN Handle = 0x80072EEC
+ WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED
+ WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE
+ WININET_E_LOGIN_FAILURE Handle = 0x80072EEF
+ WININET_E_INVALID_OPERATION Handle = 0x80072EF0
+ WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1
+ WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2
+ WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3
+ WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4
+ WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5
+ WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6
+ WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7
+ WININET_E_NO_CONTEXT Handle = 0x80072EF8
+ WININET_E_NO_CALLBACK Handle = 0x80072EF9
+ WININET_E_REQUEST_PENDING Handle = 0x80072EFA
+ WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB
+ WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC
+ WININET_E_CANNOT_CONNECT Handle = 0x80072EFD
+ WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE
+ WININET_E_CONNECTION_RESET Handle = 0x80072EFF
+ WININET_E_FORCE_RETRY Handle = 0x80072F00
+ WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01
+ WININET_E_NEED_UI Handle = 0x80072F02
+ WININET_E_HANDLE_EXISTS Handle = 0x80072F04
+ WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05
+ WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06
+ WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07
+ WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08
+ WININET_E_MIXED_SECURITY Handle = 0x80072F09
+ WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A
+ WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B
+ WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C
+ WININET_E_INVALID_CA Handle = 0x80072F0D
+ WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E
+ WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F
+ WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10
+ WININET_E_DIALOG_PENDING Handle = 0x80072F11
+ WININET_E_RETRY_DIALOG Handle = 0x80072F12
+ WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13
+ WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14
+ WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17
+ WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19
+ WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76
+ WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77
+ WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78
+ WININET_E_INVALID_HEADER Handle = 0x80072F79
+ WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A
+ WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B
+ WININET_E_REDIRECT_FAILED Handle = 0x80072F7C
+ WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D
+ WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E
+ WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F
+ WININET_E_DISCONNECTED Handle = 0x80072F83
+ WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84
+ WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85
+ WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86
+ WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87
+ WININET_E_SEC_INVALID_CERT Handle = 0x80072F89
+ WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A
+ WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B
+ WININET_E_NOT_INITIALIZED Handle = 0x80072F8C
+ WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E
+ WININET_E_DECODING_FAILED Handle = 0x80072F8F
+ WININET_E_NOT_REDIRECTED Handle = 0x80072F80
+ WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81
+ WININET_E_COOKIE_DECLINED Handle = 0x80072F82
+ WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88
+ SQLITE_E_ERROR Handle = 0x87AF0001
+ SQLITE_E_INTERNAL Handle = 0x87AF0002
+ SQLITE_E_PERM Handle = 0x87AF0003
+ SQLITE_E_ABORT Handle = 0x87AF0004
+ SQLITE_E_BUSY Handle = 0x87AF0005
+ SQLITE_E_LOCKED Handle = 0x87AF0006
+ SQLITE_E_NOMEM Handle = 0x87AF0007
+ SQLITE_E_READONLY Handle = 0x87AF0008
+ SQLITE_E_INTERRUPT Handle = 0x87AF0009
+ SQLITE_E_IOERR Handle = 0x87AF000A
+ SQLITE_E_CORRUPT Handle = 0x87AF000B
+ SQLITE_E_NOTFOUND Handle = 0x87AF000C
+ SQLITE_E_FULL Handle = 0x87AF000D
+ SQLITE_E_CANTOPEN Handle = 0x87AF000E
+ SQLITE_E_PROTOCOL Handle = 0x87AF000F
+ SQLITE_E_EMPTY Handle = 0x87AF0010
+ SQLITE_E_SCHEMA Handle = 0x87AF0011
+ SQLITE_E_TOOBIG Handle = 0x87AF0012
+ SQLITE_E_CONSTRAINT Handle = 0x87AF0013
+ SQLITE_E_MISMATCH Handle = 0x87AF0014
+ SQLITE_E_MISUSE Handle = 0x87AF0015
+ SQLITE_E_NOLFS Handle = 0x87AF0016
+ SQLITE_E_AUTH Handle = 0x87AF0017
+ SQLITE_E_FORMAT Handle = 0x87AF0018
+ SQLITE_E_RANGE Handle = 0x87AF0019
+ SQLITE_E_NOTADB Handle = 0x87AF001A
+ SQLITE_E_NOTICE Handle = 0x87AF001B
+ SQLITE_E_WARNING Handle = 0x87AF001C
+ SQLITE_E_ROW Handle = 0x87AF0064
+ SQLITE_E_DONE Handle = 0x87AF0065
+ SQLITE_E_IOERR_READ Handle = 0x87AF010A
+ SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A
+ SQLITE_E_IOERR_WRITE Handle = 0x87AF030A
+ SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A
+ SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A
+ SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A
+ SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A
+ SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A
+ SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A
+ SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A
+ SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A
+ SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A
+ SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A
+ SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A
+ SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A
+ SQLITE_E_IOERR_CLOSE Handle = 0x87AF100A
+ SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A
+ SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A
+ SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A
+ SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A
+ SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A
+ SQLITE_E_IOERR_SEEK Handle = 0x87AF160A
+ SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A
+ SQLITE_E_IOERR_MMAP Handle = 0x87AF180A
+ SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A
+ SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A
+ SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02
+ SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03
+ SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106
+ SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105
+ SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205
+ SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E
+ SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E
+ SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E
+ SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E
+ SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B
+ SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108
+ SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208
+ SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308
+ SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408
+ SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204
+ SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113
+ SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213
+ SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313
+ SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413
+ SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513
+ SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613
+ SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713
+ SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813
+ SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913
+ SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13
+ SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B
+ SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B
+ SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C
+ UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001
+ UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002
+ UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003
+ UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004
+ UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005
+ UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006
+ UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007
+ UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008
+ UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009
+ UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A
+ UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B
+ UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C
+ UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D
+ UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E
+ UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F
+ UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010
+ UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011
+ UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012
+ UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013
+ UTC_E_SQM_INIT_FAILED Handle = 0x87C51014
+ UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015
+ UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016
+ UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017
+ UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018
+ UTC_E_INVALID_FILTER Handle = 0x87C51019
+ UTC_E_EXE_TERMINATED Handle = 0x87C5101A
+ UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B
+ UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C
+ UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D
+ UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E
+ UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F
+ UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020
+ UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021
+ UTC_E_TRIGGER_MISMATCH Handle = 0x87C51022
+ UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023
+ UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024
+ UTC_E_DELAY_TERMINATED Handle = 0x87C51025
+ UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026
+ UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027
+ UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028
+ UTC_E_RPC_TIMEOUT Handle = 0x87C51029
+ UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A
+ UTC_E_API_BUSY Handle = 0x87C5102B
+ UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C
+ UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D
+ UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E
+ UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F
+ UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030
+ UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031
+ UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032
+ UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033
+ UTC_E_BINARY_MISSING Handle = 0x87C51034
+ UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035
+ UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036
+ UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037
+ UTC_E_THROTTLED Handle = 0x87C51038
+ UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039
+ UTC_E_SCRIPT_MISSING Handle = 0x87C5103A
+ UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B
+ UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C
+ UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D
+ UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E
+ UTC_E_CERT_REV_FAILED Handle = 0x87C5103F
+ UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040
+ UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 0x87C51041
+ UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042
+ UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043
+ UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044
+ UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045
+ UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046
+ UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047
+ UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048
+ UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049
+ UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050
+ UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051
+ UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052
+ UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053
+ UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054
+ UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055
+ UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056
+ UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057
+ UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058
+ UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 0x87C51059
+ UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A
+ UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B
+ WINML_ERR_INVALID_DEVICE Handle = 0x88900001
+ WINML_ERR_INVALID_BINDING Handle = 0x88900002
+ WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003
+ WINML_ERR_SIZE_MISMATCH Handle = 0x88900004
+)
diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
new file mode 100644
index 000000000..6048ac679
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
@@ -0,0 +1,149 @@
+// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT.
+
+package windows
+
+type KNOWNFOLDERID GUID
+
+var (
+ FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}}
+ FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}}
+ FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}}
+ FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}}
+ FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}}
+ FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}}
+ FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}}
+ FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}}
+ FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}}
+ FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}}
+ FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}}
+ FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}}
+ FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}}
+ FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}}
+ FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}}
+ FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}}
+ FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}}
+ FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}}
+ FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}}
+ FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}}
+ FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}}
+ FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}}
+ FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}}
+ FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}}
+ FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}}
+ FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}}
+ FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}}
+ FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}}
+ FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}}
+ FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}}
+ FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}}
+ FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}}
+ FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}}
+ FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}}
+ FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}}
+ FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}}
+ FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}}
+ FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}}
+ FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}}
+ FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}}
+ FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}}
+ FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}}
+ FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}}
+ FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}}
+ FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}}
+ FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}}
+ FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}}
+ FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}}
+ FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}}
+ FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}}
+ FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}}
+ FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}}
+ FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}}
+ FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}}
+ FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}}
+ FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}}
+ FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}}
+ FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}}
+ FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}}
+ FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}}
+ FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}}
+ FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}}
+ FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}}
+ FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}}
+ FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}}
+ FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}}
+ FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}}
+ FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}}
+ FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}}
+ FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}}
+ FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}}
+ FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}}
+ FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}}
+ FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}}
+ FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}}
+ FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}}
+ FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}}
+ FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}}
+ FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}}
+ FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}}
+ FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}}
+ FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}}
+ FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}}
+ FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}}
+ FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}}
+ FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}}
+ FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}}
+ FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}}
+ FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}}
+ FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}}
+ FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}}
+ FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}}
+ FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}}
+ FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}}
+ FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}}
+ FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}}
+ FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}}
+ FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}}
+ FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}}
+ FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}}
+ FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}}
+ FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}}
+ FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}}
+ FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}}
+ FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}}
+ FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}}
+ FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}}
+ FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}}
+ FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}}
+ FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}}
+ FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}}
+ FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}}
+ FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}}
+ FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
+ FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
+ FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}}
+ FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}}
+ FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}}
+ FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}}
+ FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}}
+ FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}}
+ FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}}
+ FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}}
+ FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}}
+ FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}}
+ FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}}
+ FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}}
+ FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}}
+ FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}}
+ FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}}
+ FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}}
+ FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}}
+ FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}}
+ FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}}
+ FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}}
+ FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}}
+ FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}}
+ FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}}
+ FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}}
+ FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}}
+ FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}}
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index fc56aec03..d461bed98 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -38,14 +38,18 @@ var (
modadvapi32 = NewLazySystemDLL("advapi32.dll")
modkernel32 = NewLazySystemDLL("kernel32.dll")
modshell32 = NewLazySystemDLL("shell32.dll")
+ moduserenv = NewLazySystemDLL("userenv.dll")
modmswsock = NewLazySystemDLL("mswsock.dll")
modcrypt32 = NewLazySystemDLL("crypt32.dll")
+ moduser32 = NewLazySystemDLL("user32.dll")
+ modole32 = NewLazySystemDLL("ole32.dll")
+ modntdll = NewLazySystemDLL("ntdll.dll")
modws2_32 = NewLazySystemDLL("ws2_32.dll")
moddnsapi = NewLazySystemDLL("dnsapi.dll")
modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
modsecur32 = NewLazySystemDLL("secur32.dll")
modnetapi32 = NewLazySystemDLL("netapi32.dll")
- moduserenv = NewLazySystemDLL("userenv.dll")
+ modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW")
procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource")
@@ -57,6 +61,7 @@ var (
procDeleteService = modadvapi32.NewProc("DeleteService")
procStartServiceW = modadvapi32.NewProc("StartServiceW")
procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus")
+ procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW")
procControlService = modadvapi32.NewProc("ControlService")
procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW")
procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus")
@@ -66,6 +71,7 @@ var (
procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W")
procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW")
procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx")
+ procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW")
procGetLastError = modkernel32.NewProc("GetLastError")
procLoadLibraryW = modkernel32.NewProc("LoadLibraryW")
procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW")
@@ -74,9 +80,11 @@ var (
procGetVersion = modkernel32.NewProc("GetVersion")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
procExitProcess = modkernel32.NewProc("ExitProcess")
+ procIsWow64Process = modkernel32.NewProc("IsWow64Process")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procReadFile = modkernel32.NewProc("ReadFile")
procWriteFile = modkernel32.NewProc("WriteFile")
+ procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procSetFilePointer = modkernel32.NewProc("SetFilePointer")
procCloseHandle = modkernel32.NewProc("CloseHandle")
procGetStdHandle = modkernel32.NewProc("GetStdHandle")
@@ -85,6 +93,7 @@ var (
procFindNextFileW = modkernel32.NewProc("FindNextFileW")
procFindClose = modkernel32.NewProc("FindClose")
procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle")
+ procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW")
procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW")
procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW")
@@ -105,13 +114,17 @@ var (
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateProcessW = modkernel32.NewProc("CreateProcessW")
procOpenProcess = modkernel32.NewProc("OpenProcess")
+ procShellExecuteW = modshell32.NewProc("ShellExecuteW")
+ procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath")
procTerminateProcess = modkernel32.NewProc("TerminateProcess")
procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess")
procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW")
procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procGetProcessTimes = modkernel32.NewProc("GetProcessTimes")
procDuplicateHandle = modkernel32.NewProc("DuplicateHandle")
procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject")
+ procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects")
procGetTempPathW = modkernel32.NewProc("GetTempPathW")
procCreatePipe = modkernel32.NewProc("CreatePipe")
procGetFileType = modkernel32.NewProc("GetFileType")
@@ -122,6 +135,9 @@ var (
procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW")
procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW")
procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW")
+ procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
+ procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
+ procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
procSetFileTime = modkernel32.NewProc("SetFileTime")
procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW")
procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW")
@@ -169,6 +185,8 @@ var (
procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot")
procProcess32FirstW = modkernel32.NewProc("Process32FirstW")
procProcess32NextW = modkernel32.NewProc("Process32NextW")
+ procThread32First = modkernel32.NewProc("Thread32First")
+ procThread32Next = modkernel32.NewProc("Thread32Next")
procDeviceIoControl = modkernel32.NewProc("DeviceIoControl")
procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW")
procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW")
@@ -179,6 +197,18 @@ var (
procSetEvent = modkernel32.NewProc("SetEvent")
procResetEvent = modkernel32.NewProc("ResetEvent")
procPulseEvent = modkernel32.NewProc("PulseEvent")
+ procSleepEx = modkernel32.NewProc("SleepEx")
+ procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW")
+ procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
+ procTerminateJobObject = modkernel32.NewProc("TerminateJobObject")
+ procSetErrorMode = modkernel32.NewProc("SetErrorMode")
+ procResumeThread = modkernel32.NewProc("ResumeThread")
+ procSetPriorityClass = modkernel32.NewProc("SetPriorityClass")
+ procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
+ procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject")
+ procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent")
+ procGetProcessId = modkernel32.NewProc("GetProcessId")
+ procOpenThread = modkernel32.NewProc("OpenThread")
procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW")
procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW")
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
@@ -198,6 +228,12 @@ var (
procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW")
procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW")
procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW")
+ procMessageBoxW = moduser32.NewProc("MessageBoxW")
+ procCLSIDFromString = modole32.NewProc("CLSIDFromString")
+ procStringFromGUID2 = modole32.NewProc("StringFromGUID2")
+ procCoCreateGuid = modole32.NewProc("CoCreateGuid")
+ procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+ procRtlGetVersion = modntdll.NewProc("RtlGetVersion")
procWSAStartup = modws2_32.NewProc("WSAStartup")
procWSACleanup = modws2_32.NewProc("WSACleanup")
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
@@ -245,12 +281,31 @@ var (
procGetLengthSid = modadvapi32.NewProc("GetLengthSid")
procCopySid = modadvapi32.NewProc("CopySid")
procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid")
+ procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid")
+ procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid")
procFreeSid = modadvapi32.NewProc("FreeSid")
procEqualSid = modadvapi32.NewProc("EqualSid")
+ procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority")
+ procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount")
+ procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority")
+ procIsValidSid = modadvapi32.NewProc("IsValidSid")
procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership")
procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procSetThreadToken = modadvapi32.NewProc("SetThreadToken")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups")
procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation")
+ procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
+ procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx")
procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW")
+ procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW")
+ procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken")
+ procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW")
+ procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory")
)
func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
@@ -377,6 +432,18 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
return
}
+func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
@@ -485,6 +552,14 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize
return
}
+func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
+ r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
func GetLastError() (lasterr error) {
r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
if r0 != 0 {
@@ -606,7 +681,19 @@ func ExitProcess(exitcode uint32) {
return
}
-func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) {
+func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
+ r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = Handle(r0)
if handle == InvalidHandle {
@@ -651,6 +738,24 @@ func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
return
}
+func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
+ var _p0 uint32
+ if wait {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
newlowoffset = uint32(r0)
@@ -750,6 +855,18 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e
return
}
+func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
n = uint32(r0)
@@ -973,14 +1090,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
return
}
-func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) {
+func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) {
var _p0 uint32
if inheritHandle {
_p0 = 1
} else {
_p0 = 0
}
- r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid))
+ r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
handle = Handle(r0)
if handle == 0 {
if e1 != 0 {
@@ -992,6 +1109,26 @@ func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err
return
}
+func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
+ r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
func TerminateProcess(handle Handle, exitcode uint32) (err error) {
r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
if r1 == 0 {
@@ -1041,6 +1178,19 @@ func GetCurrentProcess() (pseudoHandle Handle, err error) {
return
}
+func GetCurrentThread() (pseudoHandle Handle, err error) {
+ r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
+ pseudoHandle = Handle(r0)
+ if pseudoHandle == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
if r1 == 0 {
@@ -1084,6 +1234,25 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
return
}
+func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) {
+ var _p0 uint32
+ if waitAll {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+ event = uint32(r0)
+ if event == 0xffffffff {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
n = uint32(r0)
@@ -1208,6 +1377,42 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
return
}
+func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) {
+ var _p0 uint32
+ if inheritExisting {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func DestroyEnvironmentBlock(block *uint16) (err error) {
+ r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func getTickCount64() (ms uint64) {
+ r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+ ms = uint64(r0)
+ return
+}
+
func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
if r1 == 0 {
@@ -1650,7 +1855,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32
return
}
-func getCurrentProcessId() (pid uint32) {
+func GetCurrentProcessId() (pid uint32) {
r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
pid = uint32(r0)
return
@@ -1753,6 +1958,30 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
return
}
+func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
+ r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
+ r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
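The Thread32First/Thread32Next wrappers added above follow the usual Toolhelp snapshot protocol: take a TH32CS_SNAPTHREAD snapshot, set ThreadEntry32.Size before the first call, and iterate until ERROR_NO_MORE_FILES. A sketch; the constant and field names are assumed to follow the package's usual Toolhelp naming.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// threadsOf returns the thread IDs owned by the given process ID.
func threadsOf(pid uint32) ([]uint32, error) {
	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPTHREAD, 0)
	if err != nil {
		return nil, err
	}
	defer windows.CloseHandle(snap)

	var te windows.ThreadEntry32
	te.Size = uint32(unsafe.Sizeof(te)) // the API rejects a zero Size field
	var ids []uint32
	for err = windows.Thread32First(snap, &te); err == nil; err = windows.Thread32Next(snap, &te) {
		if te.OwnerProcessID == pid {
			ids = append(ids, te.ThreadID)
		}
	}
	if err == windows.ERROR_NO_MORE_FILES {
		err = nil // normal end of the enumeration
	}
	return ids, err
}

func main() {
	ids, err := threadsOf(windows.GetCurrentProcessId())
	fmt.Println(ids, err)
}
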
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
if r1 == 0 {
@@ -1876,6 +2105,156 @@ func PulseEvent(event Handle) (err error) {
return
}
+func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
+ var _p0 uint32
+ if alertable {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0)
+ ret = uint32(r0)
+ return
+}
+
+func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
+ r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+ handle = Handle(r0)
+ if handle == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func AssignProcessToJobObject(job Handle, process Handle) (err error) {
+ r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func TerminateJobObject(job Handle, exitCode uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
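Using only the job-object wrappers added above plus CloseHandle, the usual containment pattern looks like the fragment below (it assumes the golang.org/x/sys/windows import): create an anonymous job, assign a process handle to it, and later terminate everything in the job in one call.

// assignToJob is a minimal sketch; real callers usually also configure the
// job via SetInformationJobObject before assigning processes to it.
func assignToJob(process windows.Handle) (windows.Handle, error) {
	job, err := windows.CreateJobObject(nil, nil) // nil name: anonymous job
	if err != nil {
		return 0, err
	}
	if err := windows.AssignProcessToJobObject(job, process); err != nil {
		windows.CloseHandle(job)
		return 0, err
	}
	// Later: windows.TerminateJobObject(job, 1) kills every process in the job.
	return job, nil
}
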
+func SetErrorMode(mode uint32) (ret uint32) {
+ r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0)
+ ret = uint32(r0)
+ return
+}
+
+func ResumeThread(thread Handle) (ret uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
+ ret = uint32(r0)
+ if ret == 0xffffffff {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func GetPriorityClass(process Handle) (ret uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+ ret = uint32(r0)
+ if ret == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
+ r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0)
+ ret = int(r0)
+ if ret == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func GetProcessId(process Handle) (id uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+ id = uint32(r0)
+ if id == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) {
+ var _p0 uint32
+ if inheritHandle {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+ handle = Handle(r0)
+ if handle == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
@@ -2103,6 +2482,54 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro
return
}
+func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
+ r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+ ret = int32(r0)
+ if ret == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
+ r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
+func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
+ r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+ chars = int32(r0)
+ return
+}
+
+func coCreateGuid(pguid *GUID) (ret error) {
+ r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
+func CoTaskMemFree(address unsafe.Pointer) {
+ syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+ return
+}
+
+func rtlGetVersion(info *OsVersionInfoEx) (ret error) {
+ r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
if r0 != 0 {
@@ -2633,6 +3060,24 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s
return
}
+func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
+ r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
+ isWellKnown = r0 != 0
+ return
+}
+
func FreeSid(sid *SID) (err error) {
r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
if r1 != 0 {
@@ -2651,6 +3096,30 @@ func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
return
}
+func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
+ r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
+ return
+}
+
+func getSidSubAuthorityCount(sid *SID) (count *uint8) {
+ r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ count = (*uint8)(unsafe.Pointer(r0))
+ return
+}
+
+func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
+ r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
+ subAuthority = (*uint32)(unsafe.Pointer(r0))
+ return
+}
+
+func isValidSid(sid *SID) (isValid bool) {
+ r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ isValid = r0 != 0
+ return
+}
+
func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
if r1 == 0 {
@@ -2663,8 +3132,38 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (
return
}
-func OpenProcessToken(h Handle, access uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token)))
+func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
+ r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) {
+ var _p0 uint32
+ if openAsSelf {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func ImpersonateSelf(impersonationlevel uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
@@ -2675,8 +3174,104 @@ func OpenProcessToken(h Handle, access uint32, token *Token) (err error) {
return
}
-func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+func RevertToSelf() (err error) {
+ r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func SetThreadToken(thread *Handle, token Token) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
+ r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) {
+ var _p0 uint32
+ if disableAllPrivileges {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) {
+ var _p0 uint32
+ if resetToDefault {
+ _p0 = 1
+ } else {
+ _p0 = 0
+ }
+ r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
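The LookupPrivilegeValue and AdjustTokenPrivileges wrappers above combine into the standard enable-a-privilege sequence. A hedged sketch, assuming the usual x/sys/windows names for the token constants and structs (TOKEN_ADJUST_PRIVILEGES, TOKEN_QUERY, SE_PRIVILEGE_ENABLED, Tokenprivileges, LUIDAndAttributes):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// enablePrivilege enables one named privilege on the current process token.
func enablePrivilege(name string) error {
	proc, err := windows.GetCurrentProcess()
	if err != nil {
		return err
	}
	var tok windows.Token
	if err := windows.OpenProcessToken(proc, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &tok); err != nil {
		return err
	}
	defer tok.Close()

	n, err := windows.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	var luid windows.LUID
	if err := windows.LookupPrivilegeValue(nil, n, &luid); err != nil {
		return err
	}
	tp := windows.Tokenprivileges{
		PrivilegeCount: 1,
		Privileges: [1]windows.LUIDAndAttributes{
			{Luid: luid, Attributes: windows.SE_PRIVILEGE_ENABLED},
		},
	}
	// nil prev-state and return-length: the old settings are not needed back.
	return windows.AdjustTokenPrivileges(tok, false, &tp, 0, nil, nil)
}

func main() {
	fmt.Println(enablePrivilege("SeShutdownPrivilege"))
}
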
+func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
+ r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
@@ -2698,3 +3293,45 @@ func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
}
return
}
+
+func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ len = uint32(r0)
+ if len == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func WTSQueryUserToken(session uint32, token *Token) (err error) {
+ r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func WTSFreeMemory(ptr uintptr) {
+ syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
+ return
+}
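
WTSEnumerateSessions allocates the session array on the Windows side, which is why the WTSFreeMemory wrapper above exists; the two calls must be paired. A sketch follows; the WTS_SESSION_INFO field names are assumed from the Win32 WTS_SESSION_INFOW layout, and handle 0 stands for WTS_CURRENT_SERVER_HANDLE.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// sessionIDs lists the terminal-services session IDs on the local machine.
func sessionIDs() ([]uint32, error) {
	var info *windows.WTS_SESSION_INFO
	var count uint32
	// Handle 0 = WTS_CURRENT_SERVER_HANDLE; reserved must be 0, version must be 1.
	if err := windows.WTSEnumerateSessions(0, 0, 1, &info, &count); err != nil {
		return nil, err
	}
	// The array was allocated by the API; release it once we are done.
	defer windows.WTSFreeMemory(uintptr(unsafe.Pointer(info)))

	sessions := (*[1 << 20]windows.WTS_SESSION_INFO)(unsafe.Pointer(info))[:count:count]
	ids := make([]uint32, 0, count)
	for _, s := range sessions {
		ids = append(ids, s.SessionID)
	}
	return ids, nil
}

func main() {
	ids, err := sessionIDs()
	fmt.Println(ids, err)
}
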
diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/text/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/text/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/text/README.md b/vendor/golang.org/x/text/README.md
deleted file mode 100644
index 61d5ee4d1..000000000
--- a/vendor/golang.org/x/text/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# Go Text
-
-This repository holds supplementary Go libraries for text processing, many involving Unicode.
-
-## Semantic Versioning
-This repo uses Semantic versioning (http://semver.org/), so
-1. MAJOR version when you make incompatible API changes,
-1. MINOR version when you add functionality in a backwards-compatible manner,
- and
-1. PATCH version when you make backwards-compatible bug fixes.
-
-Until version 1.0.0 of x/text is reached, the minor version is considered a
-major version. So going from 0.1.0 to 0.2.0 is considered to be a major version
-bump.
-
-A major new CLDR version is mapped to a minor version increase in x/text.
-Any other new CLDR version is mapped to a patch version increase in x/text.
-
-It is important that the Unicode version used in `x/text` matches the one used
-by your Go compiler. The `x/text` repository supports multiple versions of
-Unicode and will match the version of Unicode to that of the Go compiler. At the
-moment this is supported for Go compilers from version 1.7.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/text`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/text`.
-
-## Contribute
-To submit changes to this repository, see http://golang.org/doc/contribute.html.
-
-To generate the tables in this repository (except for the encoding tables),
-run go generate from this directory. By default tables are generated for the
-Unicode version in core and the CLDR version defined in
-golang.org/x/text/unicode/cldr.
-
-Running go generate will as a side effect create a DATA subdirectory in this
-directory, which holds all files that are used as a source for generating the
-tables. This directory will also serve as a cache.
-
-## Testing
-Run
-
- go test ./...
-
-from this directory to run all tests. Add the "-tags icu" flag to also run
-ICU conformance tests (if available). This requires that you have the correct
-ICU version installed on your system.
-
-TODO:
-- updating unversioned source files.
-
-## Generating Tables
-
-To generate the tables in this repository (except for the encoding
-tables), run `go generate` from this directory. By default tables are
-generated for the Unicode version in core and the CLDR version defined in
-golang.org/x/text/unicode/cldr.
-
-Running go generate will as a side effect create a DATA subdirectory in this
-directory which holds all files that are used as a source for generating the
-tables. This directory will also serve as a cache.
-
-## Versions
-To update a Unicode version run
-
- UNICODE_VERSION=x.x.x go generate
-
-where `x.x.x` must correspond to a directory in https://www.unicode.org/Public/.
-If this version is newer than the version in core it will also update the
-relevant packages there. The idna package in x/net will always be updated.
-
-To update a CLDR version run
-
- CLDR_VERSION=version go generate
-
-where `version` must correspond to a directory in
-https://www.unicode.org/Public/cldr/.
-
-Note that the code gets adapted over time to changes in the data and that
-backwards compatibility is not maintained.
-So updating to a different version may not work.
-
-The files in DATA/{iana|icu|w3|whatwg} are currently not versioned.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the text repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/text:" in the
-subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go
new file mode 100644
index 000000000..f7941701e
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/charmap/maketables.go
@@ -0,0 +1,556 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/internal/gen"
+)
+
+const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+ ` !"#$%&'()*+,-./0123456789:;<=>?` +
+ `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
+ "`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
+
+var encodings = []struct {
+ name string
+ mib string
+ comment string
+ varName string
+ replacement byte
+ mapping string
+}{
+ {
+ "IBM Code Page 037",
+ "IBM037",
+ "",
+ "CodePage037",
+ 0x3f,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 437",
+ "PC8CodePage437",
+ "",
+ "CodePage437",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 850",
+ "PC850Multilingual",
+ "",
+ "CodePage850",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 852",
+ "PCp852",
+ "",
+ "CodePage852",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 855",
+ "IBM855",
+ "",
+ "CodePage855",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm",
+ },
+ {
+ "Windows Code Page 858", // PC latin1 with Euro
+ "IBM00858",
+ "",
+ "CodePage858",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm",
+ },
+ {
+ "IBM Code Page 860",
+ "IBM860",
+ "",
+ "CodePage860",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 862",
+ "PC862LatinHebrew",
+ "",
+ "CodePage862",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 863",
+ "IBM863",
+ "",
+ "CodePage863",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 865",
+ "IBM865",
+ "",
+ "CodePage865",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 866",
+ "IBM866",
+ "",
+ "CodePage866",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-ibm866.txt",
+ },
+ {
+ "IBM Code Page 1047",
+ "IBM1047",
+ "",
+ "CodePage1047",
+ 0x3f,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm",
+ },
+ {
+ "IBM Code Page 1140",
+ "IBM01140",
+ "",
+ "CodePage1140",
+ 0x3f,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm",
+ },
+ {
+ "ISO 8859-1",
+ "ISOLatin1",
+ "",
+ "ISO8859_1",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm",
+ },
+ {
+ "ISO 8859-2",
+ "ISOLatin2",
+ "",
+ "ISO8859_2",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
+ },
+ {
+ "ISO 8859-3",
+ "ISOLatin3",
+ "",
+ "ISO8859_3",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
+ },
+ {
+ "ISO 8859-4",
+ "ISOLatin4",
+ "",
+ "ISO8859_4",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
+ },
+ {
+ "ISO 8859-5",
+ "ISOLatinCyrillic",
+ "",
+ "ISO8859_5",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
+ },
+ {
+ "ISO 8859-6",
+ "ISOLatinArabic",
+ "",
+ "ISO8859_6,ISO8859_6E,ISO8859_6I",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
+ },
+ {
+ "ISO 8859-7",
+ "ISOLatinGreek",
+ "",
+ "ISO8859_7",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
+ },
+ {
+ "ISO 8859-8",
+ "ISOLatinHebrew",
+ "",
+ "ISO8859_8,ISO8859_8E,ISO8859_8I",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
+ },
+ {
+ "ISO 8859-9",
+ "ISOLatin5",
+ "",
+ "ISO8859_9",
+ encoding.ASCIISub,
+ "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm",
+ },
+ {
+ "ISO 8859-10",
+ "ISOLatin6",
+ "",
+ "ISO8859_10",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
+ },
+ {
+ "ISO 8859-13",
+ "ISO885913",
+ "",
+ "ISO8859_13",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
+ },
+ {
+ "ISO 8859-14",
+ "ISO885914",
+ "",
+ "ISO8859_14",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
+ },
+ {
+ "ISO 8859-15",
+ "ISO885915",
+ "",
+ "ISO8859_15",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
+ },
+ {
+ "ISO 8859-16",
+ "ISO885916",
+ "",
+ "ISO8859_16",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
+ },
+ {
+ "KOI8-R",
+ "KOI8R",
+ "",
+ "KOI8R",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-koi8-r.txt",
+ },
+ {
+ "KOI8-U",
+ "KOI8U",
+ "",
+ "KOI8U",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-koi8-u.txt",
+ },
+ {
+ "Macintosh",
+ "Macintosh",
+ "",
+ "Macintosh",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-macintosh.txt",
+ },
+ {
+ "Macintosh Cyrillic",
+ "MacintoshCyrillic",
+ "",
+ "MacintoshCyrillic",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
+ },
+ {
+ "Windows 874",
+ "Windows874",
+ "",
+ "Windows874",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-874.txt",
+ },
+ {
+ "Windows 1250",
+ "Windows1250",
+ "",
+ "Windows1250",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1250.txt",
+ },
+ {
+ "Windows 1251",
+ "Windows1251",
+ "",
+ "Windows1251",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1251.txt",
+ },
+ {
+ "Windows 1252",
+ "Windows1252",
+ "",
+ "Windows1252",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1252.txt",
+ },
+ {
+ "Windows 1253",
+ "Windows1253",
+ "",
+ "Windows1253",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1253.txt",
+ },
+ {
+ "Windows 1254",
+ "Windows1254",
+ "",
+ "Windows1254",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1254.txt",
+ },
+ {
+ "Windows 1255",
+ "Windows1255",
+ "",
+ "Windows1255",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1255.txt",
+ },
+ {
+ "Windows 1256",
+ "Windows1256",
+ "",
+ "Windows1256",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1256.txt",
+ },
+ {
+ "Windows 1257",
+ "Windows1257",
+ "",
+ "Windows1257",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1257.txt",
+ },
+ {
+ "Windows 1258",
+ "Windows1258",
+ "",
+ "Windows1258",
+ encoding.ASCIISub,
+ "http://encoding.spec.whatwg.org/index-windows-1258.txt",
+ },
+ {
+ "X-User-Defined",
+ "XUserDefined",
+ "It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
+ "XUserDefined",
+ encoding.ASCIISub,
+ ascii +
+ "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
+ "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
+ "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
+ "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
+ "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
+ "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
+ "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
+ "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
+ "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
+ "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
+ "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
+ "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
+ "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
+ "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
+ "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
+ "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
+ },
+}
+
+func getWHATWG(url string) string {
+ res, err := http.Get(url)
+ if err != nil {
+ log.Fatalf("%q: Get: %v", url, err)
+ }
+ defer res.Body.Close()
+
+ mapping := make([]rune, 128)
+ for i := range mapping {
+ mapping[i] = '\ufffd'
+ }
+
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := 0, 0
+ if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
+ log.Fatalf("could not parse %q", s)
+ }
+ if x < 0 || 128 <= x {
+ log.Fatalf("code %d is out of range", x)
+ }
+ if 0x80 <= y && y < 0xa0 {
+ // We diverge from the WHATWG spec by mapping control characters
+ // in the range [0x80, 0xa0) to U+FFFD.
+ continue
+ }
+ mapping[x] = rune(y)
+ }
+ return ascii + string(mapping)
+}
+
+func getUCM(url string) string {
+ res, err := http.Get(url)
+ if err != nil {
+ log.Fatalf("%q: Get: %v", url, err)
+ }
+ defer res.Body.Close()
+
+ mapping := make([]rune, 256)
+ for i := range mapping {
+ mapping[i] = '\ufffd'
+ }
+
+ charsFound := 0
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ var c byte
+ var r rune
+ if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
+ continue
+ }
+ mapping[c] = r
+ charsFound++
+ }
+
+ if charsFound < 200 {
+ log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
+ }
+
+ return string(mapping)
+}
+
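The two scrapers above rely on fixed line formats, which the Sscanf patterns encode. A small standalone illustration follows; the sample lines are representative of the two formats, not quotations from the referenced files.

package main

import "fmt"

func main() {
	// WHATWG index format: "pointer<TAB>code point<TAB>name"; pointers 0..127
	// stand for bytes 0x80..0xFF, and getWHATWG prepends the ASCII half itself.
	whatwg := "0\t0x0402\tCYRILLIC CAPITAL LETTER DJE"
	var x, y int
	if _, err := fmt.Sscanf(whatwg, "%d\t0x%x", &x, &y); err != nil {
		panic(err)
	}
	fmt.Printf("byte 0x%02X -> U+%04X\n", 0x80+x, y)

	// ICU .ucm format: "<Uxxxx> \xYY |0"; getUCM keeps only the "|0"
	// (round-trip) mappings and silently skips any line that does not match.
	ucm := `<U00C7> \x68 |0`
	var r rune
	var c byte
	if _, err := fmt.Sscanf(ucm, `<U%x> \x%x |0`, &r, &c); err != nil {
		panic(err)
	}
	fmt.Printf("byte 0x%02X -> U+%04X\n", c, r)
}
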
+func main() {
+ mibs := map[string]bool{}
+ all := []string{}
+
+ w := gen.NewCodeWriter()
+ defer w.WriteGoFile("tables.go", "charmap")
+
+ printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
+
+ printf("import (\n")
+ printf("\t\"golang.org/x/text/encoding\"\n")
+ printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
+ printf(")\n\n")
+ for _, e := range encodings {
+ varNames := strings.Split(e.varName, ",")
+ all = append(all, varNames...)
+ varName := varNames[0]
+ switch {
+ case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
+ e.mapping = getWHATWG(e.mapping)
+ case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
+ e.mapping = getUCM(e.mapping)
+ }
+
+ asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
+ if asciiSuperset {
+ low = 0x80
+ }
+ lvn := 1
+ if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
+ lvn = 3
+ }
+ lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
+ printf("// %s is the %s encoding.\n", varName, e.name)
+ if e.comment != "" {
+ printf("//\n// %s\n", e.comment)
+ }
+ printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
+ varName, lowerVarName, lowerVarName, e.name)
+ if mibs[e.mib] {
+ log.Fatalf("MIB type %q declared multiple times.", e.mib)
+ }
+ printf("mib: identifier.%s,\n", e.mib)
+ printf("asciiSuperset: %t,\n", asciiSuperset)
+ printf("low: 0x%02x,\n", low)
+ printf("replacement: 0x%02x,\n", e.replacement)
+
+ printf("decode: [256]utf8Enc{\n")
+ i, backMapping := 0, map[rune]byte{}
+ for _, c := range e.mapping {
+ if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
+ backMapping[c] = byte(i)
+ }
+ var buf [8]byte
+ n := utf8.EncodeRune(buf[:], c)
+ if n > 3 {
+ panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
+ }
+ printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
+ if i%2 == 1 {
+ printf("\n")
+ }
+ i++
+ }
+ printf("},\n")
+
+ printf("encode: [256]uint32{\n")
+ encode := make([]uint32, 0, 256)
+ for c, i := range backMapping {
+ encode = append(encode, uint32(i)<<24|uint32(c))
+ }
+ sort.Sort(byRune(encode))
+ for len(encode) < cap(encode) {
+ encode = append(encode, encode[len(encode)-1])
+ }
+ for i, enc := range encode {
+ printf("0x%08x,", enc)
+ if i%8 == 7 {
+ printf("\n")
+ }
+ }
+ printf("},\n}\n")
+
+ // Add an estimate of the size of a single Charmap{} struct value, which
+ // includes two 256 elem arrays of 4 bytes and some extra fields, which
+ // align to 3 uint64s on 64-bit architectures.
+ w.Size += 2*4*256 + 3*8
+ }
+ // TODO: add proper line breaking.
+ printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
+}
+
+type byRune []uint32
+
+func (b byRune) Len() int { return len(b) }
+func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
+func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
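
Each entry emitted into the encode arrays above packs the one-byte encoding in the top 8 bits and the rune in the low 24 bits, and byRune keeps the array sorted by rune so the encoder can binary-search it. The real charmap encoder in this package is written differently; the fragment below only illustrates how such an entry is meant to be consumed.

// encodeByte looks up the byte for rune r in a table built by this generator.
func encodeByte(encode []uint32, r rune) (byte, bool) {
	lo, hi := 0, len(encode)
	for lo < hi { // binary search over the rune stored in the low 24 bits
		mid := (lo + hi) / 2
		if rune(encode[mid]&0xffffff) < r {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	if lo < len(encode) && rune(encode[lo]&0xffffff) == r {
		return byte(encode[lo] >> 24), true // top 8 bits hold the encoded byte
	}
	return 0, false
}
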
diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go
new file mode 100644
index 000000000..ac6b4a77f
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/htmlindex/gen.go
@@ -0,0 +1,173 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+)
+
+type group struct {
+ Encodings []struct {
+ Labels []string
+ Name string
+ }
+}
+
+func main() {
+ gen.Init()
+
+ r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json")
+ var groups []group
+ if err := json.NewDecoder(r).Decode(&groups); err != nil {
+ log.Fatalf("Error reading encodings.json: %v", err)
+ }
+
+ w := &bytes.Buffer{}
+ fmt.Fprintln(w, "type htmlEncoding byte")
+ fmt.Fprintln(w, "const (")
+ for i, g := range groups {
+ for _, e := range g.Encodings {
+ key := strings.ToLower(e.Name)
+ name := consts[key]
+ if name == "" {
+ log.Fatalf("No const defined for %s.", key)
+ }
+ if i == 0 {
+ fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
+ } else {
+ fmt.Fprintf(w, "%s\n", name)
+ }
+ }
+ }
+ fmt.Fprintln(w, "numEncodings")
+ fmt.Fprint(w, ")\n\n")
+
+ fmt.Fprintln(w, "var canonical = [numEncodings]string{")
+ for _, g := range groups {
+ for _, e := range g.Encodings {
+ fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name))
+ }
+ }
+ fmt.Fprint(w, "}\n\n")
+
+ fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
+ for _, g := range groups {
+ for _, e := range g.Encodings {
+ for _, l := range e.Labels {
+ key := strings.ToLower(e.Name)
+ name := consts[key]
+ fmt.Fprintf(w, "%q: %s,\n", l, name)
+ }
+ }
+ }
+ fmt.Fprint(w, "}\n\n")
+
+ var tags []string
+ fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
+ for _, loc := range locales {
+ tags = append(tags, loc.tag)
+ fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
+ }
+ fmt.Fprint(w, "}\n\n")
+
+ fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))
+
+ gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
+}
+
+// consts maps canonical encoding name to internal constant.
+var consts = map[string]string{
+ "utf-8": "utf8",
+ "ibm866": "ibm866",
+ "iso-8859-2": "iso8859_2",
+ "iso-8859-3": "iso8859_3",
+ "iso-8859-4": "iso8859_4",
+ "iso-8859-5": "iso8859_5",
+ "iso-8859-6": "iso8859_6",
+ "iso-8859-7": "iso8859_7",
+ "iso-8859-8": "iso8859_8",
+ "iso-8859-8-i": "iso8859_8I",
+ "iso-8859-10": "iso8859_10",
+ "iso-8859-13": "iso8859_13",
+ "iso-8859-14": "iso8859_14",
+ "iso-8859-15": "iso8859_15",
+ "iso-8859-16": "iso8859_16",
+ "koi8-r": "koi8r",
+ "koi8-u": "koi8u",
+ "macintosh": "macintosh",
+ "windows-874": "windows874",
+ "windows-1250": "windows1250",
+ "windows-1251": "windows1251",
+ "windows-1252": "windows1252",
+ "windows-1253": "windows1253",
+ "windows-1254": "windows1254",
+ "windows-1255": "windows1255",
+ "windows-1256": "windows1256",
+ "windows-1257": "windows1257",
+ "windows-1258": "windows1258",
+ "x-mac-cyrillic": "macintoshCyrillic",
+ "gbk": "gbk",
+ "gb18030": "gb18030",
+ // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
+ "big5": "big5",
+ "euc-jp": "eucjp",
+ "iso-2022-jp": "iso2022jp",
+ "shift_jis": "shiftJIS",
+ "euc-kr": "euckr",
+ "replacement": "replacement",
+ "utf-16be": "utf16be",
+ "utf-16le": "utf16le",
+ "x-user-defined": "xUserDefined",
+}
+
+// locales is taken from
+// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
+var locales = []struct{ tag, name string }{
+ // The default value. Explicitly state latin to benefit from the exact
+ // script option, while still making 1252 the default encoding for languages
+ // written in Latin script.
+ {"und_Latn", "windows-1252"},
+ {"ar", "windows-1256"},
+ {"ba", "windows-1251"},
+ {"be", "windows-1251"},
+ {"bg", "windows-1251"},
+ {"cs", "windows-1250"},
+ {"el", "iso-8859-7"},
+ {"et", "windows-1257"},
+ {"fa", "windows-1256"},
+ {"he", "windows-1255"},
+ {"hr", "windows-1250"},
+ {"hu", "iso-8859-2"},
+ {"ja", "shift_jis"},
+ {"kk", "windows-1251"},
+ {"ko", "euc-kr"},
+ {"ku", "windows-1254"},
+ {"ky", "windows-1251"},
+ {"lt", "windows-1257"},
+ {"lv", "windows-1257"},
+ {"mk", "windows-1251"},
+ {"pl", "iso-8859-2"},
+ {"ru", "windows-1251"},
+ {"sah", "windows-1251"},
+ {"sk", "windows-1250"},
+ {"sl", "iso-8859-2"},
+ {"sr", "windows-1251"},
+ {"tg", "windows-1251"},
+ {"th", "windows-874"},
+ {"tr", "windows-1254"},
+ {"tt", "windows-1251"},
+ {"uk", "windows-1251"},
+ {"vi", "windows-1258"},
+ {"zh-hans", "gb18030"},
+ {"zh-hant", "big5"},
+}
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
new file mode 100644
index 000000000..26cfef9c6
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+)
+
+type registry struct {
+ XMLName xml.Name `xml:"registry"`
+ Updated string `xml:"updated"`
+ Registry []struct {
+ ID string `xml:"id,attr"`
+ Record []struct {
+ Name string `xml:"name"`
+ Xref []struct {
+ Type string `xml:"type,attr"`
+ Data string `xml:"data,attr"`
+ } `xml:"xref"`
+ Desc struct {
+ Data string `xml:",innerxml"`
+ // Any []struct {
+ // Data string `xml:",chardata"`
+ // } `xml:",any"`
+ // Data string `xml:",chardata"`
+ } `xml:"description,"`
+ MIB string `xml:"value"`
+ Alias []string `xml:"alias"`
+ MIME string `xml:"preferred_alias"`
+ } `xml:"record"`
+ } `xml:"registry"`
+}
+
+func main() {
+ r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
+ reg := &registry{}
+ if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
+ log.Fatalf("Error decoding charset registry: %v", err)
+ }
+ if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
+ log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
+ }
+
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "const (\n")
+ for _, rec := range reg.Registry[0].Record {
+ constName := ""
+ for _, a := range rec.Alias {
+ if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
+ // Some of the constant definitions have comments in them. Strip those.
+ constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
+ }
+ }
+ if constName == "" {
+ switch rec.MIB {
+ case "2085":
+ constName = "HZGB2312" // Not listed as alias for some reason.
+ default:
+ log.Fatalf("No cs alias defined for %s.", rec.MIB)
+ }
+ }
+ if rec.MIME != "" {
+ rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
+ }
+ fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
+ if len(rec.Desc.Data) > 0 {
+ fmt.Fprint(w, "// ")
+ d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
+ inElem := true
+ attr := ""
+ for {
+ t, err := d.Token()
+ if err != nil {
+ if err != io.EOF {
+ log.Fatal(err)
+ }
+ break
+ }
+ switch x := t.(type) {
+ case xml.CharData:
+ attr = "" // Don't need attribute info.
+ a := bytes.Split([]byte(x), []byte("\n"))
+ for i, b := range a {
+ if b = bytes.TrimSpace(b); len(b) != 0 {
+ if !inElem && i > 0 {
+ fmt.Fprint(w, "\n// ")
+ }
+ inElem = false
+ fmt.Fprintf(w, "%s ", string(b))
+ }
+ }
+ case xml.StartElement:
+ if x.Name.Local == "xref" {
+ inElem = true
+ use := false
+ for _, a := range x.Attr {
+ if a.Name.Local == "type" {
+ use = use || a.Value != "person"
+ }
+ if a.Name.Local == "data" && use {
+ // Patch up URLs to use https. From some links, the
+ // https version is different from the http one.
+ s := a.Value
+ s = strings.Replace(s, "http://", "https://", -1)
+ s = strings.Replace(s, "/unicode/", "/", -1)
+ attr = s + " "
+ }
+ }
+ }
+ case xml.EndElement:
+ inElem = false
+ fmt.Fprint(w, attr)
+ }
+ }
+ fmt.Fprint(w, "\n")
+ }
+ for _, x := range rec.Xref {
+ switch x.Type {
+ case "rfc":
+ fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
+ case "uri":
+ fmt.Fprintf(w, "// Reference: %s\n", x.Data)
+ }
+ }
+ fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, ")")
+
+ gen.WriteGoFile("mib.go", "identifier", w.Bytes())
+}
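
The constant names above come from the registry's "cs*" aliases: the "cs" prefix is stripped, any comment text after an embedded newline is dropped, and strings.Title normalizes what remains. A tiny standalone illustration using real IANA aliases:

package main

import (
	"fmt"
	"strings"
)

// constNameFromAlias mirrors the derivation used in the generator above.
func constNameFromAlias(alias string) string {
	return strings.Title(strings.SplitN(alias[2:], "\n", 2)[0])
}

func main() {
	fmt.Println(constNameFromAlias("csISOLatin1"))  // ISOLatin1
	fmt.Println(constNameFromAlias("csKOI8R"))      // KOI8R
	fmt.Println(constNameFromAlias("csWindows31J")) // Windows31J
}
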
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
index 8cc29021c..fc7df1bc7 100644
--- a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
@@ -538,8 +538,6 @@ const (
// ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic.
//
// ISO registry
- // (formerly ECMA
- // registry )
ISO111ECMACyrillic MIB = 77
// ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1.
@@ -732,18 +730,18 @@ const (
// ISO885913 is the MIB identifier with IANA name ISO-8859-13.
//
- // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13 http://www.iana.org/assignments/charset-reg/ISO-8859-13
+ // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13 https://www.iana.org/assignments/charset-reg/ISO-8859-13
ISO885913 MIB = 109
// ISO885914 is the MIB identifier with IANA name ISO-8859-14.
//
- // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14
+ // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14
ISO885914 MIB = 110
// ISO885915 is the MIB identifier with IANA name ISO-8859-15.
//
// ISO
- // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15
+ // Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15
ISO885915 MIB = 111
// ISO885916 is the MIB identifier with IANA name ISO-8859-16.
@@ -754,41 +752,41 @@ const (
// GBK is the MIB identifier with IANA name GBK.
//
// Chinese IT Standardization Technical Committee
- // Please see: http://www.iana.org/assignments/charset-reg/GBK
+ // Please see: https://www.iana.org/assignments/charset-reg/GBK
GBK MIB = 113
// GB18030 is the MIB identifier with IANA name GB18030.
//
// Chinese IT Standardization Technical Committee
- // Please see: http://www.iana.org/assignments/charset-reg/GB18030
+ // Please see: https://www.iana.org/assignments/charset-reg/GB18030
GB18030 MIB = 114
// OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15.
//
// Fujitsu-Siemens standard mainframe EBCDIC encoding
- // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15
+ // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15
OSDEBCDICDF0415 MIB = 115
// OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV.
//
// Fujitsu-Siemens standard mainframe EBCDIC encoding
- // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV
+ // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV
OSDEBCDICDF03IRV MIB = 116
// OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1.
//
// Fujitsu-Siemens standard mainframe EBCDIC encoding
- // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1
+ // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1
OSDEBCDICDF041 MIB = 117
// ISO115481 is the MIB identifier with IANA name ISO-11548-1.
//
- // See http://www.iana.org/assignments/charset-reg/ISO-11548-1
+ // See https://www.iana.org/assignments/charset-reg/ISO-11548-1
ISO115481 MIB = 118
// KZ1048 is the MIB identifier with IANA name KZ-1048.
//
- // See http://www.iana.org/assignments/charset-reg/KZ-1048
+ // See https://www.iana.org/assignments/charset-reg/KZ-1048
KZ1048 MIB = 119
// Unicode is the MIB identifier with IANA name ISO-10646-UCS-2.
@@ -855,7 +853,7 @@ const (
// SCSU is the MIB identifier with IANA name SCSU.
//
- // SCSU See http://www.iana.org/assignments/charset-reg/SCSU
+ // SCSU See https://www.iana.org/assignments/charset-reg/SCSU
SCSU MIB = 1011
// UTF7 is the MIB identifier with IANA name UTF-7.
@@ -884,22 +882,22 @@ const (
// CESU8 is the MIB identifier with IANA name CESU-8.
//
- // https://www.unicode.org/unicode/reports/tr26
+ // https://www.unicode.org/reports/tr26
CESU8 MIB = 1016
// UTF32 is the MIB identifier with IANA name UTF-32.
//
- // https://www.unicode.org/unicode/reports/tr19/
+ // https://www.unicode.org/reports/tr19/
UTF32 MIB = 1017
// UTF32BE is the MIB identifier with IANA name UTF-32BE.
//
- // https://www.unicode.org/unicode/reports/tr19/
+ // https://www.unicode.org/reports/tr19/
UTF32BE MIB = 1018
// UTF32LE is the MIB identifier with IANA name UTF-32LE.
//
- // https://www.unicode.org/unicode/reports/tr19/
+ // https://www.unicode.org/reports/tr19/
UTF32LE MIB = 1019
// BOCU1 is the MIB identifier with IANA name BOCU-1.
@@ -1461,152 +1459,152 @@ const (
// IBM00858 is the MIB identifier with IANA name IBM00858.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM00858
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM00858
IBM00858 MIB = 2089
// IBM00924 is the MIB identifier with IANA name IBM00924.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM00924
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM00924
IBM00924 MIB = 2090
// IBM01140 is the MIB identifier with IANA name IBM01140.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01140
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01140
IBM01140 MIB = 2091
// IBM01141 is the MIB identifier with IANA name IBM01141.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01141
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01141
IBM01141 MIB = 2092
// IBM01142 is the MIB identifier with IANA name IBM01142.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01142
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01142
IBM01142 MIB = 2093
// IBM01143 is the MIB identifier with IANA name IBM01143.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01143
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01143
IBM01143 MIB = 2094
// IBM01144 is the MIB identifier with IANA name IBM01144.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01144
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01144
IBM01144 MIB = 2095
// IBM01145 is the MIB identifier with IANA name IBM01145.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01145
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01145
IBM01145 MIB = 2096
// IBM01146 is the MIB identifier with IANA name IBM01146.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01146
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01146
IBM01146 MIB = 2097
// IBM01147 is the MIB identifier with IANA name IBM01147.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01147
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01147
IBM01147 MIB = 2098
// IBM01148 is the MIB identifier with IANA name IBM01148.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01148
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01148
IBM01148 MIB = 2099
// IBM01149 is the MIB identifier with IANA name IBM01149.
//
- // IBM See http://www.iana.org/assignments/charset-reg/IBM01149
+ // IBM See https://www.iana.org/assignments/charset-reg/IBM01149
IBM01149 MIB = 2100
// Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS.
//
- // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS
+ // See https://www.iana.org/assignments/charset-reg/Big5-HKSCS
Big5HKSCS MIB = 2101
// IBM1047 is the MIB identifier with IANA name IBM1047.
//
- // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf
+ // IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf
IBM1047 MIB = 2102
// PTCP154 is the MIB identifier with IANA name PTCP154.
//
- // See http://www.iana.org/assignments/charset-reg/PTCP154
+ // See https://www.iana.org/assignments/charset-reg/PTCP154
PTCP154 MIB = 2103
// Amiga1251 is the MIB identifier with IANA name Amiga-1251.
//
- // See http://www.amiga.ultranet.ru/Amiga-1251.html
+ // See https://www.amiga.ultranet.ru/Amiga-1251.html
Amiga1251 MIB = 2104
// KOI7switched is the MIB identifier with IANA name KOI7-switched.
//
- // See http://www.iana.org/assignments/charset-reg/KOI7-switched
+ // See https://www.iana.org/assignments/charset-reg/KOI7-switched
KOI7switched MIB = 2105
// BRF is the MIB identifier with IANA name BRF.
//
- // See http://www.iana.org/assignments/charset-reg/BRF
+ // See https://www.iana.org/assignments/charset-reg/BRF
BRF MIB = 2106
// TSCII is the MIB identifier with IANA name TSCII.
//
- // See http://www.iana.org/assignments/charset-reg/TSCII
+ // See https://www.iana.org/assignments/charset-reg/TSCII
TSCII MIB = 2107
// CP51932 is the MIB identifier with IANA name CP51932.
//
- // See http://www.iana.org/assignments/charset-reg/CP51932
+ // See https://www.iana.org/assignments/charset-reg/CP51932
CP51932 MIB = 2108
// Windows874 is the MIB identifier with IANA name windows-874.
//
- // See http://www.iana.org/assignments/charset-reg/windows-874
+ // See https://www.iana.org/assignments/charset-reg/windows-874
Windows874 MIB = 2109
// Windows1250 is the MIB identifier with IANA name windows-1250.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1250
Windows1250 MIB = 2250
// Windows1251 is the MIB identifier with IANA name windows-1251.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1251
Windows1251 MIB = 2251
// Windows1252 is the MIB identifier with IANA name windows-1252.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1252
Windows1252 MIB = 2252
// Windows1253 is the MIB identifier with IANA name windows-1253.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1253
Windows1253 MIB = 2253
// Windows1254 is the MIB identifier with IANA name windows-1254.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1254
Windows1254 MIB = 2254
// Windows1255 is the MIB identifier with IANA name windows-1255.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1255
Windows1255 MIB = 2255
// Windows1256 is the MIB identifier with IANA name windows-1256.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1256
Windows1256 MIB = 2256
// Windows1257 is the MIB identifier with IANA name windows-1257.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1257
Windows1257 MIB = 2257
// Windows1258 is the MIB identifier with IANA name windows-1258.
//
- // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258
+ // Microsoft https://www.iana.org/assignments/charset-reg/windows-1258
Windows1258 MIB = 2258
// TIS620 is the MIB identifier with IANA name TIS-620.
@@ -1616,6 +1614,6 @@ const (
// CP50220 is the MIB identifier with IANA name CP50220.
//
- // See http://www.iana.org/assignments/charset-reg/CP50220
+ // See https://www.iana.org/assignments/charset-reg/CP50220
CP50220 MIB = 2260
)
diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go
new file mode 100644
index 000000000..023957a67
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/japanese/maketables.go
@@ -0,0 +1,161 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates tables.go:
+// go run maketables.go | gofmt > tables.go
+
+// TODO: Emoji extensions?
+// https://www.unicode.org/faq/emoji_dingbats.html
+// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+type entry struct {
+ jisCode, table int
+}
+
+func main() {
+ fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
+ fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
+ fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")
+
+ reverse := [65536]entry{}
+ for i := range reverse {
+ reverse[i].table = -1
+ }
+
+ tables := []struct {
+ url string
+ name string
+ }{
+ {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
+ {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
+ }
+ for i, table := range tables {
+ res, err := http.Get(table.url)
+ if err != nil {
+ log.Fatalf("%q: Get: %v", table.url, err)
+ }
+ defer res.Body.Close()
+
+ mapping := [65536]uint16{}
+
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := 0, uint16(0)
+ if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
+ log.Fatalf("%q: could not parse %q", table.url, s)
+ }
+ if x < 0 || 120*94 <= x {
+ log.Fatalf("%q: JIS code %d is out of range", table.url, x)
+ }
+ mapping[x] = y
+ if reverse[y].table == -1 {
+ reverse[y] = entry{jisCode: x, table: i}
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("%q: scanner error: %v", table.url, err)
+ }
+
+ fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
+ table.name, table.name, table.url)
+ fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
+ for i, m := range mapping {
+ if m != 0 {
+ fmt.Printf("\t%d: 0x%04X,\n", i, m)
+ }
+ }
+ fmt.Printf("}\n\n")
+ }
+
+ // Any run of at least separation consecutive zero entries in the reverse map
+ // splits the output into separate encode tables.
+ const separation = 1024
+
+ intervals := []interval(nil)
+ low, high := -1, -1
+ for i, v := range reverse {
+ if v.table == -1 {
+ continue
+ }
+ if low < 0 {
+ low = i
+ } else if i-high >= separation {
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ low = i
+ }
+ high = i + 1
+ }
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ sort.Sort(byDecreasingLength(intervals))
+
+ fmt.Printf("const (\n")
+ fmt.Printf("\tjis0208 = 1\n")
+ fmt.Printf("\tjis0212 = 2\n")
+ fmt.Printf("\tcodeMask = 0x7f\n")
+ fmt.Printf("\tcodeShift = 7\n")
+ fmt.Printf("\ttableShift = 14\n")
+ fmt.Printf(")\n\n")
+
+ fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
+ fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
+ fmt.Printf("// sorted by decreasing length.\n")
+ for i, v := range intervals {
+ fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
+ }
+ fmt.Printf("//\n")
+ fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
+ fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
+ fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
+ fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
+ fmt.Printf("\n")
+
+ for i, v := range intervals {
+ fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
+ fmt.Printf("var encode%d = [...]uint16{\n", i)
+ for j := v.low; j < v.high; j++ {
+ x := reverse[j]
+ if x.table == -1 {
+ continue
+ }
+ fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
+ j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
+ }
+ fmt.Printf("}\n\n")
+ }
+}
+
+// interval is a half-open interval [low, high).
+type interval struct {
+ low, high int
+}
+
+func (i interval) len() int { return i.high - i.low }
+
+// byDecreasingLength sorts intervals by decreasing length.
+type byDecreasingLength []interval
+
+func (b byDecreasingLength) Len() int { return len(b) }
+func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
+func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
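
The generated comments above spell out the packed layout of the encode tables: the top two bits select the JIS table (1 = JIS 0208, 2 = JIS 0212) and the low 14 bits hold two 7-bit fields j1 and j2, with the JIS code being 94*j1 + j2. Below is a minimal decoding sketch under that assumption only; the constants mirror the ones the generator emits, and the sample packed value is hypothetical.

package main

import "fmt"

// Constants mirroring the ones emitted by maketables.go above.
const (
	jis0208    = 1
	jis0212    = 2
	codeMask   = 0x7f
	codeShift  = 7
	tableShift = 14
)

// unpack splits a packed encode-table value into the table selector and the
// JIS code 94*j1 + j2, reversing the expression jisX<<14 | j1<<7 | j2.
func unpack(v uint16) (table, jisCode int) {
	table = int(v >> tableShift)
	j1 := int(v>>codeShift) & codeMask
	j2 := int(v) & codeMask
	return table, 94*j1 + j2
}

func main() {
	// Hypothetical packed value: JIS X 0208, j1 = 0x21, j2 = 0x0F.
	v := uint16(jis0208<<tableShift | 0x21<<codeShift | 0x0F)
	table, code := unpack(v)
	fmt.Println(table, code) // 1 3117
}
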
diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go
new file mode 100644
index 000000000..c84034fb6
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/korean/maketables.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates tables.go:
+// go run maketables.go | gofmt > tables.go
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+func main() {
+ fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
+ fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
+ fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")
+
+ res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
+ if err != nil {
+ log.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ mapping := [65536]uint16{}
+ reverse := [65536]uint16{}
+
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := uint16(0), uint16(0)
+ if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
+ log.Fatalf("could not parse %q", s)
+ }
+ if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
+ log.Fatalf("EUC-KR code %d is out of range", x)
+ }
+ mapping[x] = y
+ if reverse[y] == 0 {
+ c0, c1 := uint16(0), uint16(0)
+ if x < 178*(0xc7-0x81) {
+ c0 = uint16(x/178) + 0x81
+ c1 = uint16(x % 178)
+ switch {
+ case c1 < 1*26:
+ c1 += 0x41
+ case c1 < 2*26:
+ c1 += 0x47
+ default:
+ c1 += 0x4d
+ }
+ } else {
+ x -= 178 * (0xc7 - 0x81)
+ c0 = uint16(x/94) + 0xc7
+ c1 = uint16(x%94) + 0xa1
+ }
+ reverse[y] = c0<<8 | c1
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner error: %v", err)
+ }
+
+ fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
+ fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
+ fmt.Printf("var decode = [...]uint16{\n")
+ for i, v := range mapping {
+ if v != 0 {
+ fmt.Printf("\t%d: 0x%04X,\n", i, v)
+ }
+ }
+ fmt.Printf("}\n\n")
+
+ // Any run of at least separation consecutive zero entries in the reverse map
+ // splits the output into separate encode tables.
+ const separation = 1024
+
+ intervals := []interval(nil)
+ low, high := -1, -1
+ for i, v := range reverse {
+ if v == 0 {
+ continue
+ }
+ if low < 0 {
+ low = i
+ } else if i-high >= separation {
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ low = i
+ }
+ high = i + 1
+ }
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ sort.Sort(byDecreasingLength(intervals))
+
+ fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
+ fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
+ fmt.Printf("// sorted by decreasing length.\n")
+ for i, v := range intervals {
+ fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
+ }
+ fmt.Printf("\n")
+
+ for i, v := range intervals {
+ fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
+ fmt.Printf("var encode%d = [...]uint16{\n", i)
+ for j := v.low; j < v.high; j++ {
+ x := reverse[j]
+ if x == 0 {
+ continue
+ }
+ fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
+ }
+ fmt.Printf("}\n\n")
+ }
+}
+
+// interval is a half-open interval [low, high).
+type interval struct {
+ low, high int
+}
+
+func (i interval) len() int { return i.high - i.low }
+
+// byDecreasingLength sorts intervals by decreasing length.
+type byDecreasingLength []interval
+
+func (b byDecreasingLength) Len() int { return len(b) }
+func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
+func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
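
The reverse-mapping branch above turns a pointer from index-euc-kr.txt into the two EUC-KR bytes: the first 178*(0xC7-0x81) pointers use a 178-wide trail-byte row split across 0x41-0x5A, 0x61-0x7A and 0x81-0xFE, and the remaining pointers use a plain 94-wide row starting at lead byte 0xC7. A small standalone sketch of that arithmetic follows; the example pointers are arbitrary and the code points they map to are not asserted here.

package main

import "fmt"

// eucKRBytes converts an index-euc-kr.txt pointer into the two EUC-KR bytes,
// mirroring the reverse-mapping arithmetic in the generator above.
func eucKRBytes(x int) (byte, byte) {
	if x < 178*(0xc7-0x81) {
		c0 := byte(x/178) + 0x81
		c1 := byte(x % 178)
		switch {
		case c1 < 1*26:
			c1 += 0x41 // trail bytes 0x41-0x5A
		case c1 < 2*26:
			c1 += 0x47 // trail bytes 0x61-0x7A
		default:
			c1 += 0x4d // trail bytes 0x81-0xFE
		}
		return c0, c1
	}
	x -= 178 * (0xc7 - 0x81)
	return byte(x/94) + 0xc7, byte(x%94) + 0xa1
}

func main() {
	for _, x := range []int{0, 177, 178 * (0xc7 - 0x81)} {
		c0, c1 := eucKRBytes(x)
		fmt.Printf("pointer %5d -> 0x%02X 0x%02X\n", x, c0, c1)
	}
}
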
diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
new file mode 100644
index 000000000..55016c786
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
@@ -0,0 +1,161 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates tables.go:
+// go run maketables.go | gofmt > tables.go
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+func main() {
+ fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
+ fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
+ fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
+
+ printGB18030()
+ printGBK()
+}
+
+func printGB18030() {
+ res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
+ if err != nil {
+ log.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
+ fmt.Printf("var gb18030 = [...][2]uint16{\n")
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := uint32(0), uint32(0)
+ if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
+ log.Fatalf("could not parse %q", s)
+ }
+ if x < 0x10000 && y < 0x10000 {
+ fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
+ }
+ }
+ fmt.Printf("}\n\n")
+}
+
+func printGBK() {
+ res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
+ if err != nil {
+ log.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ mapping := [65536]uint16{}
+ reverse := [65536]uint16{}
+
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := uint16(0), uint16(0)
+ if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
+ log.Fatalf("could not parse %q", s)
+ }
+ if x < 0 || 126*190 <= x {
+ log.Fatalf("GBK code %d is out of range", x)
+ }
+ mapping[x] = y
+ if reverse[y] == 0 {
+ c0, c1 := x/190, x%190
+ if c1 >= 0x3f {
+ c1++
+ }
+ reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner error: %v", err)
+ }
+
+ fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
+ fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
+ fmt.Printf("var decode = [...]uint16{\n")
+ for i, v := range mapping {
+ if v != 0 {
+ fmt.Printf("\t%d: 0x%04X,\n", i, v)
+ }
+ }
+ fmt.Printf("}\n\n")
+
+ // Any run of at least separation consecutive zero entries in the reverse map
+ // splits the output into separate encode tables.
+ const separation = 1024
+
+ intervals := []interval(nil)
+ low, high := -1, -1
+ for i, v := range reverse {
+ if v == 0 {
+ continue
+ }
+ if low < 0 {
+ low = i
+ } else if i-high >= separation {
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ low = i
+ }
+ high = i + 1
+ }
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ sort.Sort(byDecreasingLength(intervals))
+
+ fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
+ fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
+ fmt.Printf("// sorted by decreasing length.\n")
+ for i, v := range intervals {
+ fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
+ }
+ fmt.Printf("\n")
+
+ for i, v := range intervals {
+ fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
+ fmt.Printf("var encode%d = [...]uint16{\n", i)
+ for j := v.low; j < v.high; j++ {
+ x := reverse[j]
+ if x == 0 {
+ continue
+ }
+ fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
+ }
+ fmt.Printf("}\n\n")
+ }
+}
+
+// interval is a half-open interval [low, high).
+type interval struct {
+ low, high int
+}
+
+func (i interval) len() int { return i.high - i.low }
+
+// byDecreasingLength sorts intervals by decreasing length.
+type byDecreasingLength []interval
+
+func (b byDecreasingLength) Len() int { return len(b) }
+func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
+func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
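
printGBK's reverse map packs the two GBK bytes into a uint16: lead byte 0x81 + pointer/190 and trail byte 0x40 + pointer%190, skipping 0x7F. A tiny standalone sketch of that conversion; the example pointers are arbitrary and their Unicode mappings (defined by index-gbk.txt) are not asserted.

package main

import "fmt"

// gbkBytes converts an index-gbk.txt pointer into the two GBK bytes,
// mirroring the reverse-mapping step in printGBK above: lead bytes start at
// 0x81, trail bytes start at 0x40 and skip 0x7F.
func gbkBytes(x int) (byte, byte) {
	c0, c1 := x/190, x%190
	if c1 >= 0x3f {
		c1++ // trail bytes run 0x40-0x7E, then 0x80-0xFE
	}
	return byte(0x81 + c0), byte(0x40 + c1)
}

func main() {
	for _, x := range []int{0, 0x3e, 0x3f, 189, 190} {
		c0, c1 := gbkBytes(x)
		fmt.Printf("pointer %3d -> 0x%02X 0x%02X\n", x, c0, c1)
	}
}
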
diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
new file mode 100644
index 000000000..cf7fdb31a
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
@@ -0,0 +1,140 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates tables.go:
+// go run maketables.go | gofmt > tables.go
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+func main() {
+ fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
+ fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
+ fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")
+
+ res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
+ if err != nil {
+ log.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ mapping := [65536]uint32{}
+ reverse := [65536 * 4]uint16{}
+
+ scanner := bufio.NewScanner(res.Body)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ if s == "" || s[0] == '#' {
+ continue
+ }
+ x, y := uint16(0), uint32(0)
+ if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
+ log.Fatalf("could not parse %q", s)
+ }
+ if x < 0 || 126*157 <= x {
+ log.Fatalf("Big5 code %d is out of range", x)
+ }
+ mapping[x] = y
+
+ // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
+ // "The index pointer for code point in index is the first pointer
+ // corresponding to code point in index", which would normally mean
+ // that the code below should be guarded by "if reverse[y] == 0", but
+ // last instead of first seems to match the behavior of
+ // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
+ // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
+ // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
+ // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
+ c0, c1 := x/157, x%157
+ if c1 < 0x3f {
+ c1 += 0x40
+ } else {
+ c1 += 0x62
+ }
+ reverse[y] = (0x81+c0)<<8 | c1
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner error: %v", err)
+ }
+
+ fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
+ fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
+ fmt.Printf("var decode = [...]uint32{\n")
+ for i, v := range mapping {
+ if v != 0 {
+ fmt.Printf("\t%d: 0x%08X,\n", i, v)
+ }
+ }
+ fmt.Printf("}\n\n")
+
+ // Any run of at least separation consecutive zero entries in the reverse map
+ // splits the output into separate encode tables.
+ const separation = 1024
+
+ intervals := []interval(nil)
+ low, high := -1, -1
+ for i, v := range reverse {
+ if v == 0 {
+ continue
+ }
+ if low < 0 {
+ low = i
+ } else if i-high >= separation {
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ low = i
+ }
+ high = i + 1
+ }
+ if high >= 0 {
+ intervals = append(intervals, interval{low, high})
+ }
+ sort.Sort(byDecreasingLength(intervals))
+
+ fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
+ fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
+ fmt.Printf("// sorted by decreasing length.\n")
+ for i, v := range intervals {
+ fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
+ }
+ fmt.Printf("\n")
+
+ for i, v := range intervals {
+ fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
+ fmt.Printf("var encode%d = [...]uint16{\n", i)
+ for j := v.low; j < v.high; j++ {
+ x := reverse[j]
+ if x == 0 {
+ continue
+ }
+ fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
+ }
+ fmt.Printf("}\n\n")
+ }
+}
+
+// interval is a half-open interval [low, high).
+type interval struct {
+ low, high int
+}
+
+func (i interval) len() int { return i.high - i.low }
+
+// byDecreasingLength sorts intervals by decreasing length.
+type byDecreasingLength []interval
+
+func (b byDecreasingLength) Len() int { return len(b) }
+func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
+func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
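
The Big5 reverse mapping above uses lead byte 0x81 + pointer/157 and a trail byte in 0x40-0x7E or 0xA1-0xFE depending on pointer%157, and deliberately keeps the last mapping for a code point to match iconv. A small sketch of the byte arithmetic, using the two pointers for U+8005 that the generator's own comment cites:

package main

import "fmt"

// big5Bytes converts an index-big5.txt pointer into the two Big5 bytes,
// mirroring the generator above: trail bytes occupy 0x40-0x7E and 0xA1-0xFE.
func big5Bytes(x int) (byte, byte) {
	c0, c1 := x/157, x%157
	if c1 < 0x3f {
		c1 += 0x40 // 0x00-0x3E -> 0x40-0x7E
	} else {
		c1 += 0x62 // 0x3F-0x9C -> 0xA1-0xFE
	}
	return byte(0x81 + c0), byte(c1)
}

func main() {
	// U+8005 appears at pointers 2148 and 6543; since the generator keeps the
	// last mapping, 6543 ("\xaa\xcc") is the one that ends up in the table.
	for _, x := range []int{2148, 6543} {
		c0, c1 := big5Bytes(x)
		fmt.Printf("pointer %4d -> 0x%02X 0x%02X\n", x, c0, c1)
	}
}
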
diff --git a/vendor/golang.org/x/text/go.mod b/vendor/golang.org/x/text/go.mod
deleted file mode 100644
index 5eb1e8b16..000000000
--- a/vendor/golang.org/x/text/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module golang.org/x/text
-
-require golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go
new file mode 100644
index 000000000..0c36a052f
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/gen.go
@@ -0,0 +1,64 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Language tag table generator.
+// Data read from the web.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/unicode/cldr"
+)
+
+var (
+ test = flag.Bool("test",
+ false,
+ "test existing tables; can be used to compare web data with package data.")
+ outputFile = flag.String("output",
+ "tables.go",
+ "output file for generated tables")
+)
+
+func main() {
+ gen.Init()
+
+ w := gen.NewCodeWriter()
+ defer w.WriteGoFile("tables.go", "compact")
+
+ fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`)
+
+ b := newBuilder(w)
+ gen.WriteCLDRVersion(w)
+
+ b.writeCompactIndex()
+}
+
+type builder struct {
+ w *gen.CodeWriter
+ data *cldr.CLDR
+ supp *cldr.SupplementalData
+}
+
+func newBuilder(w *gen.CodeWriter) *builder {
+ r := gen.OpenCLDRCoreZip()
+ defer r.Close()
+ d := &cldr.Decoder{}
+ data, err := d.DecodeZip(r)
+ if err != nil {
+ log.Fatal(err)
+ }
+ b := builder{
+ w: w,
+ data: data,
+ supp: data.Supplemental(),
+ }
+ return &b
+}
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go
new file mode 100644
index 000000000..136cefaf0
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/gen_index.go
@@ -0,0 +1,113 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This file generates derivative tables based on the language package itself.
+
+import (
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+
+ "golang.org/x/text/internal/language"
+)
+
+// Compact indices:
+// Note -va-X variants only apply to localization variants.
+// BCP variants only ever apply to language.
+// The only ambiguity between tags is with regions.
+
+func (b *builder) writeCompactIndex() {
+ // Collect all language tags for which we have any data in CLDR.
+ m := map[language.Tag]bool{}
+ for _, lang := range b.data.Locales() {
+ // We include all locales unconditionally to be consistent with en_US.
+ // We want en_US, even though it has no data associated with it.
+
+ // TODO: put any of the languages for which no data exists at the end
+ // of the index. This allows all components based on ICU to use that
+ // as the cutoff point.
+ // if x := data.RawLDML(lang); false ||
+ // x.LocaleDisplayNames != nil ||
+ // x.Characters != nil ||
+ // x.Delimiters != nil ||
+ // x.Measurement != nil ||
+ // x.Dates != nil ||
+ // x.Numbers != nil ||
+ // x.Units != nil ||
+ // x.ListPatterns != nil ||
+ // x.Collations != nil ||
+ // x.Segmentations != nil ||
+ // x.Rbnf != nil ||
+ // x.Annotations != nil ||
+ // x.Metadata != nil {
+
+ // TODO: support POSIX natively, albeit non-standard.
+ tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
+ m[tag] = true
+ // }
+ }
+
+ // TODO: plural rules are also defined for the deprecated tags:
+ // iw mo sh tl
+ // Consider removing these as compact tags.
+
+ // Include locales for plural rules, which uses a different structure.
+ for _, plurals := range b.supp.Plurals {
+ for _, rules := range plurals.PluralRules {
+ for _, lang := range strings.Split(rules.Locales, " ") {
+ m[language.Make(lang)] = true
+ }
+ }
+ }
+
+ var coreTags []language.CompactCoreInfo
+ var special []string
+
+ for t := range m {
+ if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
+ log.Fatalf("Unexpected extension %v in %v", x, t)
+ }
+ if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
+ cci, ok := language.GetCompactCore(t)
+ if !ok {
+ log.Fatalf("Locale for non-basic language %q", t)
+ }
+ coreTags = append(coreTags, cci)
+ } else {
+ special = append(special, t.String())
+ }
+ }
+
+ w := b.w
+
+ sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] })
+ sort.Strings(special)
+
+ w.WriteComment(`
+ NumCompactTags is the number of common tags. The maximum tag is
+ NumCompactTags-1.`)
+ w.WriteConst("NumCompactTags", len(m))
+
+ fmt.Fprintln(w, "const (")
+ for i, t := range coreTags {
+ fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i)
+ }
+ for i, t := range special {
+ fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags))
+ }
+ fmt.Fprintln(w, ")")
+
+ w.WriteVar("coreTags", coreTags)
+
+ w.WriteConst("specialTagsStr", strings.Join(special, " "))
+}
+
+func ident(s string) string {
+ return strings.Replace(s, "-", "", -1) + "Index"
+}
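
For orientation, the ident helper above determines the names of the generated ID constants by stripping hyphens and appending "Index". A trivial sketch with hypothetical tags:

package main

import (
	"fmt"
	"strings"
)

// ident mirrors the helper above: strip hyphens, append "Index".
func ident(s string) string { return strings.Replace(s, "-", "", -1) + "Index" }

func main() {
	for _, tag := range []string{"en", "en-US", "sr-Latn-RS"} {
		fmt.Println(ident(tag)) // enIndex, enUSIndex, srLatnRSIndex
	}
}
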
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
new file mode 100644
index 000000000..9543d5832
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "log"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/language"
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/unicode/cldr"
+)
+
+func main() {
+ r := gen.OpenCLDRCoreZip()
+ defer r.Close()
+
+ d := &cldr.Decoder{}
+ data, err := d.DecodeZip(r)
+ if err != nil {
+ log.Fatalf("DecodeZip: %v", err)
+ }
+
+ w := gen.NewCodeWriter()
+ defer w.WriteGoFile("parents.go", "compact")
+
+ // Create parents table.
+ type ID uint16
+ parents := make([]ID, compact.NumCompactTags)
+ for _, loc := range data.Locales() {
+ tag := language.MustParse(loc)
+ index, ok := compact.FromTag(tag)
+ if !ok {
+ continue
+ }
+ parentIndex := compact.ID(0) // und
+ for p := tag.Parent(); p != language.Und; p = p.Parent() {
+ if x, ok := compact.FromTag(p); ok {
+ parentIndex = x
+ break
+ }
+ }
+ parents[index] = ID(parentIndex)
+ }
+
+ w.WriteComment(`
+ parents maps a compact index of a tag to the compact index of the parent of
+ this tag.`)
+ w.WriteVar("parents", parents)
+}
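
The loop above resolves each locale's parent by walking up the parent chain and taking the first ancestor that has its own compact index, falling back to und (index 0). A toy, self-contained sketch of that idea; the tag strings, the map standing in for compact.FromTag, and the truncation-based parent function are illustrative stand-ins, not the x/text API.

package main

import (
	"fmt"
	"strings"
)

// compactIndex stands in for compact.FromTag: only these tags have an index.
var compactIndex = map[string]int{"und": 0, "en": 5, "en-GB": 6}

// parent stands in for language.Tag.Parent by dropping the last subtag; the
// real parent relation is CLDR-defined and more subtle.
func parent(tag string) string {
	if i := strings.LastIndex(tag, "-"); i >= 0 {
		return tag[:i]
	}
	return "und"
}

// resolveParent mirrors the loop in gen_parents.go: the first ancestor with
// its own compact index wins, otherwise und (index 0).
func resolveParent(tag string) int {
	for p := parent(tag); p != "und"; p = parent(p) {
		if idx, ok := compactIndex[p]; ok {
			return idx
		}
	}
	return 0
}

func main() {
	fmt.Println(resolveParent("en-GB-oxendict")) // 6 (en-GB)
	fmt.Println(resolveParent("fr-CA"))          // 0 (und: "fr" has no index here)
}
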
diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go
new file mode 100644
index 000000000..cdcc7febc
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/gen.go
@@ -0,0 +1,1520 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Language tag table generator.
+// Data read from the web.
+
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/tag"
+ "golang.org/x/text/unicode/cldr"
+)
+
+var (
+ test = flag.Bool("test",
+ false,
+ "test existing tables; can be used to compare web data with package data.")
+ outputFile = flag.String("output",
+ "tables.go",
+ "output file for generated tables")
+)
+
+var comment = []string{
+ `
+lang holds an alphabetically sorted list of ISO-639 language identifiers.
+All entries are 4 bytes. The index of the identifier (divided by 4) is the langID.
+For 2-byte language identifiers, the two successive bytes have the following meaning:
+ - if the first letter of the 2- and 3-letter ISO codes are the same:
+ the second and third letter of the 3-letter ISO code.
+ - otherwise: a 0 and an index into altLangISO3, right-shifted by 2 bits.
+For 3-byte language identifiers the 4th byte is 0.`,
+ `
+langNoIndex is a bit vector of all 3-letter language codes that are not used as an index
+in lookup tables. The language ids for these language codes are derived directly
+from the letters and are not consecutive.`,
+ `
+altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives
+to 2-letter language codes that cannot be derived using the method described above.
+Each 3-letter code is followed by its 1-byte langID.`,
+ `
+altLangIndex is used to convert indexes in altLangISO3 to langIDs.`,
+ `
+AliasMap maps langIDs to their suggested replacements.`,
+ `
+script is an alphabetically sorted list of ISO 15924 codes. The index
+of the script in the string, divided by 4, is the internal scriptID.`,
+ `
+isoRegionOffset needs to be added to the index of regionISO to obtain the regionID
+for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for
+the UN.M49 codes used for groups.)`,
+ `
+regionISO holds a list of alphabetically sorted 2-letter ISO region codes.
+Each 2-letter code is followed by two bytes with the following meaning:
+ - [A-Z]{2}: the first letter of the 2-letter code plus these two
+ letters form the 3-letter ISO code.
+ - 0, n: index into altRegionISO3.`,
+ `
+regionTypes defines the status of a region for various standards.`,
+ `
+m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are
+codes indicating collections of regions.`,
+ `
+m49Index gives indexes into fromM49 based on the three most significant bits
+of a 10-bit UN.M49 code. To search for a UN.M49 code in fromM49, search in
+ fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
+for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
+The region code is stored in the 9 lsb of the indexed value.`,
+ `
+fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`,
+ `
+altRegionISO3 holds a list of 3-letter region codes that cannot be
+mapped to 2-letter codes using the default algorithm. This is a short list.`,
+ `
+altRegionIDs holds a list of regionIDs the positions of which match those
+of the 3-letter ISO codes in altRegionISO3.`,
+ `
+variantNumSpecialized is the number of specialized variants in variants.`,
+ `
+suppressScript is an index from langID to the dominant script for that language,
+if it exists. If a script is given, it should be suppressed from the language tag.`,
+ `
+likelyLang is a lookup table, indexed by langID, for the most likely
+scripts and regions given incomplete information. If more entries exist for a
+given language, region and script are the index and size respectively
+of the list in likelyLangList.`,
+ `
+likelyLangList holds the lists referenced by likelyLang.
+ `
+likelyRegion is a lookup table, indexed by regionID, for the most likely
+languages and scripts given incomplete information. If more entries exist
+for a given regionID, lang and script are the index and size respectively
+of the list in likelyRegionList.
+TODO: exclude containers and user-definable regions from the list.`,
+ `
+likelyRegionList holds the lists referenced by likelyRegion.
+ `
+likelyScript is a lookup table, indexed by scriptID, for the most likely
+languages and regions given a script.`,
+ `
+nRegionGroups is the number of region groups.`,
+ `
+regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
+where each set holds all groupings that are directly connected in a region
+containment graph.`,
+ `
+regionInclusionBits is an array of bit vectors where every vector represents
+a set of region groupings. These sets are used to compute the distance
+between two regions for the purpose of language matching.`,
+ `
+regionInclusionNext marks, for each entry in regionInclusionBits, the set of
+all groups that are reachable from the groups set in the respective entry.`,
+}
+
+// TODO: consider changing some of these structures to tries. This can reduce
+// memory, but may increase the need for memory allocations. This could be
+// mitigated if we can piggyback on language tags for common cases.
+
+func failOnError(e error) {
+ if e != nil {
+ log.Panic(e)
+ }
+}
+
+type setType int
+
+const (
+ Indexed setType = 1 + iota // all elements must be of same size
+ Linear
+)
+
+type stringSet struct {
+ s []string
+ sorted, frozen bool
+
+ // We often need to update values after the creation of an index is completed.
+ // We include a convenience map for keeping track of this.
+ update map[string]string
+ typ setType // used for checking.
+}
+
+func (ss *stringSet) clone() stringSet {
+ c := *ss
+ c.s = append([]string(nil), c.s...)
+ return c
+}
+
+func (ss *stringSet) setType(t setType) {
+ if ss.typ != t && ss.typ != 0 {
+ log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ)
+ }
+}
+
+// parse parses a whitespace-separated string and initializes ss with its
+// components.
+func (ss *stringSet) parse(s string) {
+ scan := bufio.NewScanner(strings.NewReader(s))
+ scan.Split(bufio.ScanWords)
+ for scan.Scan() {
+ ss.add(scan.Text())
+ }
+}
+
+func (ss *stringSet) assertChangeable() {
+ if ss.frozen {
+ log.Panic("attempt to modify a frozen stringSet")
+ }
+}
+
+func (ss *stringSet) add(s string) {
+ ss.assertChangeable()
+ ss.s = append(ss.s, s)
+ ss.sorted = ss.frozen
+}
+
+func (ss *stringSet) freeze() {
+ ss.compact()
+ ss.frozen = true
+}
+
+func (ss *stringSet) compact() {
+ if ss.sorted {
+ return
+ }
+ a := ss.s
+ sort.Strings(a)
+ k := 0
+ for i := 1; i < len(a); i++ {
+ if a[k] != a[i] {
+ a[k+1] = a[i]
+ k++
+ }
+ }
+ ss.s = a[:k+1]
+ ss.sorted = ss.frozen
+}
+
+type funcSorter struct {
+ fn func(a, b string) bool
+ sort.StringSlice
+}
+
+func (s funcSorter) Less(i, j int) bool {
+ return s.fn(s.StringSlice[i], s.StringSlice[j])
+}
+
+func (ss *stringSet) sortFunc(f func(a, b string) bool) {
+ ss.compact()
+ sort.Sort(funcSorter{f, sort.StringSlice(ss.s)})
+}
+
+func (ss *stringSet) remove(s string) {
+ ss.assertChangeable()
+ if i, ok := ss.find(s); ok {
+ copy(ss.s[i:], ss.s[i+1:])
+ ss.s = ss.s[:len(ss.s)-1]
+ }
+}
+
+func (ss *stringSet) replace(ol, nu string) {
+ ss.s[ss.index(ol)] = nu
+ ss.sorted = ss.frozen
+}
+
+func (ss *stringSet) index(s string) int {
+ ss.setType(Indexed)
+ i, ok := ss.find(s)
+ if !ok {
+ if i < len(ss.s) {
+ log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i])
+ }
+ log.Panicf("find: item %q is not in list", s)
+
+ }
+ return i
+}
+
+func (ss *stringSet) find(s string) (int, bool) {
+ ss.compact()
+ i := sort.SearchStrings(ss.s, s)
+ return i, i != len(ss.s) && ss.s[i] == s
+}
+
+func (ss *stringSet) slice() []string {
+ ss.compact()
+ return ss.s
+}
+
+func (ss *stringSet) updateLater(v, key string) {
+ if ss.update == nil {
+ ss.update = map[string]string{}
+ }
+ ss.update[v] = key
+}
+
+// join joins the string and ensures that all entries are of the same length.
+func (ss *stringSet) join() string {
+ ss.setType(Indexed)
+ n := len(ss.s[0])
+ for _, s := range ss.s {
+ if len(s) != n {
+ log.Panicf("join: not all entries are of the same length: %q", s)
+ }
+ }
+ ss.s = append(ss.s, strings.Repeat("\xff", n))
+ return strings.Join(ss.s, "")
+}
+
+// ianaEntry holds information for an entry in the IANA Language Subtag Repository.
+// All types use the same entry.
+// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various
+// fields.
+type ianaEntry struct {
+ typ string
+ description []string
+ scope string
+ added string
+ preferred string
+ deprecated string
+ suppressScript string
+ macro string
+ prefix []string
+}
+
+type builder struct {
+ w *gen.CodeWriter
+ hw io.Writer // MultiWriter for w and w.Hash
+ data *cldr.CLDR
+ supp *cldr.SupplementalData
+
+ // indices
+ locale stringSet // common locales
+ lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data
+ langNoIndex stringSet // 3-letter ISO codes with no associated data
+ script stringSet // 4-letter ISO codes
+ region stringSet // 2-letter ISO or 3-digit UN M49 codes
+ variant stringSet // 4-8-alphanumeric variant code.
+
+ // Region codes that are groups with their corresponding group IDs.
+ groups map[int]index
+
+ // langInfo
+ registry map[string]*ianaEntry
+}
+
+type index uint
+
+func newBuilder(w *gen.CodeWriter) *builder {
+ r := gen.OpenCLDRCoreZip()
+ defer r.Close()
+ d := &cldr.Decoder{}
+ data, err := d.DecodeZip(r)
+ failOnError(err)
+ b := builder{
+ w: w,
+ hw: io.MultiWriter(w, w.Hash),
+ data: data,
+ supp: data.Supplemental(),
+ }
+ b.parseRegistry()
+ return &b
+}
+
+func (b *builder) parseRegistry() {
+ r := gen.OpenIANAFile("assignments/language-subtag-registry")
+ defer r.Close()
+ b.registry = make(map[string]*ianaEntry)
+
+ scan := bufio.NewScanner(r)
+ scan.Split(bufio.ScanWords)
+ var record *ianaEntry
+ for more := scan.Scan(); more; {
+ key := scan.Text()
+ more = scan.Scan()
+ value := scan.Text()
+ switch key {
+ case "Type:":
+ record = &ianaEntry{typ: value}
+ case "Subtag:", "Tag:":
+ if s := strings.SplitN(value, "..", 2); len(s) > 1 {
+ for a := s[0]; a <= s[1]; a = inc(a) {
+ b.addToRegistry(a, record)
+ }
+ } else {
+ b.addToRegistry(value, record)
+ }
+ case "Suppress-Script:":
+ record.suppressScript = value
+ case "Added:":
+ record.added = value
+ case "Deprecated:":
+ record.deprecated = value
+ case "Macrolanguage:":
+ record.macro = value
+ case "Preferred-Value:":
+ record.preferred = value
+ case "Prefix:":
+ record.prefix = append(record.prefix, value)
+ case "Scope:":
+ record.scope = value
+ case "Description:":
+ buf := []byte(value)
+ for more = scan.Scan(); more; more = scan.Scan() {
+ b := scan.Bytes()
+ if b[0] == '%' || b[len(b)-1] == ':' {
+ break
+ }
+ buf = append(buf, ' ')
+ buf = append(buf, b...)
+ }
+ record.description = append(record.description, string(buf))
+ continue
+ default:
+ continue
+ }
+ more = scan.Scan()
+ }
+ if scan.Err() != nil {
+ log.Panic(scan.Err())
+ }
+}
+
+func (b *builder) addToRegistry(key string, entry *ianaEntry) {
+ if info, ok := b.registry[key]; ok {
+ if info.typ != "language" || entry.typ != "extlang" {
+ log.Fatalf("parseRegistry: tag %q already exists", key)
+ }
+ } else {
+ b.registry[key] = entry
+ }
+}
+
+var commentIndex = make(map[string]string)
+
+func init() {
+ for _, s := range comment {
+ key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0])
+ commentIndex[key] = s
+ }
+}
+
+func (b *builder) comment(name string) {
+ if s := commentIndex[name]; len(s) > 0 {
+ b.w.WriteComment(s)
+ } else {
+ fmt.Fprintln(b.w)
+ }
+}
+
+func (b *builder) pf(f string, x ...interface{}) {
+ fmt.Fprintf(b.hw, f, x...)
+ fmt.Fprint(b.hw, "\n")
+}
+
+func (b *builder) p(x ...interface{}) {
+ fmt.Fprintln(b.hw, x...)
+}
+
+func (b *builder) addSize(s int) {
+ b.w.Size += s
+ b.pf("// Size: %d bytes", s)
+}
+
+func (b *builder) writeConst(name string, x interface{}) {
+ b.comment(name)
+ b.w.WriteConst(name, x)
+}
+
+// writeConsts computes f(v) for all v in values and writes the results
+// as constants named _v to a single constant block.
+func (b *builder) writeConsts(f func(string) int, values ...string) {
+ b.pf("const (")
+ for _, v := range values {
+ b.pf("\t_%s = %v", v, f(v))
+ }
+ b.pf(")")
+}
+
+// writeType writes the type of the given value, which must be a struct.
+func (b *builder) writeType(value interface{}) {
+ b.comment(reflect.TypeOf(value).Name())
+ b.w.WriteType(value)
+}
+
+func (b *builder) writeSlice(name string, ss interface{}) {
+ b.writeSliceAddSize(name, 0, ss)
+}
+
+func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) {
+ b.comment(name)
+ b.w.Size += extraSize
+ v := reflect.ValueOf(ss)
+ t := v.Type().Elem()
+ b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len())
+
+ fmt.Fprintf(b.w, "var %s = ", name)
+ b.w.WriteArray(ss)
+ b.p()
+}
+
+type FromTo struct {
+ From, To uint16
+}
+
+func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) {
+ ss.sortFunc(func(a, b string) bool {
+ return index(a) < index(b)
+ })
+ m := []FromTo{}
+ for _, s := range ss.s {
+ m = append(m, FromTo{index(s), index(ss.update[s])})
+ }
+ b.writeSlice(name, m)
+}
+
+const base = 'z' - 'a' + 1
+
+func strToInt(s string) uint {
+ v := uint(0)
+ for i := 0; i < len(s); i++ {
+ v *= base
+ v += uint(s[i] - 'a')
+ }
+ return v
+}
+
+// intToStr converts the given integer to the original ASCII string passed to strToInt.
+// len(s) must match the number of characters obtained.
+func intToStr(v uint, s []byte) {
+ for i := len(s) - 1; i >= 0; i-- {
+ s[i] = byte(v%base) + 'a'
+ v /= base
+ }
+}
+
+func (b *builder) writeBitVector(name string, ss []string) {
+ vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8)))
+ for _, s := range ss {
+ v := strToInt(s)
+ vec[v/8] |= 1 << (v % 8)
+ }
+ b.writeSlice(name, vec)
+}
+
+// TODO: convert this type into a list or two-stage trie.
+func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) {
+ b.comment(name)
+ v := reflect.ValueOf(m)
+ sz := v.Len() * (2 + int(v.Type().Key().Size()))
+ for _, k := range m {
+ sz += len(k)
+ }
+ b.addSize(sz)
+ keys := []string{}
+ b.pf(`var %s = map[string]uint16{`, name)
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ b.pf("\t%q: %v,", k, f(m[k]))
+ }
+ b.p("}")
+}
+
+func (b *builder) writeMap(name string, m interface{}) {
+ b.comment(name)
+ v := reflect.ValueOf(m)
+ sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size()))
+ b.addSize(sz)
+ f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool {
+ return strings.IndexRune("{}, ", r) != -1
+ })
+ sort.Strings(f[1:])
+ b.pf(`var %s = %s{`, name, f[0])
+ for _, kv := range f[1:] {
+ b.pf("\t%s,", kv)
+ }
+ b.p("}")
+}
+
+func (b *builder) langIndex(s string) uint16 {
+ if s == "und" {
+ return 0
+ }
+ if i, ok := b.lang.find(s); ok {
+ return uint16(i)
+ }
+ return uint16(strToInt(s)) + uint16(len(b.lang.s))
+}
+
+// inc advances the string to its lexicographical successor.
+func inc(s string) string {
+ const maxTagLength = 4
+ var buf [maxTagLength]byte
+ intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)])
+ for i := 0; i < len(s); i++ {
+ if s[i] <= 'Z' {
+ buf[i] -= 'a' - 'A'
+ }
+ }
+ return string(buf[:len(s)])
+}
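
strToInt, intToStr and inc above treat a lowercase tag as a base-26 number with 'a' = 0; the same value is also the bit position used when writeBitVector emits the langNoIndex vector later in this file. A minimal round-trip sketch (the example code "en" is arbitrary):

package main

import "fmt"

const base = 'z' - 'a' + 1 // 26

// strToInt and intToStr mirror the helpers above: a lowercase tag is read as
// a base-26 number with 'a' = 0.
func strToInt(s string) uint {
	v := uint(0)
	for i := 0; i < len(s); i++ {
		v = v*base + uint(s[i]-'a')
	}
	return v
}

func intToStr(v uint, s []byte) {
	for i := len(s) - 1; i >= 0; i-- {
		s[i] = byte(v%base) + 'a'
		v /= base
	}
}

func main() {
	// "en" -> 4*26 + 13 = 117; adding one and converting back yields "eo",
	// which is what inc("en") returns (inc additionally restores upper case).
	v := strToInt("en")
	buf := make([]byte, 2)
	intToStr(v+1, buf)
	fmt.Println(v, string(buf)) // 117 eo
}
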
+
+func (b *builder) parseIndices() {
+ meta := b.supp.Metadata
+
+ for k, v := range b.registry {
+ var ss *stringSet
+ switch v.typ {
+ case "language":
+ if len(k) == 2 || v.suppressScript != "" || v.scope == "special" {
+ b.lang.add(k)
+ continue
+ } else {
+ ss = &b.langNoIndex
+ }
+ case "region":
+ ss = &b.region
+ case "script":
+ ss = &b.script
+ case "variant":
+ ss = &b.variant
+ default:
+ continue
+ }
+ ss.add(k)
+ }
+ // Include any language for which there is data.
+ for _, lang := range b.data.Locales() {
+ if x := b.data.RawLDML(lang); false ||
+ x.LocaleDisplayNames != nil ||
+ x.Characters != nil ||
+ x.Delimiters != nil ||
+ x.Measurement != nil ||
+ x.Dates != nil ||
+ x.Numbers != nil ||
+ x.Units != nil ||
+ x.ListPatterns != nil ||
+ x.Collations != nil ||
+ x.Segmentations != nil ||
+ x.Rbnf != nil ||
+ x.Annotations != nil ||
+ x.Metadata != nil {
+
+ from := strings.Split(lang, "_")
+ if lang := from[0]; lang != "root" {
+ b.lang.add(lang)
+ }
+ }
+ }
+ // Include locales for plural rules, which uses a different structure.
+ for _, plurals := range b.data.Supplemental().Plurals {
+ for _, rules := range plurals.PluralRules {
+ for _, lang := range strings.Split(rules.Locales, " ") {
+ if lang = strings.Split(lang, "_")[0]; lang != "root" {
+ b.lang.add(lang)
+ }
+ }
+ }
+ }
+ // Include languages in likely subtags.
+ for _, m := range b.supp.LikelySubtags.LikelySubtag {
+ from := strings.Split(m.From, "_")
+ b.lang.add(from[0])
+ }
+ // Include ISO-639 alpha-3 bibliographic entries.
+ for _, a := range meta.Alias.LanguageAlias {
+ if a.Reason == "bibliographic" {
+ b.langNoIndex.add(a.Type)
+ }
+ }
+ // Include regions in territoryAlias (not all are in the IANA registry!)
+ for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
+ if len(reg.Type) == 2 {
+ b.region.add(reg.Type)
+ }
+ }
+
+ for _, s := range b.lang.s {
+ if len(s) == 3 {
+ b.langNoIndex.remove(s)
+ }
+ }
+ b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice()))
+ b.writeConst("NumScripts", len(b.script.slice()))
+ b.writeConst("NumRegions", len(b.region.slice()))
+
+ // Add dummy codes at the start of each list to represent "unspecified".
+ b.lang.add("---")
+ b.script.add("----")
+ b.region.add("---")
+
+ // common locales
+ b.locale.parse(meta.DefaultContent.Locales)
+}
+
+// TODO: region inclusion data will probably not be used in future matchers.
+
+func (b *builder) computeRegionGroups() {
+ b.groups = make(map[int]index)
+
+ // Create group indices.
+ for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID.
+ b.groups[i] = index(len(b.groups))
+ }
+ for _, g := range b.supp.TerritoryContainment.Group {
+ // Skip UN and EURO zone as they are flattening the containment
+ // relationship.
+ if g.Type == "EZ" || g.Type == "UN" {
+ continue
+ }
+ group := b.region.index(g.Type)
+ if _, ok := b.groups[group]; !ok {
+ b.groups[group] = index(len(b.groups))
+ }
+ }
+ if len(b.groups) > 64 {
+ log.Fatalf("only 64 groups supported, found %d", len(b.groups))
+ }
+ b.writeConst("nRegionGroups", len(b.groups))
+}
+
+var langConsts = []string{
+ "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es",
+ "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is",
+ "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml",
+ "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt",
+ "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th",
+ "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu",
+
+ // constants for grandfathered tags (if not already defined)
+ "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu",
+ "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn",
+}
+
+// writeLanguage generates all tables needed for language canonicalization.
+func (b *builder) writeLanguage() {
+ meta := b.supp.Metadata
+
+ b.writeConst("nonCanonicalUnd", b.lang.index("und"))
+ b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
+ b.writeConst("langPrivateStart", b.langIndex("qaa"))
+ b.writeConst("langPrivateEnd", b.langIndex("qtz"))
+
+ // Get language codes that need to be mapped (overlong 3-letter codes,
+ // deprecated 2-letter codes, legacy and grandfathered tags).
+ langAliasMap := stringSet{}
+ aliasTypeMap := map[string]AliasType{}
+
+ // altLangISO3 get the alternative ISO3 names that need to be mapped.
+ altLangISO3 := stringSet{}
+ // Add dummy start to avoid the use of index 0.
+ altLangISO3.add("---")
+ altLangISO3.updateLater("---", "aa")
+
+ lang := b.lang.clone()
+ for _, a := range meta.Alias.LanguageAlias {
+ if a.Replacement == "" {
+ a.Replacement = "und"
+ }
+ // TODO: support mapping to tags
+ repl := strings.SplitN(a.Replacement, "_", 2)[0]
+ if a.Reason == "overlong" {
+ if len(a.Replacement) == 2 && len(a.Type) == 3 {
+ lang.updateLater(a.Replacement, a.Type)
+ }
+ } else if len(a.Type) <= 3 {
+ switch a.Reason {
+ case "macrolanguage":
+ aliasTypeMap[a.Type] = Macro
+ case "deprecated":
+ // handled elsewhere
+ continue
+ case "bibliographic", "legacy":
+ if a.Type == "no" {
+ continue
+ }
+ aliasTypeMap[a.Type] = Legacy
+ default:
+ log.Fatalf("new %s alias: %s", a.Reason, a.Type)
+ }
+ langAliasMap.add(a.Type)
+ langAliasMap.updateLater(a.Type, repl)
+ }
+ }
+ // Manually add the mapping of "nb" (Norwegian) to its macro language.
+ // This can be removed if CLDR adopts this change.
+ langAliasMap.add("nb")
+ langAliasMap.updateLater("nb", "no")
+ aliasTypeMap["nb"] = Macro
+
+ for k, v := range b.registry {
+ // Also add deprecated values for 3-letter ISO codes, which CLDR omits.
+ if v.typ == "language" && v.deprecated != "" && v.preferred != "" {
+ langAliasMap.add(k)
+ langAliasMap.updateLater(k, v.preferred)
+ aliasTypeMap[k] = Deprecated
+ }
+ }
+ // Fix CLDR mappings.
+ lang.updateLater("tl", "tgl")
+ lang.updateLater("sh", "hbs")
+ lang.updateLater("mo", "mol")
+ lang.updateLater("no", "nor")
+ lang.updateLater("tw", "twi")
+ lang.updateLater("nb", "nob")
+ lang.updateLater("ak", "aka")
+ lang.updateLater("bh", "bih")
+
+ // Ensure that each 2-letter code is matched with a 3-letter code.
+ for _, v := range lang.s[1:] {
+ s, ok := lang.update[v]
+ if !ok {
+ if s, ok = lang.update[langAliasMap.update[v]]; !ok {
+ continue
+ }
+ lang.update[v] = s
+ }
+ if v[0] != s[0] {
+ altLangISO3.add(s)
+ altLangISO3.updateLater(s, v)
+ }
+ }
+
+ // Complete canonicalized language tags.
+ lang.freeze()
+ for i, v := range lang.s {
+ // We can avoid these manual entries by using the IANA registry directly.
+ // Seems easier to update the list manually, as changes are rare.
+ // The panic in this loop will trigger if we miss an entry.
+ add := ""
+ if s, ok := lang.update[v]; ok {
+ if s[0] == v[0] {
+ add = s[1:]
+ } else {
+ add = string([]byte{0, byte(altLangISO3.index(s))})
+ }
+ } else if len(v) == 3 {
+ add = "\x00"
+ } else {
+ log.Panicf("no data for long form of %q", v)
+ }
+ lang.s[i] += add
+ }
+ b.writeConst("lang", tag.Index(lang.join()))
+
+ b.writeConst("langNoIndexOffset", len(b.lang.s))
+
+ // space of all valid 3-letter language identifiers.
+ b.writeBitVector("langNoIndex", b.langNoIndex.slice())
+
+ altLangIndex := []uint16{}
+ for i, s := range altLangISO3.slice() {
+ altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))})
+ if i > 0 {
+ idx := b.lang.index(altLangISO3.update[s])
+ altLangIndex = append(altLangIndex, uint16(idx))
+ }
+ }
+ b.writeConst("altLangISO3", tag.Index(altLangISO3.join()))
+ b.writeSlice("altLangIndex", altLangIndex)
+
+ b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex)
+ types := make([]AliasType, len(langAliasMap.s))
+ for i, s := range langAliasMap.s {
+ types[i] = aliasTypeMap[s]
+ }
+ b.writeSlice("AliasTypes", types)
+}
+
+var scriptConsts = []string{
+ "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
+ "Zzzz",
+}
+
+func (b *builder) writeScript() {
+ b.writeConsts(b.script.index, scriptConsts...)
+ b.writeConst("script", tag.Index(b.script.join()))
+
+ supp := make([]uint8, len(b.lang.slice()))
+ for i, v := range b.lang.slice()[1:] {
+ if sc := b.registry[v].suppressScript; sc != "" {
+ supp[i+1] = uint8(b.script.index(sc))
+ }
+ }
+ b.writeSlice("suppressScript", supp)
+
+ // There is only one deprecated script in CLDR. This value is hard-coded.
+ // We check here if the code must be updated.
+ for _, a := range b.supp.Metadata.Alias.ScriptAlias {
+ if a.Type != "Qaai" {
+ log.Panicf("unexpected deprecated script %q", a.Type)
+ }
+ }
+}
+
+func parseM49(s string) int16 {
+ if len(s) == 0 {
+ return 0
+ }
+ v, err := strconv.ParseUint(s, 10, 10)
+ failOnError(err)
+ return int16(v)
+}
+
+var regionConsts = []string{
+ "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
+ "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
+}
+
+func (b *builder) writeRegion() {
+ b.writeConsts(b.region.index, regionConsts...)
+
+ isoOffset := b.region.index("AA")
+ m49map := make([]int16, len(b.region.slice()))
+ fromM49map := make(map[int16]int)
+ altRegionISO3 := ""
+ altRegionIDs := []uint16{}
+
+ b.writeConst("isoRegionOffset", isoOffset)
+
+ // 2-letter region lookup and mapping to numeric codes.
+ regionISO := b.region.clone()
+ regionISO.s = regionISO.s[isoOffset:]
+ regionISO.sorted = false
+
+ regionTypes := make([]byte, len(b.region.s))
+
+ // Is the region valid BCP 47?
+ for s, e := range b.registry {
+ if len(s) == 2 && s == strings.ToUpper(s) {
+ i := b.region.index(s)
+ for _, d := range e.description {
+ if strings.Contains(d, "Private use") {
+ regionTypes[i] = iso3166UserAssigned
+ }
+ }
+ regionTypes[i] |= bcp47Region
+ }
+ }
+
+ // Is the region a valid ccTLD?
+ r := gen.OpenIANAFile("domains/root/db")
+ defer r.Close()
+
+ buf, err := ioutil.ReadAll(r)
+ failOnError(err)
+ re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`)
+ for _, m := range re.FindAllSubmatch(buf, -1) {
+ i := b.region.index(strings.ToUpper(string(m[1])))
+ regionTypes[i] |= ccTLD
+ }
+
+ b.writeSlice("regionTypes", regionTypes)
+
+ iso3Set := make(map[string]int)
+ update := func(iso2, iso3 string) {
+ i := regionISO.index(iso2)
+ if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] {
+ regionISO.s[i] += iso3[1:]
+ iso3Set[iso3] = -1
+ } else {
+ if ok && j >= 0 {
+ regionISO.s[i] += string([]byte{0, byte(j)})
+ } else {
+ iso3Set[iso3] = len(altRegionISO3)
+ regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))})
+ altRegionISO3 += iso3
+ altRegionIDs = append(altRegionIDs, uint16(isoOffset+i))
+ }
+ }
+ }
+ for _, tc := range b.supp.CodeMappings.TerritoryCodes {
+ i := regionISO.index(tc.Type) + isoOffset
+ if d := m49map[i]; d != 0 {
+ log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d)
+ }
+ m49 := parseM49(tc.Numeric)
+ m49map[i] = m49
+ if r := fromM49map[m49]; r == 0 {
+ fromM49map[m49] = i
+ } else if r != i {
+ dep := b.registry[regionISO.s[r-isoOffset]].deprecated
+ if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) {
+ fromM49map[m49] = i
+ }
+ }
+ }
+ for _, ta := range b.supp.Metadata.Alias.TerritoryAlias {
+ if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 {
+ from := parseM49(ta.Type)
+ if r := fromM49map[from]; r == 0 {
+ fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset
+ }
+ }
+ }
+ for _, tc := range b.supp.CodeMappings.TerritoryCodes {
+ if len(tc.Alpha3) == 3 {
+ update(tc.Type, tc.Alpha3)
+ }
+ }
+ // These entries are not included in territoryCodes. They are mostly
+ // 3-letter variants of deleted codes, plus an entry for QU.
+ for _, m := range []struct{ iso2, iso3 string }{
+ {"CT", "CTE"},
+ {"DY", "DHY"},
+ {"HV", "HVO"},
+ {"JT", "JTN"},
+ {"MI", "MID"},
+ {"NH", "NHB"},
+ {"NQ", "ATN"},
+ {"PC", "PCI"},
+ {"PU", "PUS"},
+ {"PZ", "PCZ"},
+ {"RH", "RHO"},
+ {"VD", "VDR"},
+ {"WK", "WAK"},
+ // These three-letter codes are used for others as well.
+ {"FQ", "ATF"},
+ } {
+ update(m.iso2, m.iso3)
+ }
+ for i, s := range regionISO.s {
+ if len(s) != 4 {
+ regionISO.s[i] = s + " "
+ }
+ }
+ b.writeConst("regionISO", tag.Index(regionISO.join()))
+ b.writeConst("altRegionISO3", altRegionISO3)
+ b.writeSlice("altRegionIDs", altRegionIDs)
+
+ // Create list of deprecated regions.
+ // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only
+ // Transitionally-reserved mapping not included.
+ regionOldMap := stringSet{}
+ // Include regions in territoryAlias (not all are in the IANA registry!)
+ for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
+ if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 {
+ regionOldMap.add(reg.Type)
+ regionOldMap.updateLater(reg.Type, reg.Replacement)
+ i, _ := regionISO.find(reg.Type)
+ j, _ := regionISO.find(reg.Replacement)
+ if k := m49map[i+isoOffset]; k == 0 {
+ m49map[i+isoOffset] = m49map[j+isoOffset]
+ }
+ }
+ }
+ b.writeSortedMap("regionOldMap", &regionOldMap, func(s string) uint16 {
+ return uint16(b.region.index(s))
+ })
+ // 3-digit region lookup, groupings.
+ for i := 1; i < isoOffset; i++ {
+ m := parseM49(b.region.s[i])
+ m49map[i] = m
+ fromM49map[m] = i
+ }
+ b.writeSlice("m49", m49map)
+
+ const (
+ searchBits = 7
+ regionBits = 9
+ )
+ if len(m49map) >= 1<<regionBits {
+ log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits)
+ }
+ m49Index := [9]int16{}
+ fromM49 := []uint16{}
+ m49 := []int{}
+ for k := range fromM49map {
+ m49 = append(m49, int(k))
+ }
+ sort.Ints(m49)
+ for _, k := range m49[1:] {
+ val := (k & (1<<searchBits - 1)) << regionBits
+ fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)]))
+ m49Index[1:][k>>searchBits] = int16(len(fromM49))
+ }
+ b.writeSlice("m49Index", m49Index)
+ b.writeSlice("fromM49", fromM49)
+}
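
writeRegion packs each fromM49 entry as (code & 0x7F) << 9 | regionID and uses m49Index to bucket entries by the top three bits of the 10-bit UN.M49 code, as the m49Index comment near the top of this file describes. A minimal lookup sketch against a hypothetical one-entry table (UN.M49 code 276 paired with a made-up region index of 100):

package main

import "fmt"

// Layout constants matching writeRegion above.
const (
	searchBits = 7
	regionBits = 9
)

// Hypothetical tables holding a single entry; the real ones are generated.
var (
	// 276>>7 == 2, so bucket 2 covers fromM49[0:1] and later buckets are empty.
	m49Index = [9]int16{0, 0, 0, 1, 1, 1, 1, 1, 1}
	fromM49  = []uint16{uint16(276&(1<<searchBits-1))<<regionBits | 100}
)

// regionFromM49 mirrors the lookup described in the m49Index comment: bucket
// by the top 3 bits of the code, match the low 7 bits stored in the upper
// bits of each entry, and read the region ID from the low 9 bits.
func regionFromM49(code int) (int, bool) {
	lo, hi := m49Index[code>>searchBits], m49Index[(code>>searchBits)+1]
	want := uint16(code&(1<<searchBits-1)) << regionBits
	for _, e := range fromM49[lo:hi] {
		if e&^(1<<regionBits-1) == want {
			return int(e & (1<<regionBits - 1)), true
		}
	}
	return 0, false
}

func main() {
	fmt.Println(regionFromM49(276)) // 100 true
	fmt.Println(regionFromM49(4))   // 0 false
}
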
+
+const (
+ // TODO: put these lists in regionTypes as user data? Could be used for
+ // various optimizations and refinements and could be exposed in the API.
+ iso3166Except = "AC CP DG EA EU FX IC SU TA UK"
+ iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions.
+ // DY and RH are actually not deleted, but indeterminately reserved.
+ iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD"
+)
+
+const (
+ iso3166UserAssigned = 1 << iota
+ ccTLD
+ bcp47Region
+)
+
+func find(list []string, s string) int {
+ for i, t := range list {
+ if t == s {
+ return i
+ }
+ }
+ return -1
+}
+
+// writeVariant generates per-variant information and creates a map from variant
+// name to index value. We assign index values such that sorting multiple
+// variants by index value will result in the correct order.
+// There are two types of variants: specialized and general. Specialized variants
+// are only applicable to certain language or language-script pairs. Generalized
+// variants apply to any language. Generalized variants always sort after
+// specialized variants. We will therefore always assign a higher index value
+// to a generalized variant than any other variant. Generalized variants are
+// sorted alphabetically among themselves.
+// Specialized variants may also sort after other specialized variants. Such
+// variants will be ordered after any of the variants they may follow.
+// We assume that if a variant x is followed by a variant y, then for any prefix
+// p of x, p-x is a prefix of y. This allows us to order tags based on the
+// maximum of the length of any of its prefixes.
+// TODO: it is possible to define a set of Prefix values on variants such that
+// no total order can be defined, at which point this algorithm breaks.
+// In other words, we cannot guarantee the same order of variants for the
+// future using the same algorithm or for non-compliant combinations of
+// variants. For this reason, consider using simple alphabetic sorting
+// of variants and ignore Prefix restrictions altogether.
+func (b *builder) writeVariant() {
+ generalized := stringSet{}
+ specialized := stringSet{}
+ specializedExtend := stringSet{}
+ // Collate the variants by type and check assumptions.
+ for _, v := range b.variant.slice() {
+ e := b.registry[v]
+ if len(e.prefix) == 0 {
+ generalized.add(v)
+ continue
+ }
+ c := strings.Split(e.prefix[0], "-")
+ hasScriptOrRegion := false
+ if len(c) > 1 {
+ _, hasScriptOrRegion = b.script.find(c[1])
+ if !hasScriptOrRegion {
+ _, hasScriptOrRegion = b.region.find(c[1])
+
+ }
+ }
+ if len(c) == 1 || len(c) == 2 && hasScriptOrRegion {
+ // Variant is preceded by a language.
+ specialized.add(v)
+ continue
+ }
+ // Variant is preceded by another variant.
+ specializedExtend.add(v)
+ prefix := c[0] + "-"
+ if hasScriptOrRegion {
+ prefix += c[1]
+ }
+ for _, p := range e.prefix {
+ // Verify that the prefix minus the last element is a prefix of the
+ // predecessor element.
+ i := strings.LastIndex(p, "-")
+ pred := b.registry[p[i+1:]]
+ if find(pred.prefix, p[:i]) < 0 {
+ log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v)
+ }
+ // The sorting used below does not work in the general case. It works
+ // if we assume that variants that may be followed by others only have
+ // prefixes of the same length. Verify this.
+ count := strings.Count(p[:i], "-")
+ for _, q := range pred.prefix {
+ if c := strings.Count(q, "-"); c != count {
+ log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count)
+ }
+ }
+ if !strings.HasPrefix(p, prefix) {
+ log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix)
+ }
+ }
+ }
+
+ // Sort extended variants.
+ a := specializedExtend.s
+ less := func(v, w string) bool {
+ // Sort by the maximum number of elements.
+ maxCount := func(s string) (max int) {
+ for _, p := range b.registry[s].prefix {
+ if c := strings.Count(p, "-"); c > max {
+ max = c
+ }
+ }
+ return
+ }
+ if cv, cw := maxCount(v), maxCount(w); cv != cw {
+ return cv < cw
+ }
+ // Sort by name as tie breaker.
+ return v < w
+ }
+ sort.Sort(funcSorter{less, sort.StringSlice(a)})
+ specializedExtend.frozen = true
+
+ // Create index from variant name to index.
+ variantIndex := make(map[string]uint8)
+ add := func(s []string) {
+ for _, v := range s {
+ variantIndex[v] = uint8(len(variantIndex))
+ }
+ }
+ add(specialized.slice())
+ add(specializedExtend.s)
+ numSpecialized := len(variantIndex)
+ add(generalized.slice())
+ if n := len(variantIndex); n > 255 {
+ log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n)
+ }
+ b.writeMap("variantIndex", variantIndex)
+ b.writeConst("variantNumSpecialized", numSpecialized)
+}
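
The ordering rule spelled out in the comment on writeVariant can be exercised in isolation with the short sketch below. It is not part of the upstream generator: the variant names varxa/varxb/varxc and their sl-based prefixes are hypothetical, and the maxCount/less pair simply mirrors the sort used above.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical registry: varxb may only follow varxa, varxc only varxb.
	prefixes := map[string][]string{
		"varxa": {"sl"},
		"varxb": {"sl-varxa"},
		"varxc": {"sl-varxa-varxb"},
	}
	// Sort by the maximum number of elements in any prefix, with the name as
	// tie breaker, mirroring the less function in writeVariant.
	maxCount := func(v string) (max int) {
		for _, p := range prefixes[v] {
			if c := strings.Count(p, "-"); c > max {
				max = c
			}
		}
		return max
	}
	vs := []string{"varxc", "varxa", "varxb"}
	sort.Slice(vs, func(i, j int) bool {
		if ci, cj := maxCount(vs[i]), maxCount(vs[j]); ci != cj {
			return ci < cj
		}
		return vs[i] < vs[j]
	})
	fmt.Println(vs) // [varxa varxb varxc]
}
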
+
+func (b *builder) writeLanguageInfo() {
+}
+
+// writeLikelyData writes tables that are used both for finding parent relations and for
+// language matching. Each entry contains additional bits that indicate the status of the
+// data, so that it is known when the data cannot be used for parent relations.
+func (b *builder) writeLikelyData() {
+ const (
+ isList = 1 << iota
+ scriptInFrom
+ regionInFrom
+ )
+ type ( // generated types
+ likelyScriptRegion struct {
+ region uint16
+ script uint8
+ flags uint8
+ }
+ likelyLangScript struct {
+ lang uint16
+ script uint8
+ flags uint8
+ }
+ likelyLangRegion struct {
+ lang uint16
+ region uint16
+ }
+ // likelyTag is used for getting likely tags for group regions, where
+ // the likely region might be a region contained in the group.
+ likelyTag struct {
+ lang uint16
+ region uint16
+ script uint8
+ }
+ )
+ var ( // generated variables
+ likelyRegionGroup = make([]likelyTag, len(b.groups))
+ likelyLang = make([]likelyScriptRegion, len(b.lang.s))
+ likelyRegion = make([]likelyLangScript, len(b.region.s))
+ likelyScript = make([]likelyLangRegion, len(b.script.s))
+ likelyLangList = []likelyScriptRegion{}
+ likelyRegionList = []likelyLangScript{}
+ )
+ type fromTo struct {
+ from, to []string
+ }
+ langToOther := map[int][]fromTo{}
+ regionToOther := map[int][]fromTo{}
+ for _, m := range b.supp.LikelySubtags.LikelySubtag {
+ from := strings.Split(m.From, "_")
+ to := strings.Split(m.To, "_")
+ if len(to) != 3 {
+ log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to))
+ }
+ if len(from) > 3 {
+ log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from))
+ }
+ if from[0] != to[0] && from[0] != "und" {
+ log.Fatalf("unexpected language change in expansion: %s -> %s", from, to)
+ }
+ if len(from) == 3 {
+ if from[2] != to[2] {
+ log.Fatalf("unexpected region change in expansion: %s -> %s", from, to)
+ }
+ if from[0] != "und" {
+ log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to)
+ }
+ }
+ if len(from) == 1 || from[0] != "und" {
+ id := 0
+ if from[0] != "und" {
+ id = b.lang.index(from[0])
+ }
+ langToOther[id] = append(langToOther[id], fromTo{from, to})
+ } else if len(from) == 2 && len(from[1]) == 4 {
+ sid := b.script.index(from[1])
+ likelyScript[sid].lang = uint16(b.langIndex(to[0]))
+ likelyScript[sid].region = uint16(b.region.index(to[2]))
+ } else {
+ r := b.region.index(from[len(from)-1])
+ if id, ok := b.groups[r]; ok {
+ if from[0] != "und" {
+ log.Fatalf("region changed unexpectedly: %s -> %s", from, to)
+ }
+ likelyRegionGroup[id].lang = uint16(b.langIndex(to[0]))
+ likelyRegionGroup[id].script = uint8(b.script.index(to[1]))
+ likelyRegionGroup[id].region = uint16(b.region.index(to[2]))
+ } else {
+ regionToOther[r] = append(regionToOther[r], fromTo{from, to})
+ }
+ }
+ }
+ b.writeType(likelyLangRegion{})
+ b.writeSlice("likelyScript", likelyScript)
+
+ for id := range b.lang.s {
+ list := langToOther[id]
+ if len(list) == 1 {
+ likelyLang[id].region = uint16(b.region.index(list[0].to[2]))
+ likelyLang[id].script = uint8(b.script.index(list[0].to[1]))
+ } else if len(list) > 1 {
+ likelyLang[id].flags = isList
+ likelyLang[id].region = uint16(len(likelyLangList))
+ likelyLang[id].script = uint8(len(list))
+ for _, x := range list {
+ flags := uint8(0)
+ if len(x.from) > 1 {
+ if x.from[1] == x.to[2] {
+ flags = regionInFrom
+ } else {
+ flags = scriptInFrom
+ }
+ }
+ likelyLangList = append(likelyLangList, likelyScriptRegion{
+ region: uint16(b.region.index(x.to[2])),
+ script: uint8(b.script.index(x.to[1])),
+ flags: flags,
+ })
+ }
+ }
+ }
+ // TODO: merge suppressScript data with this table.
+ b.writeType(likelyScriptRegion{})
+ b.writeSlice("likelyLang", likelyLang)
+ b.writeSlice("likelyLangList", likelyLangList)
+
+ for id := range b.region.s {
+ list := regionToOther[id]
+ if len(list) == 1 {
+ likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0]))
+ likelyRegion[id].script = uint8(b.script.index(list[0].to[1]))
+ if len(list[0].from) > 2 {
+ likelyRegion[id].flags = scriptInFrom
+ }
+ } else if len(list) > 1 {
+ likelyRegion[id].flags = isList
+ likelyRegion[id].lang = uint16(len(likelyRegionList))
+ likelyRegion[id].script = uint8(len(list))
+ for i, x := range list {
+ if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 {
+ log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i)
+ }
+ x := likelyLangScript{
+ lang: uint16(b.langIndex(x.to[0])),
+ script: uint8(b.script.index(x.to[1])),
+ }
+ if len(list[0].from) > 2 {
+ x.flags = scriptInFrom
+ }
+ likelyRegionList = append(likelyRegionList, x)
+ }
+ }
+ }
+ b.writeType(likelyLangScript{})
+ b.writeSlice("likelyRegion", likelyRegion)
+ b.writeSlice("likelyRegionList", likelyRegionList)
+
+ b.writeType(likelyTag{})
+ b.writeSlice("likelyRegionGroup", likelyRegionGroup)
+}
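
A minimal sketch, assuming the isList encoding used above (when the flag is set, the region field holds the start index into likelyLangList and the script field holds the number of entries), of how a likelyScriptRegion entry is meant to be read. The helper likelyFor and all numeric IDs are placeholders, not part of the generated API.

package main

import "fmt"

type likelyScriptRegion struct {
	region uint16
	script uint8
	flags  uint8
}

const isList = 1 << 0

// likelyFor returns the candidate script/region pairs encoded by e: either the
// entry itself, or a sub-slice of list when the isList flag is set.
func likelyFor(e likelyScriptRegion, list []likelyScriptRegion) []likelyScriptRegion {
	if e.flags&isList == 0 {
		return []likelyScriptRegion{e}
	}
	start, n := int(e.region), int(e.script)
	return list[start : start+n]
}

func main() {
	list := []likelyScriptRegion{{region: 42, script: 7}, {region: 43, script: 9}}
	single := likelyScriptRegion{region: 99, script: 3}
	multi := likelyScriptRegion{region: 0, script: 2, flags: isList}
	fmt.Println(likelyFor(single, list)) // [{99 3 0}]
	fmt.Println(likelyFor(multi, list))  // [{42 7 0} {43 9 0}]
}
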
+
+func (b *builder) writeRegionInclusionData() {
+ var (
+ // mm holds for each group the set of groups with a distance of 1.
+ mm = make(map[int][]index)
+
+ // containment holds for each group the transitive closure of
+ // containment of other groups.
+ containment = make(map[index][]index)
+ )
+ for _, g := range b.supp.TerritoryContainment.Group {
+ // Skip UN and the EURO zone as they flatten the containment
+ // relationship.
+ if g.Type == "EZ" || g.Type == "UN" {
+ continue
+ }
+ group := b.region.index(g.Type)
+ groupIdx := b.groups[group]
+ for _, mem := range strings.Split(g.Contains, " ") {
+ r := b.region.index(mem)
+ mm[r] = append(mm[r], groupIdx)
+ if g, ok := b.groups[r]; ok {
+ mm[group] = append(mm[group], g)
+ containment[groupIdx] = append(containment[groupIdx], g)
+ }
+ }
+ }
+
+ regionContainment := make([]uint64, len(b.groups))
+ for _, g := range b.groups {
+ l := containment[g]
+
+ // Compute the transitive closure of containment.
+ for i := 0; i < len(l); i++ {
+ l = append(l, containment[l[i]]...)
+ }
+
+ // Compute the bitmask.
+ regionContainment[g] = 1 << g
+ for _, v := range l {
+ regionContainment[g] |= 1 << v
+ }
+ }
+ b.writeSlice("regionContainment", regionContainment)
+
+ regionInclusion := make([]uint8, len(b.region.s))
+ bvs := make(map[uint64]index)
+ // Make the first bitvector positions correspond with the groups.
+ for r, i := range b.groups {
+ bv := uint64(1 << i)
+ for _, g := range mm[r] {
+ bv |= 1 << g
+ }
+ bvs[bv] = i
+ regionInclusion[r] = uint8(bvs[bv])
+ }
+ for r := 1; r < len(b.region.s); r++ {
+ if _, ok := b.groups[r]; !ok {
+ bv := uint64(0)
+ for _, g := range mm[r] {
+ bv |= 1 << g
+ }
+ if bv == 0 {
+ // Pick the world for unspecified regions.
+ bv = 1 << b.groups[b.region.index("001")]
+ }
+ if _, ok := bvs[bv]; !ok {
+ bvs[bv] = index(len(bvs))
+ }
+ regionInclusion[r] = uint8(bvs[bv])
+ }
+ }
+ b.writeSlice("regionInclusion", regionInclusion)
+ regionInclusionBits := make([]uint64, len(bvs))
+ for k, v := range bvs {
+ regionInclusionBits[v] = uint64(k)
+ }
+ // Add bit vectors for increasingly large distances until a fixed point is reached.
+ regionInclusionNext := []uint8{}
+ for i := 0; i < len(regionInclusionBits); i++ {
+ bits := regionInclusionBits[i]
+ next := bits
+ for i := uint(0); i < uint(len(b.groups)); i++ {
+ if bits&(1<<i) != 0 {
+ next |= regionInclusionBits[i]
+ }
+ }
+ if _, ok := bvs[next]; !ok {
+ bvs[next] = index(len(bvs))
+ regionInclusionBits = append(regionInclusionBits, next)
+ }
+ regionInclusionNext = append(regionInclusionNext, uint8(bvs[next]))
+ }
+ b.writeSlice("regionInclusionBits", regionInclusionBits)
+ b.writeSlice("regionInclusionNext", regionInclusionNext)
+}
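
The containment bitmasks built above can be reproduced on a toy example. This is a self-contained sketch with a hypothetical three-group containment map, not upstream code; it only demonstrates the transitive-closure-into-bitmask step.

package main

import "fmt"

func main() {
	// Hypothetical group indices: group 0 contains 1, which contains 2.
	containment := map[int][]int{
		0: {1},
		1: {2},
	}
	masks := make([]uint64, 3)
	for g := 0; g < 3; g++ {
		// Compute the transitive closure of containment, as in
		// writeRegionInclusionData.
		l := append([]int(nil), containment[g]...)
		for i := 0; i < len(l); i++ {
			l = append(l, containment[l[i]]...)
		}
		// Compute the bitmask: the group's own bit plus one bit per
		// (transitively) contained group.
		masks[g] = 1 << uint(g)
		for _, v := range l {
			masks[g] |= 1 << uint(v)
		}
	}
	fmt.Printf("%03b %03b %03b\n", masks[0], masks[1], masks[2]) // 111 110 100
}
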
+
+type parentRel struct {
+ lang uint16
+ script uint8
+ maxScript uint8
+ toRegion uint16
+ fromRegion []uint16
+}
+
+func (b *builder) writeParents() {
+ b.writeType(parentRel{})
+
+ parents := []parentRel{}
+
+ // Construct parent overrides.
+ n := 0
+ for _, p := range b.data.Supplemental().ParentLocales.ParentLocale {
+ // Skipping non-standard scripts to root is implemented using addTags.
+ if p.Parent == "root" {
+ continue
+ }
+
+ sub := strings.Split(p.Parent, "_")
+ parent := parentRel{lang: b.langIndex(sub[0])}
+ if len(sub) == 2 {
+ // TODO: check that all undefined scripts are indeed Latn in these
+ // cases.
+ parent.maxScript = uint8(b.script.index("Latn"))
+ parent.toRegion = uint16(b.region.index(sub[1]))
+ } else {
+ parent.script = uint8(b.script.index(sub[1]))
+ parent.maxScript = parent.script
+ parent.toRegion = uint16(b.region.index(sub[2]))
+ }
+ for _, c := range strings.Split(p.Locales, " ") {
+ region := b.region.index(c[strings.LastIndex(c, "_")+1:])
+ parent.fromRegion = append(parent.fromRegion, uint16(region))
+ }
+ parents = append(parents, parent)
+ n += len(parent.fromRegion)
+ }
+ b.writeSliceAddSize("parents", n*2, parents)
+}
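
One plausible way to consult a parentRel entry written above, as a hedged sketch: the matching rule (equal language and maximized script, region contained in fromRegion) is an assumption for illustration rather than the package's actual Parent logic, and all numeric IDs are placeholders.

package main

import "fmt"

type parentRel struct {
	lang       uint16
	script     uint8
	maxScript  uint8
	toRegion   uint16
	fromRegion []uint16
}

// parentRegion reports the region of the assumed parent locale, or false if
// the entry does not apply to the given language/script/region combination.
func parentRegion(p parentRel, lang uint16, maxScript uint8, region uint16) (uint16, bool) {
	if lang != p.lang || maxScript != p.maxScript {
		return 0, false
	}
	for _, r := range p.fromRegion {
		if r == region {
			return p.toRegion, true
		}
	}
	return 0, false
}

func main() {
	// Placeholder IDs: language 7 with maximized script 4; regions 10-12 share
	// the parent region 99.
	p := parentRel{lang: 7, maxScript: 4, toRegion: 99, fromRegion: []uint16{10, 11, 12}}
	fmt.Println(parentRegion(p, 7, 4, 11)) // 99 true
	fmt.Println(parentRegion(p, 7, 4, 50)) // 0 false
}
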
+
+func main() {
+ gen.Init()
+
+ gen.Repackage("gen_common.go", "common.go", "language")
+
+ w := gen.NewCodeWriter()
+ defer w.WriteGoFile("tables.go", "language")
+
+ fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`)
+
+ b := newBuilder(w)
+ gen.WriteCLDRVersion(w)
+
+ b.parseIndices()
+ b.writeType(FromTo{})
+ b.writeLanguage()
+ b.writeScript()
+ b.writeRegion()
+ b.writeVariant()
+ // TODO: b.writeLocale()
+ b.computeRegionGroups()
+ b.writeLikelyData()
+ b.writeRegionInclusionData()
+ b.writeParents()
+}
diff --git a/vendor/golang.org/x/text/internal/language/gen_common.go b/vendor/golang.org/x/text/internal/language/gen_common.go
new file mode 100644
index 000000000..c419ceeb1
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/gen_common.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This file contains code common to maketables.go and the package code.
+
+// AliasType is the type of an alias in AliasMap.
+type AliasType int8
+
+const (
+ Deprecated AliasType = iota
+ Macro
+ Legacy
+
+ AliasTypeUnknown AliasType = -1
+)
diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go
new file mode 100644
index 000000000..3004eb42c
--- /dev/null
+++ b/vendor/golang.org/x/text/language/gen.go
@@ -0,0 +1,305 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Language tag table generator.
+// Data read from the web.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/language"
+ "golang.org/x/text/unicode/cldr"
+)
+
+var (
+ test = flag.Bool("test",
+ false,
+ "test existing tables; can be used to compare web data with package data.")
+ outputFile = flag.String("output",
+ "tables.go",
+ "output file for generated tables")
+)
+
+func main() {
+ gen.Init()
+
+ w := gen.NewCodeWriter()
+ defer w.WriteGoFile("tables.go", "language")
+
+ b := newBuilder(w)
+ gen.WriteCLDRVersion(w)
+
+ b.writeConstants()
+ b.writeMatchData()
+}
+
+type builder struct {
+ w *gen.CodeWriter
+ hw io.Writer // MultiWriter for w and w.Hash
+ data *cldr.CLDR
+ supp *cldr.SupplementalData
+}
+
+func (b *builder) langIndex(s string) uint16 {
+ return uint16(language.MustParseBase(s))
+}
+
+func (b *builder) regionIndex(s string) int {
+ return int(language.MustParseRegion(s))
+}
+
+func (b *builder) scriptIndex(s string) int {
+ return int(language.MustParseScript(s))
+}
+
+func newBuilder(w *gen.CodeWriter) *builder {
+ r := gen.OpenCLDRCoreZip()
+ defer r.Close()
+ d := &cldr.Decoder{}
+ data, err := d.DecodeZip(r)
+ if err != nil {
+ log.Fatal(err)
+ }
+ b := builder{
+ w: w,
+ hw: io.MultiWriter(w, w.Hash),
+ data: data,
+ supp: data.Supplemental(),
+ }
+ return &b
+}
+
+// writeConsts computes f(v) for all v in values and writes the results
+// as constants named _v to a single constant block.
+func (b *builder) writeConsts(f func(string) int, values ...string) {
+ fmt.Fprintln(b.w, "const (")
+ for _, v := range values {
+ fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v))
+ }
+ fmt.Fprintln(b.w, ")")
+}
+
+// TODO: region inclusion data will probably not be used in future matchers.
+
+var langConsts = []string{
+ "de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und",
+}
+
+var scriptConsts = []string{
+ "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
+ "Zzzz",
+}
+
+var regionConsts = []string{
+ "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
+ "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
+}
+
+func (b *builder) writeConstants() {
+ b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
+ b.writeConsts(b.regionIndex, regionConsts...)
+ b.writeConsts(b.scriptIndex, scriptConsts...)
+}
+
+type mutualIntelligibility struct {
+ want, have uint16
+ distance uint8
+ oneway bool
+}
+
+type scriptIntelligibility struct {
+ wantLang, haveLang uint16
+ wantScript, haveScript uint8
+ distance uint8
+ // Always oneway
+}
+
+type regionIntelligibility struct {
+ lang uint16 // compact language id
+ script uint8 // 0 means any
+ group uint8 // 0 means any; if bit 7 is set it means inverse
+ distance uint8
+ // Always twoway.
+}
+
+// writeMatchData writes tables with languages and scripts for which there is
+// mutual intelligibility. The data is based on CLDR's languageMatching data.
+// Note that we use a different algorithm than the one defined by CLDR and that
+// we slightly modify the data. For example, we convert scores to confidence levels.
+// We also drop all region-related data as we use a different algorithm to
+// determine region equivalence.
+func (b *builder) writeMatchData() {
+ lm := b.supp.LanguageMatching.LanguageMatches
+ cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")
+
+ regionHierarchy := map[string][]string{}
+ for _, g := range b.supp.TerritoryContainment.Group {
+ regions := strings.Split(g.Contains, " ")
+ regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
+ }
+ regionToGroups := make([]uint8, language.NumRegions)
+
+ idToIndex := map[string]uint8{}
+ for i, mv := range lm[0].MatchVariable {
+ if i > 6 {
+ log.Fatalf("Too many groups: %d", i)
+ }
+ idToIndex[mv.Id] = uint8(i + 1)
+ // TODO: also handle '-'
+ for _, r := range strings.Split(mv.Value, "+") {
+ todo := []string{r}
+ for k := 0; k < len(todo); k++ {
+ r := todo[k]
+ regionToGroups[b.regionIndex(r)] |= 1 << uint8(i)
+ todo = append(todo, regionHierarchy[r]...)
+ }
+ }
+ }
+ b.w.WriteVar("regionToGroups", regionToGroups)
+
+ // paradigmLocales maps a language id to its in-group and out-of-group regions.
+ paradigmLocales := [][3]uint16{}
+ locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
+ for i := 0; i < len(locales); i += 2 {
+ x := [3]uint16{}
+ for j := 0; j < 2; j++ {
+ pc := strings.SplitN(locales[i+j], "-", 2)
+ x[0] = b.langIndex(pc[0])
+ if len(pc) == 2 {
+ x[1+j] = uint16(b.regionIndex(pc[1]))
+ }
+ }
+ paradigmLocales = append(paradigmLocales, x)
+ }
+ b.w.WriteVar("paradigmLocales", paradigmLocales)
+
+ b.w.WriteType(mutualIntelligibility{})
+ b.w.WriteType(scriptIntelligibility{})
+ b.w.WriteType(regionIntelligibility{})
+
+ matchLang := []mutualIntelligibility{}
+ matchScript := []scriptIntelligibility{}
+ matchRegion := []regionIntelligibility{}
+ // Convert the languageMatch entries into lists keyed by desired language.
+ for _, m := range lm[0].LanguageMatch {
+ // Different versions of CLDR use different separators.
+ desired := strings.Replace(m.Desired, "-", "_", -1)
+ supported := strings.Replace(m.Supported, "-", "_", -1)
+ d := strings.Split(desired, "_")
+ s := strings.Split(supported, "_")
+ if len(d) != len(s) {
+ log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
+ continue
+ }
+ distance, _ := strconv.ParseInt(m.Distance, 10, 8)
+ switch len(d) {
+ case 2:
+ if desired == supported && desired == "*_*" {
+ continue
+ }
+ // language-script pair.
+ matchScript = append(matchScript, scriptIntelligibility{
+ wantLang: uint16(b.langIndex(d[0])),
+ haveLang: uint16(b.langIndex(s[0])),
+ wantScript: uint8(b.scriptIndex(d[1])),
+ haveScript: uint8(b.scriptIndex(s[1])),
+ distance: uint8(distance),
+ })
+ if m.Oneway != "true" {
+ matchScript = append(matchScript, scriptIntelligibility{
+ wantLang: uint16(b.langIndex(s[0])),
+ haveLang: uint16(b.langIndex(d[0])),
+ wantScript: uint8(b.scriptIndex(s[1])),
+ haveScript: uint8(b.scriptIndex(d[1])),
+ distance: uint8(distance),
+ })
+ }
+ case 1:
+ if desired == supported && desired == "*" {
+ continue
+ }
+ if distance == 1 {
+ // nb == no is already handled by macro mapping. Check that there
+ // really is only this case.
+ if d[0] != "no" || s[0] != "nb" {
+ log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
+ }
+ continue
+ }
+ // TODO: consider dropping oneway field and just doubling the entry.
+ matchLang = append(matchLang, mutualIntelligibility{
+ want: uint16(b.langIndex(d[0])),
+ have: uint16(b.langIndex(s[0])),
+ distance: uint8(distance),
+ oneway: m.Oneway == "true",
+ })
+ case 3:
+ if desired == supported && desired == "*_*_*" {
+ continue
+ }
+ if desired != supported {
+ // This is now supported by CLDR, but only for one case, which
+ // should already be covered by the paradigm locales. For instance,
+ // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
+ // testdata/CLDRLocaleMatcherTest.txt tests this.
+ if supported != "en_*_GB" {
+ log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
+ }
+ continue
+ }
+ ri := regionIntelligibility{
+ lang: b.langIndex(d[0]),
+ distance: uint8(distance),
+ }
+ if d[1] != "*" {
+ ri.script = uint8(b.scriptIndex(d[1]))
+ }
+ switch {
+ case d[2] == "*":
+ ri.group = 0x80 // not contained in anything
+ case strings.HasPrefix(d[2], "$!"):
+ ri.group = 0x80
+ d[2] = "$" + d[2][len("$!"):]
+ fallthrough
+ case strings.HasPrefix(d[2], "$"):
+ ri.group |= idToIndex[d[2]]
+ }
+ matchRegion = append(matchRegion, ri)
+ default:
+ log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
+ }
+ }
+ sort.SliceStable(matchLang, func(i, j int) bool {
+ return matchLang[i].distance < matchLang[j].distance
+ })
+ b.w.WriteComment(`
+ matchLang holds pairs of langIDs of base languages that are typically
+ mutually intelligible. Each pair is associated with a confidence and
+ whether the intelligibility goes one or both ways.`)
+ b.w.WriteVar("matchLang", matchLang)
+
+ b.w.WriteComment(`
+ matchScript holds pairs of scriptIDs where readers of one script
+ can typically also read the other. Each is associated with a confidence.`)
+ sort.SliceStable(matchScript, func(i, j int) bool {
+ return matchScript[i].distance < matchScript[j].distance
+ })
+ b.w.WriteVar("matchScript", matchScript)
+
+ sort.SliceStable(matchRegion, func(i, j int) bool {
+ return matchRegion[i].distance < matchRegion[j].distance
+ })
+ b.w.WriteVar("matchRegion", matchRegion)
+}
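
A hedged sketch of how the matchLang pairs generated above could be consulted. It is not the package's matcher: the lookup helper, the fallback distance of 100, and the language IDs are all assumptions for illustration; the only facts taken from the generator are that entries are sorted by distance and that the oneway flag suppresses the reverse direction.

package main

import "fmt"

type mutualIntelligibility struct {
	want, have uint16
	distance   uint8
	oneway     bool
}

// distance scans the (distance-sorted) table and returns the first matching
// pair's distance; two-way entries also apply with want and have swapped.
func distance(matchLang []mutualIntelligibility, want, have uint16) uint8 {
	for _, m := range matchLang {
		if m.want == want && m.have == have ||
			!m.oneway && m.want == have && m.have == want {
			return m.distance
		}
	}
	return 100 // no known relation; treated here as maximal distance
}

func main() {
	// Placeholder language IDs.
	table := []mutualIntelligibility{
		{want: 1, have: 2, distance: 8, oneway: false},
		{want: 3, have: 4, distance: 10, oneway: true},
	}
	fmt.Println(distance(table, 2, 1)) // 8: two-way entry applies in reverse
	fmt.Println(distance(table, 4, 3)) // 100: one-way entry does not reverse
}
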
diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go
index b939c89f1..abfa17f66 100644
--- a/vendor/golang.org/x/text/language/language.go
+++ b/vendor/golang.org/x/text/language/language.go
@@ -530,7 +530,7 @@ func (r Region) String() string {
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r Region) ISO3() string {
- return r.regionID.String()
+ return r.regionID.ISO3()
}
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go
index 919e3d950..520b9ada0 100644
--- a/vendor/golang.org/x/text/transform/transform.go
+++ b/vendor/golang.org/x/text/transform/transform.go
@@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
return dstL.n, srcL.p, err
}
-// Deprecated: use runes.Remove instead.
+// Deprecated: Use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go
new file mode 100644
index 000000000..987fc169c
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/gen.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "flag"
+ "log"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/triegen"
+ "golang.org/x/text/internal/ucd"
+)
+
+var outputFile = flag.String("out", "tables.go", "output file")
+
+func main() {
+ gen.Init()
+ gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
+ gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
+
+ genTables()
+}
+
+// bidiClass names and codes taken from class "bc" in
+// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
+var bidiClass = map[string]Class{
+ "AL": AL, // ArabicLetter
+ "AN": AN, // ArabicNumber
+ "B": B, // ParagraphSeparator
+ "BN": BN, // BoundaryNeutral
+ "CS": CS, // CommonSeparator
+ "EN": EN, // EuropeanNumber
+ "ES": ES, // EuropeanSeparator
+ "ET": ET, // EuropeanTerminator
+ "L": L, // LeftToRight
+ "NSM": NSM, // NonspacingMark
+ "ON": ON, // OtherNeutral
+ "R": R, // RightToLeft
+ "S": S, // SegmentSeparator
+ "WS": WS, // WhiteSpace
+
+ "FSI": Control,
+ "PDF": Control,
+ "PDI": Control,
+ "LRE": Control,
+ "LRI": Control,
+ "LRO": Control,
+ "RLE": Control,
+ "RLI": Control,
+ "RLO": Control,
+}
+
+func genTables() {
+ if numClass > 0x0F {
+ log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
+ }
+ w := gen.NewCodeWriter()
+ defer w.WriteVersionedGoFile(*outputFile, "bidi")
+
+ gen.WriteUnicodeVersion(w)
+
+ t := triegen.NewTrie("bidi")
+
+ // Build data about bracket mapping. These bits need to be or-ed with
+ // any other bits.
+ orMask := map[rune]uint64{}
+
+ xorMap := map[rune]int{}
+ xorMasks := []rune{0} // First value is no-op.
+
+ ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
+ r1 := p.Rune(0)
+ r2 := p.Rune(1)
+ xor := r1 ^ r2
+ if _, ok := xorMap[xor]; !ok {
+ xorMap[xor] = len(xorMasks)
+ xorMasks = append(xorMasks, xor)
+ }
+ entry := uint64(xorMap[xor]) << xorMaskShift
+ switch p.String(2) {
+ case "o":
+ entry |= openMask
+ case "c", "n":
+ default:
+ log.Fatalf("Unknown bracket class %q.", p.String(2))
+ }
+ orMask[r1] = entry
+ })
+
+ w.WriteComment(`
+ xorMasks contains masks to be xor-ed with brackets to get the reverse
+ version.`)
+ w.WriteVar("xorMasks", xorMasks)
+
+ done := map[rune]bool{}
+
+ insert := func(r rune, c Class) {
+ if !done[r] {
+ t.Insert(r, orMask[r]|uint64(c))
+ done[r] = true
+ }
+ }
+
+ // Insert the derived BiDi properties.
+ ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
+ r := p.Rune(0)
+ class, ok := bidiClass[p.String(1)]
+ if !ok {
+ log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
+ }
+ insert(r, class)
+ })
+ visitDefaults(insert)
+
+ // TODO: use sparse blocks. This would reduce table size considerably
+ // from the looks of it.
+
+ sz, err := t.Gen(w)
+ if err != nil {
+ log.Fatal(err)
+ }
+ w.Size += sz
+}
+
+// Dummy values to make methods in gen_common compile. The real versions
+// will be generated by this file into tables.go.
+var (
+ xorMasks []rune
+)
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
new file mode 100644
index 000000000..02c3b505d
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "unicode"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/ucd"
+ "golang.org/x/text/unicode/rangetable"
+)
+
+// These tables are hand-extracted from:
+// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
+func visitDefaults(fn func(r rune, c Class)) {
+ // first write default values for ranges listed above.
+ visitRunes(fn, AL, []rune{
+ 0x0600, 0x07BF, // Arabic
+ 0x08A0, 0x08FF, // Arabic Extended-A
+ 0xFB50, 0xFDCF, // Arabic Presentation Forms
+ 0xFDF0, 0xFDFF,
+ 0xFE70, 0xFEFF,
+ 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
+ })
+ visitRunes(fn, R, []rune{
+ 0x0590, 0x05FF, // Hebrew
+ 0x07C0, 0x089F, // Nko et al.
+ 0xFB1D, 0xFB4F,
+ 0x00010800, 0x00010FFF, // Cypriot Syllabary et al.
+ 0x0001E800, 0x0001EDFF,
+ 0x0001EF00, 0x0001EFFF,
+ })
+ visitRunes(fn, ET, []rune{ // European Terminator
+ 0x20A0, 0x20CF, // Currency symbols
+ })
+ rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
+ fn(r, BN) // Boundary Neutral
+ })
+ ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
+ if p.String(1) == "Default_Ignorable_Code_Point" {
+ fn(p.Rune(0), BN) // Boundary Neutral
+ }
+ })
+}
+
+func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
+ for i := 0; i < len(runes); i += 2 {
+ lo, hi := runes[i], runes[i+1]
+ for j := lo; j <= hi; j++ {
+ fn(j, c)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
new file mode 100644
index 000000000..9cb994289
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// Class is the Unicode BiDi class. Each rune has a single class.
+type Class uint
+
+const (
+ L Class = iota // LeftToRight
+ R // RightToLeft
+ EN // EuropeanNumber
+ ES // EuropeanSeparator
+ ET // EuropeanTerminator
+ AN // ArabicNumber
+ CS // CommonSeparator
+ B // ParagraphSeparator
+ S // SegmentSeparator
+ WS // WhiteSpace
+ ON // OtherNeutral
+ BN // BoundaryNeutral
+ NSM // NonspacingMark
+ AL // ArabicLetter
+ Control // Control LRO - PDI
+
+ numClass
+
+ LRO // LeftToRightOverride
+ RLO // RightToLeftOverride
+ LRE // LeftToRightEmbedding
+ RLE // RightToLeftEmbedding
+ PDF // PopDirectionalFormat
+ LRI // LeftToRightIsolate
+ RLI // RightToLeftIsolate
+ FSI // FirstStrongIsolate
+ PDI // PopDirectionalIsolate
+
+ unknownClass = ^Class(0)
+)
+
+var controlToClass = map[rune]Class{
+ 0x202D: LRO, // LeftToRightOverride,
+ 0x202E: RLO, // RightToLeftOverride,
+ 0x202A: LRE, // LeftToRightEmbedding,
+ 0x202B: RLE, // RightToLeftEmbedding,
+ 0x202C: PDF, // PopDirectionalFormat,
+ 0x2066: LRI, // LeftToRightIsolate,
+ 0x2067: RLI, // RightToLeftIsolate,
+ 0x2068: FSI, // FirstStrongIsolate,
+ 0x2069: PDI, // PopDirectionalIsolate,
+}
+
+// A trie entry has the following bits:
+// 7..5 XOR mask for brackets
+// 4 1: Bracket open, 0: Bracket close
+// 3..0 Class type
+
+const (
+ openMask = 0x10
+ xorMaskShift = 5
+)
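
The bit layout documented above can be exercised with a small decoding sketch. The decode helper and the two-element xorMasks table are illustrative assumptions, not part of the generated tables; only the openMask and xorMaskShift constants and the low four class bits come from the layout above (ON is class 10 in the iota list).

package main

import "fmt"

const (
	openMask     = 0x10
	xorMaskShift = 5
)

// decode splits a trie entry into its class bits, bracket-open flag, and the
// index of the XOR mask used to recover the matching bracket.
func decode(entry uint8) (class uint8, open bool, xorIndex uint8) {
	return entry & 0x0F, entry&openMask != 0, entry >> xorMaskShift
}

func main() {
	// Hypothetical xorMasks table; index 0 is the no-op mask.
	xorMasks := []rune{0, '(' ^ ')'}

	// An entry for '(': class ON (10), open bit set, XOR-mask index 1.
	entry := uint8(0x0A) | openMask | 1<<xorMaskShift
	class, open, idx := decode(entry)
	fmt.Printf("class=%#x open=%v pair=%q\n", class, open, '('^xorMasks[idx])
	// Output: class=0xa open=true pair=')'
}
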
diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
index 2e1ff1959..d8c94e1bd 100644
--- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
+++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.10
+// +build go1.10,!go1.13
package bidi
diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
new file mode 100644
index 000000000..022e3c690
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
@@ -0,0 +1,1887 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.13
+
+package bidi
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "11.0.0"
+
+// xorMasks contains masks to be xor-ed with brackets to get the reverse
+// version.
+var xorMasks = []int32{ // 8 elements
+ 0, 1, 6, 7, 3, 15, 29, 63,
+} // Size: 56 bytes
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return bidiValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = bidiIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return bidiValues[c0]
+ }
+ i := bidiIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *bidiTrie) lookupString(s string) (v uint8, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return bidiValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = bidiIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *bidiTrie) lookupStringUnsafe(s string) uint8 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return bidiValues[c0]
+ }
+ i := bidiIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// bidiTrie. Total size: 16512 bytes (16.12 KiB). Checksum: 2a9cf1317f2ffaa.
+type bidiTrie struct{}
+
+func newBidiTrie(i int) *bidiTrie {
+ return &bidiTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 {
+ switch {
+ default:
+ return uint8(bidiValues[n<<6+uint32(b)])
+ }
+}
+
+// bidiValues: 234 blocks, 14976 entries, 14976 bytes
+// The third block is the zero block.
+var bidiValues = [14976]uint8{
+ // Block 0x0, offset 0x0
+ 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b,
+ 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008,
+ 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b,
+ 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b,
+ 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007,
+ 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004,
+ 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a,
+ 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006,
+ 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002,
+ 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a,
+ 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a,
+ // Block 0x1, offset 0x40
+ 0x40: 0x000a,
+ 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a,
+ 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a,
+ 0x7b: 0x005a,
+ 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007,
+ 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b,
+ 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b,
+ 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b,
+ 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b,
+ 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004,
+ 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a,
+ 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a,
+ 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a,
+ 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a,
+ 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a,
+ // Block 0x4, offset 0x100
+ 0x117: 0x000a,
+ 0x137: 0x000a,
+ // Block 0x5, offset 0x140
+ 0x179: 0x000a, 0x17a: 0x000a,
+ // Block 0x6, offset 0x180
+ 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a,
+ 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a,
+ 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a,
+ 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a,
+ 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a,
+ 0x19e: 0x000a, 0x19f: 0x000a,
+ 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a,
+ 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a,
+ 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a,
+ 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a,
+ 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c,
+ 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c,
+ 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c,
+ 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c,
+ 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c,
+ 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c,
+ 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c,
+ 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c,
+ 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c,
+ 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c,
+ 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c,
+ // Block 0x8, offset 0x200
+ 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c,
+ 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c,
+ 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c,
+ 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c,
+ 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c,
+ 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c,
+ 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c,
+ 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c,
+ 0x234: 0x000a, 0x235: 0x000a,
+ 0x23e: 0x000a,
+ // Block 0x9, offset 0x240
+ 0x244: 0x000a, 0x245: 0x000a,
+ 0x247: 0x000a,
+ // Block 0xa, offset 0x280
+ 0x2b6: 0x000a,
+ // Block 0xb, offset 0x2c0
+ 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c,
+ 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c,
+ // Block 0xc, offset 0x300
+ 0x30a: 0x000a,
+ 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c,
+ 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c,
+ 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c,
+ 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c,
+ 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c,
+ 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c,
+ 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c,
+ 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c,
+ 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c,
+ 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001,
+ 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001,
+ 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001,
+ 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001,
+ 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001,
+ 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001,
+ 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001,
+ 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001,
+ 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001,
+ 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001,
+ // Block 0xe, offset 0x380
+ 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005,
+ 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d,
+ 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c,
+ 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c,
+ 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d,
+ 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d,
+ 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d,
+ 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d,
+ 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d,
+ 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d,
+ 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d,
+ 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c,
+ 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c,
+ 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c,
+ 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c,
+ 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005,
+ 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005,
+ 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d,
+ 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d,
+ 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d,
+ 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d,
+ // Block 0x10, offset 0x400
+ 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d,
+ 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d,
+ 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d,
+ 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d,
+ 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d,
+ 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d,
+ 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d,
+ 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d,
+ 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d,
+ 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d,
+ 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d,
+ // Block 0x11, offset 0x440
+ 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d,
+ 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d,
+ 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d,
+ 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c,
+ 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005,
+ 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c,
+ 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a,
+ 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d,
+ 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002,
+ 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d,
+ 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d,
+ // Block 0x12, offset 0x480
+ 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d,
+ 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d,
+ 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c,
+ 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d,
+ 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d,
+ 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d,
+ 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d,
+ 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d,
+ 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c,
+ 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c,
+ 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c,
+ 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d,
+ 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d,
+ 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d,
+ 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d,
+ 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d,
+ 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d,
+ 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d,
+ 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d,
+ 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d,
+ 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d,
+ // Block 0x14, offset 0x500
+ 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d,
+ 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d,
+ 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d,
+ 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d,
+ 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d,
+ 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d,
+ 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c,
+ 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c,
+ 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d,
+ 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d,
+ 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001,
+ 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001,
+ 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001,
+ 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001,
+ 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001,
+ 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001,
+ 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001,
+ 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c,
+ 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001,
+ 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001,
+ 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001,
+ 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001,
+ 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001,
+ 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c,
+ 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c,
+ 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c,
+ 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c,
+ 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001,
+ 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001,
+ 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001,
+ 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001,
+ 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001,
+ 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001,
+ 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001,
+ 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001,
+ 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d,
+ 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d,
+ 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d,
+ 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001,
+ 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001,
+ 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001,
+ 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001,
+ 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001,
+ 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001,
+ 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001,
+ 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d,
+ 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d,
+ 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d,
+ 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d,
+ 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d,
+ 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d,
+ // Block 0x19, offset 0x640
+ 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d,
+ 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d,
+ 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d,
+ 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c,
+ 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c,
+ 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c,
+ 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c,
+ 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c,
+ 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c,
+ 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c,
+ 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c,
+ 0x6ba: 0x000c,
+ 0x6bc: 0x000c,
+ // Block 0x1b, offset 0x6c0
+ 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c,
+ 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c,
+ 0x6cd: 0x000c, 0x6d1: 0x000c,
+ 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c,
+ 0x6e2: 0x000c, 0x6e3: 0x000c,
+ // Block 0x1c, offset 0x700
+ 0x701: 0x000c,
+ 0x73c: 0x000c,
+ // Block 0x1d, offset 0x740
+ 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c,
+ 0x74d: 0x000c,
+ 0x762: 0x000c, 0x763: 0x000c,
+ 0x772: 0x0004, 0x773: 0x0004,
+ 0x77b: 0x0004,
+ 0x77e: 0x000c,
+ // Block 0x1e, offset 0x780
+ 0x781: 0x000c, 0x782: 0x000c,
+ 0x7bc: 0x000c,
+ // Block 0x1f, offset 0x7c0
+ 0x7c1: 0x000c, 0x7c2: 0x000c,
+ 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c,
+ 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c,
+ 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c,
+ // Block 0x20, offset 0x800
+ 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c,
+ 0x807: 0x000c, 0x808: 0x000c,
+ 0x80d: 0x000c,
+ 0x822: 0x000c, 0x823: 0x000c,
+ 0x831: 0x0004,
+ 0x83a: 0x000c, 0x83b: 0x000c,
+ 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c,
+ // Block 0x21, offset 0x840
+ 0x841: 0x000c,
+ 0x87c: 0x000c, 0x87f: 0x000c,
+ // Block 0x22, offset 0x880
+ 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c,
+ 0x88d: 0x000c,
+ 0x896: 0x000c,
+ 0x8a2: 0x000c, 0x8a3: 0x000c,
+ // Block 0x23, offset 0x8c0
+ 0x8c2: 0x000c,
+ // Block 0x24, offset 0x900
+ 0x900: 0x000c,
+ 0x90d: 0x000c,
+ 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a,
+ 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a,
+ // Block 0x25, offset 0x940
+ 0x940: 0x000c, 0x944: 0x000c,
+ 0x97e: 0x000c, 0x97f: 0x000c,
+ // Block 0x26, offset 0x980
+ 0x980: 0x000c,
+ 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c,
+ 0x98c: 0x000c, 0x98d: 0x000c,
+ 0x995: 0x000c, 0x996: 0x000c,
+ 0x9a2: 0x000c, 0x9a3: 0x000c,
+ 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a,
+ 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a,
+ // Block 0x27, offset 0x9c0
+ 0x9cc: 0x000c, 0x9cd: 0x000c,
+ 0x9e2: 0x000c, 0x9e3: 0x000c,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x000c, 0xa01: 0x000c,
+ 0xa3b: 0x000c,
+ 0xa3c: 0x000c,
+ // Block 0x29, offset 0xa40
+ 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c,
+ 0xa4d: 0x000c,
+ 0xa62: 0x000c, 0xa63: 0x000c,
+ // Block 0x2a, offset 0xa80
+ 0xa8a: 0x000c,
+ 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c,
+ // Block 0x2b, offset 0xac0
+ 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c,
+ 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c,
+ 0xaff: 0x0004,
+ // Block 0x2c, offset 0xb00
+ 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c,
+ 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c,
+ // Block 0x2d, offset 0xb40
+ 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c,
+ 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c,
+ 0xb7c: 0x000c,
+ // Block 0x2e, offset 0xb80
+ 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c,
+ 0xb8c: 0x000c, 0xb8d: 0x000c,
+ // Block 0x2f, offset 0xbc0
+ 0xbd8: 0x000c, 0xbd9: 0x000c,
+ 0xbf5: 0x000c,
+ 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a,
+ 0xbfc: 0x003a, 0xbfd: 0x002a,
+ // Block 0x30, offset 0xc00
+ 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c,
+ 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c,
+ 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c,
+ 0xc46: 0x000c, 0xc47: 0x000c,
+ 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c,
+ 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c,
+ 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c,
+ 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c,
+ 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c,
+ 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c,
+ 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c,
+ 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c,
+ 0xc7c: 0x000c,
+ // Block 0x32, offset 0xc80
+ 0xc86: 0x000c,
+ // Block 0x33, offset 0xcc0
+ 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c,
+ 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c,
+ 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c,
+ 0xcfd: 0x000c, 0xcfe: 0x000c,
+ // Block 0x34, offset 0xd00
+ 0xd18: 0x000c, 0xd19: 0x000c,
+ 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c,
+ 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c,
+ // Block 0x35, offset 0xd40
+ 0xd42: 0x000c, 0xd45: 0x000c,
+ 0xd46: 0x000c,
+ 0xd4d: 0x000c,
+ 0xd5d: 0x000c,
+ // Block 0x36, offset 0xd80
+ 0xd9d: 0x000c,
+ 0xd9e: 0x000c, 0xd9f: 0x000c,
+ // Block 0x37, offset 0xdc0
+ 0xdd0: 0x000a, 0xdd1: 0x000a,
+ 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a,
+ 0xdd8: 0x000a, 0xdd9: 0x000a,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x000a,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x0009,
+ 0xe5b: 0x007a, 0xe5c: 0x006a,
+ // Block 0x3a, offset 0xe80
+ 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c,
+ 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c,
+ // Block 0x3b, offset 0xec0
+ 0xed2: 0x000c, 0xed3: 0x000c,
+ 0xef2: 0x000c, 0xef3: 0x000c,
+ // Block 0x3c, offset 0xf00
+ 0xf34: 0x000c, 0xf35: 0x000c,
+ 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c,
+ 0xf3c: 0x000c, 0xf3d: 0x000c,
+ // Block 0x3d, offset 0xf40
+ 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c,
+ 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c,
+ 0xf52: 0x000c, 0xf53: 0x000c,
+ 0xf5b: 0x0004, 0xf5d: 0x000c,
+ 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a,
+ 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a,
+ 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c,
+ 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b,
+ // Block 0x3f, offset 0xfc0
+ 0xfc5: 0x000c,
+ 0xfc6: 0x000c,
+ 0xfe9: 0x000c,
+ // Block 0x40, offset 0x1000
+ 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c,
+ 0x1027: 0x000c, 0x1028: 0x000c,
+ 0x1032: 0x000c,
+ 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a,
+ // Block 0x42, offset 0x1080
+ 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a,
+ 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a,
+ 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a,
+ 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a,
+ 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a,
+ 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a,
+ // Block 0x43, offset 0x10c0
+ 0x10d7: 0x000c,
+ 0x10d8: 0x000c, 0x10db: 0x000c,
+ // Block 0x44, offset 0x1100
+ 0x1116: 0x000c,
+ 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c,
+ 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c,
+ 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c,
+ 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c,
+ 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c,
+ 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c,
+ 0x113c: 0x000c, 0x113f: 0x000c,
+ // Block 0x45, offset 0x1140
+ 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c,
+ 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c,
+ 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c,
+ 0x11b4: 0x000c,
+ 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c,
+ 0x11bc: 0x000c,
+ // Block 0x47, offset 0x11c0
+ 0x11c2: 0x000c,
+ 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c,
+ 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x000c, 0x1201: 0x000c,
+ 0x1222: 0x000c, 0x1223: 0x000c,
+ 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c,
+ 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c,
+ // Block 0x49, offset 0x1240
+ 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c,
+ 0x126d: 0x000c, 0x126f: 0x000c,
+ 0x1270: 0x000c, 0x1271: 0x000c,
+ // Block 0x4a, offset 0x1280
+ 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c,
+ 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c,
+ 0x12b6: 0x000c, 0x12b7: 0x000c,
+ // Block 0x4b, offset 0x12c0
+ 0x12d0: 0x000c, 0x12d1: 0x000c,
+ 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c,
+ 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c,
+ 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c,
+ 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c,
+ 0x12ed: 0x000c,
+ 0x12f4: 0x000c,
+ 0x12f8: 0x000c, 0x12f9: 0x000c,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c,
+ 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c,
+ 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c,
+ 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c,
+ 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c,
+ 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c,
+ 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c,
+ 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c,
+ 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c,
+ 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c,
+ 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c,
+ // Block 0x4d, offset 0x1340
+ 0x137d: 0x000a, 0x137f: 0x000a,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x000a, 0x1381: 0x000a,
+ 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a,
+ 0x139d: 0x000a,
+ 0x139e: 0x000a, 0x139f: 0x000a,
+ 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a,
+ 0x13bd: 0x000a, 0x13be: 0x000a,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009,
+ 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b,
+ 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a,
+ 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a,
+ 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a,
+ 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a,
+ 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007,
+ 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006,
+ 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a,
+ 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a,
+ 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a,
+ 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a,
+ 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a,
+ 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a,
+ 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a,
+ 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b,
+ 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e,
+ 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b,
+ 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002,
+ 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003,
+ 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002,
+ 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003,
+ 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a,
+ 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004,
+ 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004,
+ 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004,
+ 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004,
+ 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004,
+ 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004,
+ 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004,
+ 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c,
+ 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c,
+ 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c,
+ 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c,
+ 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c,
+ 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c,
+ 0x14b0: 0x000c,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a,
+ 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a,
+ 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a,
+ 0x14d8: 0x000a,
+ 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a,
+ 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a,
+ 0x14ee: 0x0004,
+ 0x14fa: 0x000a, 0x14fb: 0x000a,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a,
+ 0x150a: 0x000a, 0x150b: 0x000a,
+ 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a,
+ 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a,
+ 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a,
+ 0x151e: 0x000a, 0x151f: 0x000a,
+ // Block 0x55, offset 0x1540
+ 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a,
+ 0x1550: 0x000a, 0x1551: 0x000a,
+ 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a,
+ 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a,
+ 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a,
+ 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a,
+ 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a,
+ 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a,
+ 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a,
+ 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a,
+ 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a,
+ 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a,
+ 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a,
+ 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a,
+ 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a,
+ 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a,
+ 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a,
+ 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a,
+ 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a,
+ 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a,
+ 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a,
+ 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a,
+ 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a,
+ 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a,
+ 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a,
+ 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a,
+ 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a,
+ 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a,
+ 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a,
+ 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a,
+ 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a,
+ 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a,
+ 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a,
+ 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a,
+ 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a,
+ 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a,
+ 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a,
+ 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a,
+ // Block 0x59, offset 0x1640
+ 0x167b: 0x000a,
+ 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a,
+ 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a,
+ 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a,
+ 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a,
+ 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a,
+ 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a,
+ 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a,
+ 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a,
+ 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a,
+ 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a,
+ 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a,
+ 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a,
+ 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a,
+ 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a,
+ 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a,
+ 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a,
+ 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a,
+ 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a,
+ 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a,
+ 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a,
+ 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a,
+ 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a,
+ 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a,
+ 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a,
+ 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002,
+ 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002,
+ 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002,
+ 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002,
+ // Block 0x5e, offset 0x1780
+ 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a,
+ 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a,
+ 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a,
+ 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a,
+ 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a,
+ 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a,
+ 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a,
+ 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a,
+ 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a,
+ 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a,
+ 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a,
+ 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a,
+ 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a,
+ 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a,
+ 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a,
+ 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a,
+ 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a,
+ 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a,
+ 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a,
+ 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a,
+ 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a,
+ 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a,
+ 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a,
+ 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a,
+ 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a,
+ 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a,
+ 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a,
+ 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a,
+ 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a,
+ 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a,
+ 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a,
+ 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a,
+ 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a,
+ 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a,
+ 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a,
+ 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a,
+ 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba,
+ 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a,
+ 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a,
+ 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a,
+ 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a,
+ 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a,
+ 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a,
+ 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a,
+ 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a,
+ 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a,
+ 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a,
+ 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a,
+ 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a,
+ 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a,
+ 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a,
+ 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a,
+ 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a,
+ 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a,
+ 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a,
+ 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a,
+ 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a,
+ 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a,
+ 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a,
+ 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a,
+ 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a,
+ 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a,
+ 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a,
+ 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a,
+ 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a,
+ 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a,
+ 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a,
+ 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a,
+ 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a,
+ 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a,
+ 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a,
+ 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a,
+ 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a,
+ 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a,
+ 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a,
+ 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a,
+ 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a,
+ 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a,
+ 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a,
+ 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a,
+ 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a,
+ 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a,
+ 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a,
+ 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a,
+ // Block 0x67, offset 0x19c0
+ 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a,
+ 0x19ea: 0x000a, 0x19ef: 0x000c,
+ 0x19f0: 0x000c, 0x19f1: 0x000c,
+ 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a,
+ 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a,
+ // Block 0x68, offset 0x1a00
+ 0x1a3f: 0x000c,
+ // Block 0x69, offset 0x1a40
+ 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c,
+ 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c,
+ 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c,
+ 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c,
+ 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c,
+ 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a,
+ 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a,
+ 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a,
+ 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a,
+ 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a,
+ 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a,
+ 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a,
+ 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a,
+ 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a,
+ 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a,
+ 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a,
+ 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a,
+ 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a,
+ 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a,
+ 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a,
+ 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a,
+ 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a,
+ 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a,
+ 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a,
+ 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a,
+ 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a,
+ 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a,
+ 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a,
+ 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a,
+ 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a,
+ 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a,
+ 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a,
+ 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a,
+ 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a,
+ 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a,
+ 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a,
+ 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a,
+ 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a,
+ 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a,
+ 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a,
+ 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a,
+ 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a,
+ 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a,
+ 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a,
+ 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a,
+ 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a,
+ 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c,
+ 0x1bf0: 0x000a,
+ 0x1bf6: 0x000a, 0x1bf7: 0x000a,
+ 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a,
+ // Block 0x70, offset 0x1c00
+ 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a,
+ 0x1c20: 0x000a,
+ // Block 0x71, offset 0x1c40
+ 0x1c7b: 0x000a,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a,
+ 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a,
+ 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a,
+ 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a,
+ 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a,
+ 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a,
+ // Block 0x73, offset 0x1cc0
+ 0x1cdd: 0x000a,
+ 0x1cde: 0x000a,
+ // Block 0x74, offset 0x1d00
+ 0x1d10: 0x000a, 0x1d11: 0x000a,
+ 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a,
+ 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a,
+ 0x1d1e: 0x000a, 0x1d1f: 0x000a,
+ 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a,
+ // Block 0x75, offset 0x1d40
+ 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a,
+ 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a,
+ 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a,
+ // Block 0x76, offset 0x1d80
+ 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a,
+ // Block 0x77, offset 0x1dc0
+ 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a,
+ // Block 0x78, offset 0x1e00
+ 0x1e1e: 0x000a, 0x1e1f: 0x000a,
+ 0x1e3f: 0x000a,
+ // Block 0x79, offset 0x1e40
+ 0x1e50: 0x000a, 0x1e51: 0x000a,
+ 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a,
+ 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a,
+ 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a,
+ 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a,
+ 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a,
+ 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a,
+ 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a,
+ 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a,
+ 0x1e86: 0x000a,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a,
+ // Block 0x7c, offset 0x1f00
+ 0x1f2f: 0x000c,
+ 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c,
+ 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c,
+ 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a,
+ // Block 0x7d, offset 0x1f40
+ 0x1f5e: 0x000c, 0x1f5f: 0x000c,
+ // Block 0x7e, offset 0x1f80
+ 0x1fb0: 0x000c, 0x1fb1: 0x000c,
+ // Block 0x7f, offset 0x1fc0
+ 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a,
+ 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a,
+ 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a,
+ 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a,
+ 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a,
+ 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a,
+ // Block 0x80, offset 0x2000
+ 0x2008: 0x000a,
+ // Block 0x81, offset 0x2040
+ 0x2042: 0x000c,
+ 0x2046: 0x000c, 0x204b: 0x000c,
+ 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a,
+ 0x206a: 0x000a, 0x206b: 0x000a,
+ 0x2078: 0x0004, 0x2079: 0x0004,
+ // Block 0x82, offset 0x2080
+ 0x20b4: 0x000a, 0x20b5: 0x000a,
+ 0x20b6: 0x000a, 0x20b7: 0x000a,
+ // Block 0x83, offset 0x20c0
+ 0x20c4: 0x000c, 0x20c5: 0x000c,
+ 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c,
+ 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c,
+ 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c,
+ 0x20f0: 0x000c, 0x20f1: 0x000c,
+ 0x20ff: 0x000c,
+ // Block 0x84, offset 0x2100
+ 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c,
+ 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c,
+ // Block 0x85, offset 0x2140
+ 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c,
+ 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c,
+ // Block 0x86, offset 0x2180
+ 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c,
+ 0x21b3: 0x000c,
+ 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c,
+ 0x21bc: 0x000c,
+ // Block 0x87, offset 0x21c0
+ 0x21e5: 0x000c,
+ // Block 0x88, offset 0x2200
+ 0x2229: 0x000c,
+ 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c,
+ 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c,
+ 0x2236: 0x000c,
+ // Block 0x89, offset 0x2240
+ 0x2243: 0x000c,
+ 0x224c: 0x000c,
+ 0x227c: 0x000c,
+ // Block 0x8a, offset 0x2280
+ 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c,
+ 0x22b7: 0x000c, 0x22b8: 0x000c,
+ 0x22be: 0x000c, 0x22bf: 0x000c,
+ // Block 0x8b, offset 0x22c0
+ 0x22c1: 0x000c,
+ 0x22ec: 0x000c, 0x22ed: 0x000c,
+ 0x22f6: 0x000c,
+ // Block 0x8c, offset 0x2300
+ 0x2325: 0x000c, 0x2328: 0x000c,
+ 0x232d: 0x000c,
+ // Block 0x8d, offset 0x2340
+ 0x235d: 0x0001,
+ 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001,
+ 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003,
+ 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001,
+ 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001,
+ 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001,
+ 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001,
+ // Block 0x8e, offset 0x2380
+ 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001,
+ 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001,
+ 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d,
+ 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d,
+ 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d,
+ 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d,
+ 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d,
+ 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d,
+ 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d,
+ 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d,
+ 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d,
+ // Block 0x8f, offset 0x23c0
+ 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d,
+ 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d,
+ 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d,
+ 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d,
+ 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d,
+ 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d,
+ 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d,
+ 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d,
+ 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d,
+ 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d,
+ 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a,
+ // Block 0x90, offset 0x2400
+ 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d,
+ 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d,
+ 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b,
+ 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b,
+ 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b,
+ 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b,
+ 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b,
+ 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b,
+ 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d,
+ 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d,
+ 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d,
+ // Block 0x91, offset 0x2440
+ 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c,
+ 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c,
+ 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a,
+ 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a,
+ 0x2458: 0x000a, 0x2459: 0x000a,
+ 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c,
+ 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 0x000c, 0x2469: 0x000c,
+ 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c,
+ 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a,
+ 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a,
+ 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a,
+ // Block 0x92, offset 0x2480
+ 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a,
+ 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a,
+ 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a,
+ 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a,
+ 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a,
+ 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003,
+ 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004,
+ 0x24aa: 0x0004, 0x24ab: 0x000a,
+ 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d,
+ 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d,
+ 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d,
+ // Block 0x93, offset 0x24c0
+ 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d,
+ 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d,
+ 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d,
+ 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d,
+ 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d,
+ 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d,
+ 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d,
+ 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d,
+ 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d,
+ 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d,
+ 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b,
+ // Block 0x94, offset 0x2500
+ 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004,
+ 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003,
+ 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002,
+ 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002,
+ 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a,
+ 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a,
+ 0x253b: 0x005a,
+ 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a,
+ // Block 0x95, offset 0x2540
+ 0x2540: 0x000a,
+ 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a,
+ 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a,
+ 0x2564: 0x000a, 0x2565: 0x000a,
+ // Block 0x96, offset 0x2580
+ 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a,
+ 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a,
+ 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 0x000a, 0x25ae: 0x000a,
+ 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b,
+ 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a,
+ 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b,
+ // Block 0x97, offset 0x25c0
+ 0x25c1: 0x000a,
+ // Block 0x98, offset 0x2600
+ 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a,
+ 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a,
+ 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a,
+ 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a,
+ 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a,
+ 0x2620: 0x000a,
+ // Block 0x99, offset 0x2640
+ 0x267d: 0x000c,
+ // Block 0x9a, offset 0x2680
+ 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002,
+ 0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002,
+ 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002,
+ 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002,
+ 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002,
+ // Block 0x9b, offset 0x26c0
+ 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c,
+ // Block 0x9c, offset 0x2700
+ 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001,
+ 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001,
+ 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001,
+ 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001,
+ 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001,
+ 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001,
+ 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001,
+ 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001,
+ 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001,
+ 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001,
+ 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001,
+ // Block 0x9d, offset 0x2740
+ 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001,
+ 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001,
+ 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001,
+ 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001,
+ 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001,
+ 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001,
+ 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001,
+ 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001,
+ 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001,
+ 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001,
+ 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001,
+ // Block 0x9e, offset 0x2780
+ 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c,
+ 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001,
+ 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001,
+ 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001,
+ 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001,
+ 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001,
+ 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001,
+ 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001,
+ 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001,
+ 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001,
+ 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c,
+ // Block 0x9f, offset 0x27c0
+ 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001,
+ 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001,
+ 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001,
+ 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001,
+ 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001,
+ 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001,
+ 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001,
+ 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001,
+ 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001,
+ 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001,
+ 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001,
+ // Block 0xa0, offset 0x2800
+ 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001,
+ 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001,
+ 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001,
+ 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001,
+ 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001,
+ 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001,
+ 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001,
+ 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001,
+ 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001,
+ 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a,
+ 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a,
+ // Block 0xa1, offset 0x2840
+ 0x2840: 0x000d, 0x2841: 0x000d, 0x2842: 0x000d, 0x2843: 0x000d, 0x2844: 0x000d, 0x2845: 0x000d,
+ 0x2846: 0x000d, 0x2847: 0x000d, 0x2848: 0x000d, 0x2849: 0x000d, 0x284a: 0x000d, 0x284b: 0x000d,
+ 0x284c: 0x000d, 0x284d: 0x000d, 0x284e: 0x000d, 0x284f: 0x000d, 0x2850: 0x000d, 0x2851: 0x000d,
+ 0x2852: 0x000d, 0x2853: 0x000d, 0x2854: 0x000d, 0x2855: 0x000d, 0x2856: 0x000d, 0x2857: 0x000d,
+ 0x2858: 0x000d, 0x2859: 0x000d, 0x285a: 0x000d, 0x285b: 0x000d, 0x285c: 0x000d, 0x285d: 0x000d,
+ 0x285e: 0x000d, 0x285f: 0x000d, 0x2860: 0x000d, 0x2861: 0x000d, 0x2862: 0x000d, 0x2863: 0x000d,
+ 0x2864: 0x000c, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x000c, 0x2868: 0x000d, 0x2869: 0x000d,
+ 0x286a: 0x000d, 0x286b: 0x000d, 0x286c: 0x000d, 0x286d: 0x000d, 0x286e: 0x000d, 0x286f: 0x000d,
+ 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005,
+ 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x000d, 0x287b: 0x000d,
+ 0x287c: 0x000d, 0x287d: 0x000d, 0x287e: 0x000d, 0x287f: 0x000d,
+ // Block 0xa2, offset 0x2880
+ 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001,
+ 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001,
+ 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001,
+ 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001,
+ 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001,
+ 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005,
+ 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005,
+ 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005,
+ 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005,
+ 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005,
+ 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001,
+ // Block 0xa3, offset 0x28c0
+ 0x28c0: 0x0001, 0x28c1: 0x0001, 0x28c2: 0x0001, 0x28c3: 0x0001, 0x28c4: 0x0001, 0x28c5: 0x0001,
+ 0x28c6: 0x0001, 0x28c7: 0x0001, 0x28c8: 0x0001, 0x28c9: 0x0001, 0x28ca: 0x0001, 0x28cb: 0x0001,
+ 0x28cc: 0x0001, 0x28cd: 0x0001, 0x28ce: 0x0001, 0x28cf: 0x0001, 0x28d0: 0x0001, 0x28d1: 0x0001,
+ 0x28d2: 0x0001, 0x28d3: 0x0001, 0x28d4: 0x0001, 0x28d5: 0x0001, 0x28d6: 0x0001, 0x28d7: 0x0001,
+ 0x28d8: 0x0001, 0x28d9: 0x0001, 0x28da: 0x0001, 0x28db: 0x0001, 0x28dc: 0x0001, 0x28dd: 0x0001,
+ 0x28de: 0x0001, 0x28df: 0x0001, 0x28e0: 0x0001, 0x28e1: 0x0001, 0x28e2: 0x0001, 0x28e3: 0x0001,
+ 0x28e4: 0x0001, 0x28e5: 0x0001, 0x28e6: 0x0001, 0x28e7: 0x0001, 0x28e8: 0x0001, 0x28e9: 0x0001,
+ 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001,
+ 0x28f0: 0x000d, 0x28f1: 0x000d, 0x28f2: 0x000d, 0x28f3: 0x000d, 0x28f4: 0x000d, 0x28f5: 0x000d,
+ 0x28f6: 0x000d, 0x28f7: 0x000d, 0x28f8: 0x000d, 0x28f9: 0x000d, 0x28fa: 0x000d, 0x28fb: 0x000d,
+ 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d,
+ // Block 0xa4, offset 0x2900
+ 0x2900: 0x000d, 0x2901: 0x000d, 0x2902: 0x000d, 0x2903: 0x000d, 0x2904: 0x000d, 0x2905: 0x000d,
+ 0x2906: 0x000c, 0x2907: 0x000c, 0x2908: 0x000c, 0x2909: 0x000c, 0x290a: 0x000c, 0x290b: 0x000c,
+ 0x290c: 0x000c, 0x290d: 0x000c, 0x290e: 0x000c, 0x290f: 0x000c, 0x2910: 0x000c, 0x2911: 0x000d,
+ 0x2912: 0x000d, 0x2913: 0x000d, 0x2914: 0x000d, 0x2915: 0x000d, 0x2916: 0x000d, 0x2917: 0x000d,
+ 0x2918: 0x000d, 0x2919: 0x000d, 0x291a: 0x000d, 0x291b: 0x000d, 0x291c: 0x000d, 0x291d: 0x000d,
+ 0x291e: 0x000d, 0x291f: 0x000d, 0x2920: 0x000d, 0x2921: 0x000d, 0x2922: 0x000d, 0x2923: 0x000d,
+ 0x2924: 0x000d, 0x2925: 0x000d, 0x2926: 0x000d, 0x2927: 0x000d, 0x2928: 0x000d, 0x2929: 0x000d,
+ 0x292a: 0x000d, 0x292b: 0x000d, 0x292c: 0x000d, 0x292d: 0x000d, 0x292e: 0x000d, 0x292f: 0x000d,
+ 0x2930: 0x0001, 0x2931: 0x0001, 0x2932: 0x0001, 0x2933: 0x0001, 0x2934: 0x0001, 0x2935: 0x0001,
+ 0x2936: 0x0001, 0x2937: 0x0001, 0x2938: 0x0001, 0x2939: 0x0001, 0x293a: 0x0001, 0x293b: 0x0001,
+ 0x293c: 0x0001, 0x293d: 0x0001, 0x293e: 0x0001, 0x293f: 0x0001,
+ // Block 0xa5, offset 0x2940
+ 0x2941: 0x000c,
+ 0x2978: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, 0x297b: 0x000c,
+ 0x297c: 0x000c, 0x297d: 0x000c, 0x297e: 0x000c, 0x297f: 0x000c,
+ // Block 0xa6, offset 0x2980
+ 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x2983: 0x000c, 0x2984: 0x000c, 0x2985: 0x000c,
+ 0x2986: 0x000c,
+ 0x2992: 0x000a, 0x2993: 0x000a, 0x2994: 0x000a, 0x2995: 0x000a, 0x2996: 0x000a, 0x2997: 0x000a,
+ 0x2998: 0x000a, 0x2999: 0x000a, 0x299a: 0x000a, 0x299b: 0x000a, 0x299c: 0x000a, 0x299d: 0x000a,
+ 0x299e: 0x000a, 0x299f: 0x000a, 0x29a0: 0x000a, 0x29a1: 0x000a, 0x29a2: 0x000a, 0x29a3: 0x000a,
+ 0x29a4: 0x000a, 0x29a5: 0x000a,
+ 0x29bf: 0x000c,
+ // Block 0xa7, offset 0x29c0
+ 0x29c0: 0x000c, 0x29c1: 0x000c,
+ 0x29f3: 0x000c, 0x29f4: 0x000c, 0x29f5: 0x000c,
+ 0x29f6: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c,
+ // Block 0xa8, offset 0x2a00
+ 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a02: 0x000c,
+ 0x2a27: 0x000c, 0x2a28: 0x000c, 0x2a29: 0x000c,
+ 0x2a2a: 0x000c, 0x2a2b: 0x000c, 0x2a2d: 0x000c, 0x2a2e: 0x000c, 0x2a2f: 0x000c,
+ 0x2a30: 0x000c, 0x2a31: 0x000c, 0x2a32: 0x000c, 0x2a33: 0x000c, 0x2a34: 0x000c,
+ // Block 0xa9, offset 0x2a40
+ 0x2a73: 0x000c,
+ // Block 0xaa, offset 0x2a80
+ 0x2a80: 0x000c, 0x2a81: 0x000c,
+ 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c,
+ 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c,
+ // Block 0xab, offset 0x2ac0
+ 0x2ac9: 0x000c, 0x2aca: 0x000c, 0x2acb: 0x000c,
+ 0x2acc: 0x000c,
+ // Block 0xac, offset 0x2b00
+ 0x2b2f: 0x000c,
+ 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b34: 0x000c,
+ 0x2b36: 0x000c, 0x2b37: 0x000c,
+ 0x2b3e: 0x000c,
+ // Block 0xad, offset 0x2b40
+ 0x2b5f: 0x000c, 0x2b63: 0x000c,
+ 0x2b64: 0x000c, 0x2b65: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c,
+ 0x2b6a: 0x000c,
+ // Block 0xae, offset 0x2b80
+ 0x2b80: 0x000c,
+ 0x2ba6: 0x000c, 0x2ba7: 0x000c, 0x2ba8: 0x000c, 0x2ba9: 0x000c,
+ 0x2baa: 0x000c, 0x2bab: 0x000c, 0x2bac: 0x000c,
+ 0x2bb0: 0x000c, 0x2bb1: 0x000c, 0x2bb2: 0x000c, 0x2bb3: 0x000c, 0x2bb4: 0x000c,
+ // Block 0xaf, offset 0x2bc0
+ 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c,
+ 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, 0x2bff: 0x000c,
+ // Block 0xb0, offset 0x2c00
+ 0x2c02: 0x000c, 0x2c03: 0x000c, 0x2c04: 0x000c,
+ 0x2c06: 0x000c,
+ 0x2c1e: 0x000c,
+ // Block 0xb1, offset 0x2c40
+ 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c,
+ 0x2c76: 0x000c, 0x2c77: 0x000c, 0x2c78: 0x000c, 0x2c7a: 0x000c,
+ 0x2c7f: 0x000c,
+ // Block 0xb2, offset 0x2c80
+ 0x2c80: 0x000c, 0x2c82: 0x000c, 0x2c83: 0x000c,
+ // Block 0xb3, offset 0x2cc0
+ 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c,
+ 0x2cfc: 0x000c, 0x2cfd: 0x000c, 0x2cff: 0x000c,
+ // Block 0xb4, offset 0x2d00
+ 0x2d00: 0x000c,
+ 0x2d1c: 0x000c, 0x2d1d: 0x000c,
+ // Block 0xb5, offset 0x2d40
+ 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c,
+ 0x2d76: 0x000c, 0x2d77: 0x000c, 0x2d78: 0x000c, 0x2d79: 0x000c, 0x2d7a: 0x000c,
+ 0x2d7d: 0x000c, 0x2d7f: 0x000c,
+ // Block 0xb6, offset 0x2d80
+ 0x2d80: 0x000c,
+ 0x2da0: 0x000a, 0x2da1: 0x000a, 0x2da2: 0x000a, 0x2da3: 0x000a,
+ 0x2da4: 0x000a, 0x2da5: 0x000a, 0x2da6: 0x000a, 0x2da7: 0x000a, 0x2da8: 0x000a, 0x2da9: 0x000a,
+ 0x2daa: 0x000a, 0x2dab: 0x000a, 0x2dac: 0x000a,
+ // Block 0xb7, offset 0x2dc0
+ 0x2deb: 0x000c, 0x2ded: 0x000c,
+ 0x2df0: 0x000c, 0x2df1: 0x000c, 0x2df2: 0x000c, 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c,
+ 0x2df7: 0x000c,
+ // Block 0xb8, offset 0x2e00
+ 0x2e1d: 0x000c,
+ 0x2e1e: 0x000c, 0x2e1f: 0x000c, 0x2e22: 0x000c, 0x2e23: 0x000c,
+ 0x2e24: 0x000c, 0x2e25: 0x000c, 0x2e27: 0x000c, 0x2e28: 0x000c, 0x2e29: 0x000c,
+ 0x2e2a: 0x000c, 0x2e2b: 0x000c,
+ // Block 0xb9, offset 0x2e40
+ 0x2e6f: 0x000c,
+ 0x2e70: 0x000c, 0x2e71: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e74: 0x000c, 0x2e75: 0x000c,
+ 0x2e76: 0x000c, 0x2e77: 0x000c, 0x2e79: 0x000c, 0x2e7a: 0x000c,
+ // Block 0xba, offset 0x2e80
+ 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c,
+ 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c,
+ 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c,
+ 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c,
+ 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c,
+ // Block 0xbb, offset 0x2ec0
+ 0x2ec7: 0x000c,
+ 0x2ed1: 0x000c,
+ 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c,
+ 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c,
+ // Block 0xbc, offset 0x2f00
+ 0x2f0a: 0x000c, 0x2f0b: 0x000c,
+ 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c,
+ 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c,
+ 0x2f18: 0x000c, 0x2f19: 0x000c,
+ // Block 0xbd, offset 0x2f40
+ 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c,
+ 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c,
+ 0x2f7c: 0x000c, 0x2f7d: 0x000c,
+ // Block 0xbe, offset 0x2f80
+ 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c,
+ 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c,
+ 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c,
+ 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c,
+ 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c,
+ 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c,
+ 0x2fb6: 0x000c,
+ // Block 0xbf, offset 0x2fc0
+ 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c,
+ 0x2ff6: 0x000c, 0x2ffa: 0x000c,
+ 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c,
+ // Block 0xc0, offset 0x3000
+ 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c,
+ 0x3007: 0x000c,
+ // Block 0xc1, offset 0x3040
+ 0x3050: 0x000c, 0x3051: 0x000c,
+ 0x3055: 0x000c, 0x3057: 0x000c,
+ // Block 0xc2, offset 0x3080
+ 0x30b3: 0x000c, 0x30b4: 0x000c,
+ // Block 0xc3, offset 0x30c0
+ 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c,
+ // Block 0xc4, offset 0x3100
+ 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, 0x3135: 0x000c,
+ 0x3136: 0x000c,
+ // Block 0xc5, offset 0x3140
+ 0x314f: 0x000c, 0x3150: 0x000c, 0x3151: 0x000c,
+ 0x3152: 0x000c,
+ // Block 0xc6, offset 0x3180
+ 0x319d: 0x000c,
+ 0x319e: 0x000c, 0x31a0: 0x000b, 0x31a1: 0x000b, 0x31a2: 0x000b, 0x31a3: 0x000b,
+ // Block 0xc7, offset 0x31c0
+ 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c,
+ 0x31f3: 0x000b, 0x31f4: 0x000b, 0x31f5: 0x000b,
+ 0x31f6: 0x000b, 0x31f7: 0x000b, 0x31f8: 0x000b, 0x31f9: 0x000b, 0x31fa: 0x000b, 0x31fb: 0x000c,
+ 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c,
+ // Block 0xc8, offset 0x3200
+ 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3205: 0x000c,
+ 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c,
+ 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x322d: 0x000c,
+ // Block 0xc9, offset 0x3240
+ 0x3240: 0x000a, 0x3241: 0x000a, 0x3242: 0x000c, 0x3243: 0x000c, 0x3244: 0x000c, 0x3245: 0x000a,
+ // Block 0xca, offset 0x3280
+ 0x3280: 0x000a, 0x3281: 0x000a, 0x3282: 0x000a, 0x3283: 0x000a, 0x3284: 0x000a, 0x3285: 0x000a,
+ 0x3286: 0x000a, 0x3287: 0x000a, 0x3288: 0x000a, 0x3289: 0x000a, 0x328a: 0x000a, 0x328b: 0x000a,
+ 0x328c: 0x000a, 0x328d: 0x000a, 0x328e: 0x000a, 0x328f: 0x000a, 0x3290: 0x000a, 0x3291: 0x000a,
+ 0x3292: 0x000a, 0x3293: 0x000a, 0x3294: 0x000a, 0x3295: 0x000a, 0x3296: 0x000a,
+ // Block 0xcb, offset 0x32c0
+ 0x32db: 0x000a,
+ // Block 0xcc, offset 0x3300
+ 0x3315: 0x000a,
+ // Block 0xcd, offset 0x3340
+ 0x334f: 0x000a,
+ // Block 0xce, offset 0x3380
+ 0x3389: 0x000a,
+ // Block 0xcf, offset 0x33c0
+ 0x33c3: 0x000a,
+ 0x33ce: 0x0002, 0x33cf: 0x0002, 0x33d0: 0x0002, 0x33d1: 0x0002,
+ 0x33d2: 0x0002, 0x33d3: 0x0002, 0x33d4: 0x0002, 0x33d5: 0x0002, 0x33d6: 0x0002, 0x33d7: 0x0002,
+ 0x33d8: 0x0002, 0x33d9: 0x0002, 0x33da: 0x0002, 0x33db: 0x0002, 0x33dc: 0x0002, 0x33dd: 0x0002,
+ 0x33de: 0x0002, 0x33df: 0x0002, 0x33e0: 0x0002, 0x33e1: 0x0002, 0x33e2: 0x0002, 0x33e3: 0x0002,
+ 0x33e4: 0x0002, 0x33e5: 0x0002, 0x33e6: 0x0002, 0x33e7: 0x0002, 0x33e8: 0x0002, 0x33e9: 0x0002,
+ 0x33ea: 0x0002, 0x33eb: 0x0002, 0x33ec: 0x0002, 0x33ed: 0x0002, 0x33ee: 0x0002, 0x33ef: 0x0002,
+ 0x33f0: 0x0002, 0x33f1: 0x0002, 0x33f2: 0x0002, 0x33f3: 0x0002, 0x33f4: 0x0002, 0x33f5: 0x0002,
+ 0x33f6: 0x0002, 0x33f7: 0x0002, 0x33f8: 0x0002, 0x33f9: 0x0002, 0x33fa: 0x0002, 0x33fb: 0x0002,
+ 0x33fc: 0x0002, 0x33fd: 0x0002, 0x33fe: 0x0002, 0x33ff: 0x0002,
+ // Block 0xd0, offset 0x3400
+ 0x3400: 0x000c, 0x3401: 0x000c, 0x3402: 0x000c, 0x3403: 0x000c, 0x3404: 0x000c, 0x3405: 0x000c,
+ 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x000c,
+ 0x340c: 0x000c, 0x340d: 0x000c, 0x340e: 0x000c, 0x340f: 0x000c, 0x3410: 0x000c, 0x3411: 0x000c,
+ 0x3412: 0x000c, 0x3413: 0x000c, 0x3414: 0x000c, 0x3415: 0x000c, 0x3416: 0x000c, 0x3417: 0x000c,
+ 0x3418: 0x000c, 0x3419: 0x000c, 0x341a: 0x000c, 0x341b: 0x000c, 0x341c: 0x000c, 0x341d: 0x000c,
+ 0x341e: 0x000c, 0x341f: 0x000c, 0x3420: 0x000c, 0x3421: 0x000c, 0x3422: 0x000c, 0x3423: 0x000c,
+ 0x3424: 0x000c, 0x3425: 0x000c, 0x3426: 0x000c, 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c,
+ 0x342a: 0x000c, 0x342b: 0x000c, 0x342c: 0x000c, 0x342d: 0x000c, 0x342e: 0x000c, 0x342f: 0x000c,
+ 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c,
+ 0x3436: 0x000c, 0x343b: 0x000c,
+ 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c,
+ // Block 0xd1, offset 0x3440
+ 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3443: 0x000c, 0x3444: 0x000c, 0x3445: 0x000c,
+ 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c,
+ 0x344c: 0x000c, 0x344d: 0x000c, 0x344e: 0x000c, 0x344f: 0x000c, 0x3450: 0x000c, 0x3451: 0x000c,
+ 0x3452: 0x000c, 0x3453: 0x000c, 0x3454: 0x000c, 0x3455: 0x000c, 0x3456: 0x000c, 0x3457: 0x000c,
+ 0x3458: 0x000c, 0x3459: 0x000c, 0x345a: 0x000c, 0x345b: 0x000c, 0x345c: 0x000c, 0x345d: 0x000c,
+ 0x345e: 0x000c, 0x345f: 0x000c, 0x3460: 0x000c, 0x3461: 0x000c, 0x3462: 0x000c, 0x3463: 0x000c,
+ 0x3464: 0x000c, 0x3465: 0x000c, 0x3466: 0x000c, 0x3467: 0x000c, 0x3468: 0x000c, 0x3469: 0x000c,
+ 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c,
+ 0x3475: 0x000c,
+ // Block 0xd2, offset 0x3480
+ 0x3484: 0x000c,
+ 0x349b: 0x000c, 0x349c: 0x000c, 0x349d: 0x000c,
+ 0x349e: 0x000c, 0x349f: 0x000c, 0x34a1: 0x000c, 0x34a2: 0x000c, 0x34a3: 0x000c,
+ 0x34a4: 0x000c, 0x34a5: 0x000c, 0x34a6: 0x000c, 0x34a7: 0x000c, 0x34a8: 0x000c, 0x34a9: 0x000c,
+ 0x34aa: 0x000c, 0x34ab: 0x000c, 0x34ac: 0x000c, 0x34ad: 0x000c, 0x34ae: 0x000c, 0x34af: 0x000c,
+ // Block 0xd3, offset 0x34c0
+ 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c,
+ 0x34c6: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c,
+ 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c,
+ 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c,
+ 0x34d8: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c,
+ 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e3: 0x000c,
+ 0x34e4: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c,
+ 0x34ea: 0x000c,
+ // Block 0xd4, offset 0x3500
+ 0x3500: 0x0001, 0x3501: 0x0001, 0x3502: 0x0001, 0x3503: 0x0001, 0x3504: 0x0001, 0x3505: 0x0001,
+ 0x3506: 0x0001, 0x3507: 0x0001, 0x3508: 0x0001, 0x3509: 0x0001, 0x350a: 0x0001, 0x350b: 0x0001,
+ 0x350c: 0x0001, 0x350d: 0x0001, 0x350e: 0x0001, 0x350f: 0x0001, 0x3510: 0x000c, 0x3511: 0x000c,
+ 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x0001,
+ 0x3518: 0x0001, 0x3519: 0x0001, 0x351a: 0x0001, 0x351b: 0x0001, 0x351c: 0x0001, 0x351d: 0x0001,
+ 0x351e: 0x0001, 0x351f: 0x0001, 0x3520: 0x0001, 0x3521: 0x0001, 0x3522: 0x0001, 0x3523: 0x0001,
+ 0x3524: 0x0001, 0x3525: 0x0001, 0x3526: 0x0001, 0x3527: 0x0001, 0x3528: 0x0001, 0x3529: 0x0001,
+ 0x352a: 0x0001, 0x352b: 0x0001, 0x352c: 0x0001, 0x352d: 0x0001, 0x352e: 0x0001, 0x352f: 0x0001,
+ 0x3530: 0x0001, 0x3531: 0x0001, 0x3532: 0x0001, 0x3533: 0x0001, 0x3534: 0x0001, 0x3535: 0x0001,
+ 0x3536: 0x0001, 0x3537: 0x0001, 0x3538: 0x0001, 0x3539: 0x0001, 0x353a: 0x0001, 0x353b: 0x0001,
+ 0x353c: 0x0001, 0x353d: 0x0001, 0x353e: 0x0001, 0x353f: 0x0001,
+ // Block 0xd5, offset 0x3540
+ 0x3540: 0x0001, 0x3541: 0x0001, 0x3542: 0x0001, 0x3543: 0x0001, 0x3544: 0x000c, 0x3545: 0x000c,
+ 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x0001,
+ 0x354c: 0x0001, 0x354d: 0x0001, 0x354e: 0x0001, 0x354f: 0x0001, 0x3550: 0x0001, 0x3551: 0x0001,
+ 0x3552: 0x0001, 0x3553: 0x0001, 0x3554: 0x0001, 0x3555: 0x0001, 0x3556: 0x0001, 0x3557: 0x0001,
+ 0x3558: 0x0001, 0x3559: 0x0001, 0x355a: 0x0001, 0x355b: 0x0001, 0x355c: 0x0001, 0x355d: 0x0001,
+ 0x355e: 0x0001, 0x355f: 0x0001, 0x3560: 0x0001, 0x3561: 0x0001, 0x3562: 0x0001, 0x3563: 0x0001,
+ 0x3564: 0x0001, 0x3565: 0x0001, 0x3566: 0x0001, 0x3567: 0x0001, 0x3568: 0x0001, 0x3569: 0x0001,
+ 0x356a: 0x0001, 0x356b: 0x0001, 0x356c: 0x0001, 0x356d: 0x0001, 0x356e: 0x0001, 0x356f: 0x0001,
+ 0x3570: 0x0001, 0x3571: 0x0001, 0x3572: 0x0001, 0x3573: 0x0001, 0x3574: 0x0001, 0x3575: 0x0001,
+ 0x3576: 0x0001, 0x3577: 0x0001, 0x3578: 0x0001, 0x3579: 0x0001, 0x357a: 0x0001, 0x357b: 0x0001,
+ 0x357c: 0x0001, 0x357d: 0x0001, 0x357e: 0x0001, 0x357f: 0x0001,
+ // Block 0xd6, offset 0x3580
+ 0x3580: 0x000d, 0x3581: 0x000d, 0x3582: 0x000d, 0x3583: 0x000d, 0x3584: 0x000d, 0x3585: 0x000d,
+ 0x3586: 0x000d, 0x3587: 0x000d, 0x3588: 0x000d, 0x3589: 0x000d, 0x358a: 0x000d, 0x358b: 0x000d,
+ 0x358c: 0x000d, 0x358d: 0x000d, 0x358e: 0x000d, 0x358f: 0x000d, 0x3590: 0x000d, 0x3591: 0x000d,
+ 0x3592: 0x000d, 0x3593: 0x000d, 0x3594: 0x000d, 0x3595: 0x000d, 0x3596: 0x000d, 0x3597: 0x000d,
+ 0x3598: 0x000d, 0x3599: 0x000d, 0x359a: 0x000d, 0x359b: 0x000d, 0x359c: 0x000d, 0x359d: 0x000d,
+ 0x359e: 0x000d, 0x359f: 0x000d, 0x35a0: 0x000d, 0x35a1: 0x000d, 0x35a2: 0x000d, 0x35a3: 0x000d,
+ 0x35a4: 0x000d, 0x35a5: 0x000d, 0x35a6: 0x000d, 0x35a7: 0x000d, 0x35a8: 0x000d, 0x35a9: 0x000d,
+ 0x35aa: 0x000d, 0x35ab: 0x000d, 0x35ac: 0x000d, 0x35ad: 0x000d, 0x35ae: 0x000d, 0x35af: 0x000d,
+ 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000d, 0x35b3: 0x000d, 0x35b4: 0x000d, 0x35b5: 0x000d,
+ 0x35b6: 0x000d, 0x35b7: 0x000d, 0x35b8: 0x000d, 0x35b9: 0x000d, 0x35ba: 0x000d, 0x35bb: 0x000d,
+ 0x35bc: 0x000d, 0x35bd: 0x000d, 0x35be: 0x000d, 0x35bf: 0x000d,
+ // Block 0xd7, offset 0x35c0
+ 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a,
+ 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35c8: 0x000a, 0x35c9: 0x000a, 0x35ca: 0x000a, 0x35cb: 0x000a,
+ 0x35cc: 0x000a, 0x35cd: 0x000a, 0x35ce: 0x000a, 0x35cf: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a,
+ 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a,
+ 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a,
+ 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a,
+ 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a,
+ 0x35ea: 0x000a, 0x35eb: 0x000a,
+ 0x35f0: 0x000a, 0x35f1: 0x000a, 0x35f2: 0x000a, 0x35f3: 0x000a, 0x35f4: 0x000a, 0x35f5: 0x000a,
+ 0x35f6: 0x000a, 0x35f7: 0x000a, 0x35f8: 0x000a, 0x35f9: 0x000a, 0x35fa: 0x000a, 0x35fb: 0x000a,
+ 0x35fc: 0x000a, 0x35fd: 0x000a, 0x35fe: 0x000a, 0x35ff: 0x000a,
+ // Block 0xd8, offset 0x3600
+ 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a,
+ 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a,
+ 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a,
+ 0x3612: 0x000a, 0x3613: 0x000a,
+ 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a,
+ 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a,
+ 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, 0x362d: 0x000a, 0x362e: 0x000a,
+ 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a,
+ 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a,
+ 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, 0x363f: 0x000a,
+ // Block 0xd9, offset 0x3640
+ 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a,
+ 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a,
+ 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3651: 0x000a,
+ 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a,
+ 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a,
+ 0x365e: 0x000a, 0x365f: 0x000a, 0x3660: 0x000a, 0x3661: 0x000a, 0x3662: 0x000a, 0x3663: 0x000a,
+ 0x3664: 0x000a, 0x3665: 0x000a, 0x3666: 0x000a, 0x3667: 0x000a, 0x3668: 0x000a, 0x3669: 0x000a,
+ 0x366a: 0x000a, 0x366b: 0x000a, 0x366c: 0x000a, 0x366d: 0x000a, 0x366e: 0x000a, 0x366f: 0x000a,
+ 0x3670: 0x000a, 0x3671: 0x000a, 0x3672: 0x000a, 0x3673: 0x000a, 0x3674: 0x000a, 0x3675: 0x000a,
+ // Block 0xda, offset 0x3680
+ 0x3680: 0x0002, 0x3681: 0x0002, 0x3682: 0x0002, 0x3683: 0x0002, 0x3684: 0x0002, 0x3685: 0x0002,
+ 0x3686: 0x0002, 0x3687: 0x0002, 0x3688: 0x0002, 0x3689: 0x0002, 0x368a: 0x0002, 0x368b: 0x000a,
+ 0x368c: 0x000a,
+ 0x36af: 0x000a,
+ // Block 0xdb, offset 0x36c0
+ 0x36ea: 0x000a, 0x36eb: 0x000a,
+ // Block 0xdc, offset 0x3700
+ 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a,
+ 0x3724: 0x000a, 0x3725: 0x000a,
+ // Block 0xdd, offset 0x3740
+ 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a,
+ 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a,
+ 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a,
+ 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a,
+ 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a,
+ 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a,
+ 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a,
+ 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a,
+ 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a,
+ // Block 0xde, offset 0x3780
+ 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a,
+ 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a,
+ 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a,
+ 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a,
+ 0x3798: 0x000a,
+ // Block 0xdf, offset 0x37c0
+ 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a,
+ 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a,
+ 0x37d0: 0x000a, 0x37d1: 0x000a,
+ 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a,
+ 0x37d8: 0x000a, 0x37d9: 0x000a, 0x37da: 0x000a, 0x37db: 0x000a, 0x37dc: 0x000a, 0x37dd: 0x000a,
+ 0x37de: 0x000a, 0x37df: 0x000a, 0x37e0: 0x000a, 0x37e1: 0x000a, 0x37e2: 0x000a, 0x37e3: 0x000a,
+ 0x37e4: 0x000a, 0x37e5: 0x000a, 0x37e6: 0x000a, 0x37e7: 0x000a, 0x37e8: 0x000a, 0x37e9: 0x000a,
+ 0x37ea: 0x000a, 0x37eb: 0x000a, 0x37ec: 0x000a, 0x37ed: 0x000a, 0x37ee: 0x000a, 0x37ef: 0x000a,
+ 0x37f0: 0x000a, 0x37f1: 0x000a, 0x37f2: 0x000a, 0x37f3: 0x000a, 0x37f4: 0x000a, 0x37f5: 0x000a,
+ 0x37f6: 0x000a, 0x37f7: 0x000a, 0x37f8: 0x000a, 0x37f9: 0x000a, 0x37fa: 0x000a, 0x37fb: 0x000a,
+ 0x37fc: 0x000a, 0x37fd: 0x000a, 0x37fe: 0x000a, 0x37ff: 0x000a,
+ // Block 0xe0, offset 0x3800
+ 0x3800: 0x000a, 0x3801: 0x000a, 0x3802: 0x000a, 0x3803: 0x000a, 0x3804: 0x000a, 0x3805: 0x000a,
+ 0x3806: 0x000a, 0x3807: 0x000a,
+ 0x3810: 0x000a, 0x3811: 0x000a,
+ 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a,
+ 0x3818: 0x000a, 0x3819: 0x000a,
+ 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a,
+ 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, 0x3827: 0x000a, 0x3828: 0x000a, 0x3829: 0x000a,
+ 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, 0x382d: 0x000a, 0x382e: 0x000a, 0x382f: 0x000a,
+ 0x3830: 0x000a, 0x3831: 0x000a, 0x3832: 0x000a, 0x3833: 0x000a, 0x3834: 0x000a, 0x3835: 0x000a,
+ 0x3836: 0x000a, 0x3837: 0x000a, 0x3838: 0x000a, 0x3839: 0x000a, 0x383a: 0x000a, 0x383b: 0x000a,
+ 0x383c: 0x000a, 0x383d: 0x000a, 0x383e: 0x000a, 0x383f: 0x000a,
+ // Block 0xe1, offset 0x3840
+ 0x3840: 0x000a, 0x3841: 0x000a, 0x3842: 0x000a, 0x3843: 0x000a, 0x3844: 0x000a, 0x3845: 0x000a,
+ 0x3846: 0x000a, 0x3847: 0x000a,
+ 0x3850: 0x000a, 0x3851: 0x000a,
+ 0x3852: 0x000a, 0x3853: 0x000a, 0x3854: 0x000a, 0x3855: 0x000a, 0x3856: 0x000a, 0x3857: 0x000a,
+ 0x3858: 0x000a, 0x3859: 0x000a, 0x385a: 0x000a, 0x385b: 0x000a, 0x385c: 0x000a, 0x385d: 0x000a,
+ 0x385e: 0x000a, 0x385f: 0x000a, 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a,
+ 0x3864: 0x000a, 0x3865: 0x000a, 0x3866: 0x000a, 0x3867: 0x000a, 0x3868: 0x000a, 0x3869: 0x000a,
+ 0x386a: 0x000a, 0x386b: 0x000a, 0x386c: 0x000a, 0x386d: 0x000a,
+ // Block 0xe2, offset 0x3880
+ 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a,
+ 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a,
+ 0x3890: 0x000a, 0x3891: 0x000a,
+ 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a,
+ 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a,
+ 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a,
+ 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a,
+ 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, 0x38ad: 0x000a, 0x38ae: 0x000a, 0x38af: 0x000a,
+ 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a,
+ 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a,
+ 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a,
+ // Block 0xe3, offset 0x38c0
+ 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a,
+ 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a,
+ 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a,
+ 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a,
+ 0x38d8: 0x000a, 0x38d9: 0x000a, 0x38da: 0x000a, 0x38db: 0x000a, 0x38dc: 0x000a, 0x38dd: 0x000a,
+ 0x38de: 0x000a, 0x38df: 0x000a, 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a,
+ 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a,
+ 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, 0x38ef: 0x000a,
+ 0x38f0: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a,
+ 0x38f6: 0x000a, 0x38fa: 0x000a,
+ 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a,
+ // Block 0xe4, offset 0x3900
+ 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a,
+ 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a,
+ 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3910: 0x000a, 0x3911: 0x000a,
+ 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a,
+ 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a,
+ 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a,
+ 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a,
+ 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a,
+ // Block 0xe5, offset 0x3940
+ 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a,
+ 0x3950: 0x000a, 0x3951: 0x000a,
+ 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a,
+ 0x3958: 0x000a, 0x3959: 0x000a, 0x395a: 0x000a, 0x395b: 0x000a, 0x395c: 0x000a, 0x395d: 0x000a,
+ 0x395e: 0x000a, 0x395f: 0x000a, 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a,
+ 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a,
+ 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a,
+ 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a,
+ 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a,
+ 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a,
+ // Block 0xe6, offset 0x3980
+ 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a,
+ 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a,
+ 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a,
+ // Block 0xe7, offset 0x39c0
+ 0x39fe: 0x000b, 0x39ff: 0x000b,
+ // Block 0xe8, offset 0x3a00
+ 0x3a00: 0x000b, 0x3a01: 0x000b, 0x3a02: 0x000b, 0x3a03: 0x000b, 0x3a04: 0x000b, 0x3a05: 0x000b,
+ 0x3a06: 0x000b, 0x3a07: 0x000b, 0x3a08: 0x000b, 0x3a09: 0x000b, 0x3a0a: 0x000b, 0x3a0b: 0x000b,
+ 0x3a0c: 0x000b, 0x3a0d: 0x000b, 0x3a0e: 0x000b, 0x3a0f: 0x000b, 0x3a10: 0x000b, 0x3a11: 0x000b,
+ 0x3a12: 0x000b, 0x3a13: 0x000b, 0x3a14: 0x000b, 0x3a15: 0x000b, 0x3a16: 0x000b, 0x3a17: 0x000b,
+ 0x3a18: 0x000b, 0x3a19: 0x000b, 0x3a1a: 0x000b, 0x3a1b: 0x000b, 0x3a1c: 0x000b, 0x3a1d: 0x000b,
+ 0x3a1e: 0x000b, 0x3a1f: 0x000b, 0x3a20: 0x000b, 0x3a21: 0x000b, 0x3a22: 0x000b, 0x3a23: 0x000b,
+ 0x3a24: 0x000b, 0x3a25: 0x000b, 0x3a26: 0x000b, 0x3a27: 0x000b, 0x3a28: 0x000b, 0x3a29: 0x000b,
+ 0x3a2a: 0x000b, 0x3a2b: 0x000b, 0x3a2c: 0x000b, 0x3a2d: 0x000b, 0x3a2e: 0x000b, 0x3a2f: 0x000b,
+ 0x3a30: 0x000b, 0x3a31: 0x000b, 0x3a32: 0x000b, 0x3a33: 0x000b, 0x3a34: 0x000b, 0x3a35: 0x000b,
+ 0x3a36: 0x000b, 0x3a37: 0x000b, 0x3a38: 0x000b, 0x3a39: 0x000b, 0x3a3a: 0x000b, 0x3a3b: 0x000b,
+ 0x3a3c: 0x000b, 0x3a3d: 0x000b, 0x3a3e: 0x000b, 0x3a3f: 0x000b,
+ // Block 0xe9, offset 0x3a40
+ 0x3a40: 0x000c, 0x3a41: 0x000c, 0x3a42: 0x000c, 0x3a43: 0x000c, 0x3a44: 0x000c, 0x3a45: 0x000c,
+ 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x000c,
+ 0x3a4c: 0x000c, 0x3a4d: 0x000c, 0x3a4e: 0x000c, 0x3a4f: 0x000c, 0x3a50: 0x000c, 0x3a51: 0x000c,
+ 0x3a52: 0x000c, 0x3a53: 0x000c, 0x3a54: 0x000c, 0x3a55: 0x000c, 0x3a56: 0x000c, 0x3a57: 0x000c,
+ 0x3a58: 0x000c, 0x3a59: 0x000c, 0x3a5a: 0x000c, 0x3a5b: 0x000c, 0x3a5c: 0x000c, 0x3a5d: 0x000c,
+ 0x3a5e: 0x000c, 0x3a5f: 0x000c, 0x3a60: 0x000c, 0x3a61: 0x000c, 0x3a62: 0x000c, 0x3a63: 0x000c,
+ 0x3a64: 0x000c, 0x3a65: 0x000c, 0x3a66: 0x000c, 0x3a67: 0x000c, 0x3a68: 0x000c, 0x3a69: 0x000c,
+ 0x3a6a: 0x000c, 0x3a6b: 0x000c, 0x3a6c: 0x000c, 0x3a6d: 0x000c, 0x3a6e: 0x000c, 0x3a6f: 0x000c,
+ 0x3a70: 0x000b, 0x3a71: 0x000b, 0x3a72: 0x000b, 0x3a73: 0x000b, 0x3a74: 0x000b, 0x3a75: 0x000b,
+ 0x3a76: 0x000b, 0x3a77: 0x000b, 0x3a78: 0x000b, 0x3a79: 0x000b, 0x3a7a: 0x000b, 0x3a7b: 0x000b,
+ 0x3a7c: 0x000b, 0x3a7d: 0x000b, 0x3a7e: 0x000b, 0x3a7f: 0x000b,
+}
+
+// bidiIndex: 24 blocks, 1536 entries, 1536 bytes
+// Block 0 is the zero block.
+var bidiIndex = [1536]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x02,
+ 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08,
+ 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b,
+ 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06,
+ 0xea: 0x07, 0xef: 0x08,
+ 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15,
+ // Block 0x4, offset 0x100
+ 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b,
+ 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22,
+ 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28,
+ 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30,
+ // Block 0x5, offset 0x140
+ 0x140: 0x31, 0x141: 0x32, 0x142: 0x33,
+ 0x14d: 0x34, 0x14e: 0x35,
+ 0x150: 0x36,
+ 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b,
+ 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40,
+ 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47,
+ 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a,
+ 0x17e: 0x4b, 0x17f: 0x4c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54,
+ 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54,
+ 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54,
+ 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f,
+ 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61,
+ 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64,
+ 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67,
+ 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70,
+ 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76,
+ // Block 0x8, offset 0x200
+ 0x237: 0x54,
+ // Block 0x9, offset 0x240
+ 0x252: 0x77, 0x253: 0x78,
+ 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e,
+ 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85,
+ 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a,
+ // Block 0xa, offset 0x280
+ 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e,
+ 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e,
+ 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94,
+ // Block 0xb, offset 0x2c0
+ 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97,
+ 0x2cb: 0x98, 0x2cd: 0x99,
+ 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a,
+ 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a,
+ 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9f, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a,
+ 0x2f8: 0x9a, 0x2f9: 0xa0, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0xa1, 0x2fd: 0xa2, 0x2fe: 0x9a, 0x2ff: 0x9a,
+ // Block 0xc, offset 0x300
+ 0x300: 0xa3, 0x301: 0xa4, 0x302: 0xa5, 0x304: 0xa6, 0x305: 0xa7, 0x306: 0xa8, 0x307: 0xa9,
+ 0x308: 0xaa, 0x30b: 0xab, 0x30c: 0x26, 0x30d: 0xac,
+ 0x310: 0xad, 0x311: 0xae, 0x312: 0xaf, 0x313: 0xb0, 0x316: 0xb1, 0x317: 0xb2,
+ 0x318: 0xb3, 0x319: 0xb4, 0x31a: 0xb5, 0x31c: 0xb6,
+ 0x320: 0xb7,
+ 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba,
+ 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf,
+ 0x33b: 0xc0,
+ // Block 0xd, offset 0x340
+ 0x36b: 0xc1, 0x36c: 0xc2,
+ 0x37e: 0xc3,
+ // Block 0xe, offset 0x380
+ 0x3b2: 0xc4,
+ // Block 0xf, offset 0x3c0
+ 0x3c5: 0xc5, 0x3c6: 0xc6,
+ 0x3c8: 0x54, 0x3c9: 0xc7, 0x3cc: 0x54, 0x3cd: 0xc8,
+ 0x3db: 0xc9, 0x3dc: 0xca, 0x3dd: 0xcb, 0x3de: 0xcc, 0x3df: 0xcd,
+ 0x3e8: 0xce, 0x3e9: 0xcf, 0x3ea: 0xd0,
+ // Block 0x10, offset 0x400
+ 0x400: 0xd1,
+ 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xd2, 0x424: 0x9a, 0x425: 0xd3, 0x426: 0x9a, 0x427: 0x9a,
+ 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a,
+ 0x430: 0x9a, 0x431: 0xa1, 0x432: 0x0e, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a,
+ 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd4, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a,
+ // Block 0x11, offset 0x440
+ 0x440: 0xd5, 0x441: 0x54, 0x442: 0xd6, 0x443: 0xd7, 0x444: 0xd8, 0x445: 0xd9,
+ 0x449: 0xda, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54,
+ 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54,
+ 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xdb, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xdc,
+ 0x460: 0xdd, 0x461: 0xde, 0x462: 0xdf, 0x464: 0xe0, 0x465: 0xe1, 0x466: 0xe2, 0x467: 0xe3,
+ 0x469: 0xe4,
+ 0x47f: 0xe5,
+ // Block 0x12, offset 0x480
+ 0x4bf: 0xe5,
+ // Block 0x13, offset 0x4c0
+ 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b,
+ 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f,
+ 0x4ef: 0x10,
+ 0x4ff: 0x10,
+ // Block 0x14, offset 0x500
+ 0x50f: 0x10,
+ 0x51f: 0x10,
+ 0x52f: 0x10,
+ 0x53f: 0x10,
+ // Block 0x15, offset 0x540
+ 0x540: 0xe6, 0x541: 0xe6, 0x542: 0xe6, 0x543: 0xe6, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe7,
+ 0x548: 0xe6, 0x549: 0xe6, 0x54a: 0xe6, 0x54b: 0xe6, 0x54c: 0xe6, 0x54d: 0xe6, 0x54e: 0xe6, 0x54f: 0xe6,
+ 0x550: 0xe6, 0x551: 0xe6, 0x552: 0xe6, 0x553: 0xe6, 0x554: 0xe6, 0x555: 0xe6, 0x556: 0xe6, 0x557: 0xe6,
+ 0x558: 0xe6, 0x559: 0xe6, 0x55a: 0xe6, 0x55b: 0xe6, 0x55c: 0xe6, 0x55d: 0xe6, 0x55e: 0xe6, 0x55f: 0xe6,
+ 0x560: 0xe6, 0x561: 0xe6, 0x562: 0xe6, 0x563: 0xe6, 0x564: 0xe6, 0x565: 0xe6, 0x566: 0xe6, 0x567: 0xe6,
+ 0x568: 0xe6, 0x569: 0xe6, 0x56a: 0xe6, 0x56b: 0xe6, 0x56c: 0xe6, 0x56d: 0xe6, 0x56e: 0xe6, 0x56f: 0xe6,
+ 0x570: 0xe6, 0x571: 0xe6, 0x572: 0xe6, 0x573: 0xe6, 0x574: 0xe6, 0x575: 0xe6, 0x576: 0xe6, 0x577: 0xe6,
+ 0x578: 0xe6, 0x579: 0xe6, 0x57a: 0xe6, 0x57b: 0xe6, 0x57c: 0xe6, 0x57d: 0xe6, 0x57e: 0xe6, 0x57f: 0xe6,
+ // Block 0x16, offset 0x580
+ 0x58f: 0x10,
+ 0x59f: 0x10,
+ 0x5a0: 0x13,
+ 0x5af: 0x10,
+ 0x5bf: 0x10,
+ // Block 0x17, offset 0x5c0
+ 0x5cf: 0x10,
+}
+
+// Total table size 16568 bytes (16KiB); checksum: F50EF68C
diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go
new file mode 100644
index 000000000..30a3aa933
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/maketables.go
@@ -0,0 +1,986 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Normalization table generator.
+// Data read from the web.
+// See forminfo.go for a description of the trie values associated with each rune.
+
+package main
+
+import (
+ "bytes"
+ "encoding/binary"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/triegen"
+ "golang.org/x/text/internal/ucd"
+)
+
+func main() {
+ gen.Init()
+ loadUnicodeData()
+ compactCCC()
+ loadCompositionExclusions()
+ completeCharFields(FCanonical)
+ completeCharFields(FCompatibility)
+ computeNonStarterCounts()
+ verifyComputed()
+ printChars()
+ testDerived()
+ printTestdata()
+ makeTables()
+}
+
+var (
+ tablelist = flag.String("tables",
+ "all",
+ "comma-separated list of which tables to generate; "+
+ "can be 'decomp', 'recomp', 'info' and 'all'")
+ test = flag.Bool("test",
+ false,
+ "test existing tables against DerivedNormalizationProps and generate test data for regression testing")
+ verbose = flag.Bool("verbose",
+ false,
+ "write data to stdout as it is parsed")
+)
+
+const MaxChar = 0x10FFFF // anything above this shouldn't exist
+
+// Quick Check properties of runes allow us to quickly
+// determine whether a rune may occur in a normal form.
+// For a given normal form, a rune may be guaranteed to occur
+// verbatim (QC=Yes), may or may not combine with another
+// rune (QC=Maybe), or may not occur (QC=No).
+type QCResult int
+
+const (
+ QCUnknown QCResult = iota
+ QCYes
+ QCNo
+ QCMaybe
+)
+
+func (r QCResult) String() string {
+ switch r {
+ case QCYes:
+ return "Yes"
+ case QCNo:
+ return "No"
+ case QCMaybe:
+ return "Maybe"
+ }
+ return "***UNKNOWN***"
+}
+
+const (
+ FCanonical = iota // NFC or NFD
+ FCompatibility // NFKC or NFKD
+ FNumberOfFormTypes
+)
+
+const (
+ MComposed = iota // NFC or NFKC
+ MDecomposed // NFD or NFKD
+ MNumberOfModes
+)
+
+// This contains only the properties we're interested in.
+type Char struct {
+ name string
+ codePoint rune // if zero, this index is not a valid code point.
+ ccc uint8 // canonical combining class
+ origCCC uint8
+ excludeInComp bool // from CompositionExclusions.txt
+ compatDecomp bool // it has a compatibility expansion
+
+ nTrailingNonStarters uint8
+ nLeadingNonStarters uint8 // must be equal to trailing if non-zero
+
+ forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
+
+ state State
+}
+
+var chars = make([]Char, MaxChar+1)
+var cccMap = make(map[uint8]uint8)
+
+func (c Char) String() string {
+ buf := new(bytes.Buffer)
+
+ fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
+ fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
+ fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
+ fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
+ fmt.Fprintf(buf, " state: %v\n", c.state)
+ fmt.Fprintf(buf, " NFC:\n")
+ fmt.Fprint(buf, c.forms[FCanonical])
+ fmt.Fprintf(buf, " NFKC:\n")
+ fmt.Fprint(buf, c.forms[FCompatibility])
+
+ return buf.String()
+}
+
+// In UnicodeData.txt, some ranges are marked like this:
+// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
+// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
+// parseCharacter keeps a state variable indicating the weirdness.
+type State int
+
+const (
+ SNormal State = iota // known to be zero for the type
+ SFirst
+ SLast
+ SMissing
+)
+
+var lastChar = rune('\u0000')
+
+func (c Char) isValid() bool {
+ return c.codePoint != 0 && c.state != SMissing
+}
+
+type FormInfo struct {
+ quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
+ verified [MNumberOfModes]bool // index: MComposed or MDecomposed
+
+ combinesForward bool // May combine with rune on the right
+ combinesBackward bool // May combine with rune on the left
+ isOneWay bool // Never appears in result
+ inDecomp bool // Some decompositions result in this char.
+ decomp Decomposition
+ expandedDecomp Decomposition
+}
+
+func (f FormInfo) String() string {
+ buf := bytes.NewBuffer(make([]byte, 0))
+
+ fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
+ fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
+ fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
+ fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
+ fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
+ fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
+ fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
+ fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
+
+ return buf.String()
+}
+
+type Decomposition []rune
+
+func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
+ decomp := strings.Split(s, " ")
+ if len(decomp) > 0 && skipfirst {
+ decomp = decomp[1:]
+ }
+ for _, d := range decomp {
+ point, err := strconv.ParseUint(d, 16, 64)
+ if err != nil {
+ return a, err
+ }
+ a = append(a, rune(point))
+ }
+ return a, nil
+}
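+
+// For example, parseDecomposition("0041 0300", false) yields
+// []rune{0x0041, 0x0300}. loadUnicodeData below retries with skipfirst set
+// to true when the decomposition field begins with a tag such as
+// "<compat>", so that only the hex code points are parsed.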
+
+func loadUnicodeData() {
+ f := gen.OpenUCDFile("UnicodeData.txt")
+ defer f.Close()
+ p := ucd.New(f)
+ for p.Next() {
+ r := p.Rune(ucd.CodePoint)
+ char := &chars[r]
+
+ char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
+ decmap := p.String(ucd.DecompMapping)
+
+ exp, err := parseDecomposition(decmap, false)
+ isCompat := false
+ if err != nil {
+ if len(decmap) > 0 {
+ exp, err = parseDecomposition(decmap, true)
+ if err != nil {
+ log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
+ }
+ isCompat = true
+ }
+ }
+
+ char.name = p.String(ucd.Name)
+ char.codePoint = r
+ char.forms[FCompatibility].decomp = exp
+ if !isCompat {
+ char.forms[FCanonical].decomp = exp
+ } else {
+ char.compatDecomp = true
+ }
+ if len(decmap) > 0 {
+ char.forms[FCompatibility].decomp = exp
+ }
+ }
+ if err := p.Err(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// compactCCC converts the sparse set of CCC values to a contiguous one,
+// reducing the number of bits needed from 8 to 6.
+func compactCCC() {
+ m := make(map[uint8]uint8)
+ for i := range chars {
+ c := &chars[i]
+ m[c.ccc] = 0
+ }
+ cccs := []int{}
+ for v := range m {
+ cccs = append(cccs, int(v))
+ }
+ sort.Ints(cccs)
+ for i, c := range cccs {
+ cccMap[uint8(i)] = uint8(c)
+ m[uint8(c)] = uint8(i)
+ }
+ for i := range chars {
+ c := &chars[i]
+ c.origCCC = c.ccc
+ c.ccc = m[c.ccc]
+ }
+ if len(m) >= 1<<6 {
+ log.Fatalf("too many difference CCC values: %d >= 64", len(m))
+ }
+}
+
+// CompositionExclusions.txt has form:
+// 0958 # ...
+// See https://unicode.org/reports/tr44/ for full explanation
+func loadCompositionExclusions() {
+ f := gen.OpenUCDFile("CompositionExclusions.txt")
+ defer f.Close()
+ p := ucd.New(f)
+ for p.Next() {
+ c := &chars[p.Rune(0)]
+ if c.excludeInComp {
+ log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
+ }
+ c.excludeInComp = true
+ }
+ if e := p.Err(); e != nil {
+ log.Fatal(e)
+ }
+}
+
+// hasCompatDecomp returns true if any of the recursive
+// decompositions contains a compatibility expansion.
+// In this case, the character may not occur in NFK*.
+func hasCompatDecomp(r rune) bool {
+ c := &chars[r]
+ if c.compatDecomp {
+ return true
+ }
+ for _, d := range c.forms[FCompatibility].decomp {
+ if hasCompatDecomp(d) {
+ return true
+ }
+ }
+ return false
+}
+
+// Hangul related constants.
+const (
+ HangulBase = 0xAC00
+ HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
+
+ JamoLBase = 0x1100
+ JamoLEnd = 0x1113
+ JamoVBase = 0x1161
+ JamoVEnd = 0x1176
+ JamoTBase = 0x11A8
+ JamoTEnd = 0x11C3
+
+ JamoLVTCount = 19 * 21 * 28
+ JamoTCount = 28
+)
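+
+// The bounds above follow from the Hangul syllable block layout:
+// 19*21*28 = 11172 = 0x2BA4 precomposed syllables, so
+// HangulEnd = 0xAC00 + 0x2BA4 = 0xD7A4.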
+
+func isHangul(r rune) bool {
+ return HangulBase <= r && r < HangulEnd
+}
+
+func isHangulWithoutJamoT(r rune) bool {
+ if !isHangul(r) {
+ return false
+ }
+ r -= HangulBase
+ return r < JamoLVTCount && r%JamoTCount == 0
+}
+
+func ccc(r rune) uint8 {
+ return chars[r].ccc
+}
+
+// Insert a rune in a buffer, ordered by Canonical Combining Class.
+func insertOrdered(b Decomposition, r rune) Decomposition {
+ n := len(b)
+ b = append(b, 0)
+ cc := ccc(r)
+ if cc > 0 {
+ // Use bubble sort.
+ for ; n > 0; n-- {
+ if ccc(b[n-1]) <= cc {
+ break
+ }
+ b[n] = b[n-1]
+ }
+ }
+ b[n] = r
+ return b
+}
+
+// Recursively decompose.
+func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
+ dcomp := chars[r].forms[form].decomp
+ if len(dcomp) == 0 {
+ return insertOrdered(d, r)
+ }
+ for _, c := range dcomp {
+ d = decomposeRecursive(form, c, d)
+ }
+ return d
+}
+
+func completeCharFields(form int) {
+ // Phase 0: pre-expand decomposition.
+ for i := range chars {
+ f := &chars[i].forms[form]
+ if len(f.decomp) == 0 {
+ continue
+ }
+ exp := make(Decomposition, 0)
+ for _, c := range f.decomp {
+ exp = decomposeRecursive(form, c, exp)
+ }
+ f.expandedDecomp = exp
+ }
+
+ // Phase 1: composition exclusion, mark decomposition.
+ for i := range chars {
+ c := &chars[i]
+ f := &c.forms[form]
+
+ // Marks script-specific exclusions and version-restricted characters.
+ f.isOneWay = c.excludeInComp
+
+ // Singletons
+ f.isOneWay = f.isOneWay || len(f.decomp) == 1
+
+ // Non-starter decompositions
+ if len(f.decomp) > 1 {
+ chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
+ f.isOneWay = f.isOneWay || chk
+ }
+
+ // Runes that decompose into more than two runes.
+ f.isOneWay = f.isOneWay || len(f.decomp) > 2
+
+ if form == FCompatibility {
+ f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
+ }
+
+ for _, r := range f.decomp {
+ chars[r].forms[form].inDecomp = true
+ }
+ }
+
+ // Phase 2: forward and backward combining.
+ for i := range chars {
+ c := &chars[i]
+ f := &c.forms[form]
+
+ if !f.isOneWay && len(f.decomp) == 2 {
+ f0 := &chars[f.decomp[0]].forms[form]
+ f1 := &chars[f.decomp[1]].forms[form]
+ if !f0.isOneWay {
+ f0.combinesForward = true
+ }
+ if !f1.isOneWay {
+ f1.combinesBackward = true
+ }
+ }
+ if isHangulWithoutJamoT(rune(i)) {
+ f.combinesForward = true
+ }
+ }
+
+ // Phase 3: quick check values.
+ for i := range chars {
+ c := &chars[i]
+ f := &c.forms[form]
+
+ switch {
+ case len(f.decomp) > 0:
+ f.quickCheck[MDecomposed] = QCNo
+ case isHangul(rune(i)):
+ f.quickCheck[MDecomposed] = QCNo
+ default:
+ f.quickCheck[MDecomposed] = QCYes
+ }
+ switch {
+ case f.isOneWay:
+ f.quickCheck[MComposed] = QCNo
+ case (i & 0xffff00) == JamoLBase:
+ f.quickCheck[MComposed] = QCYes
+ if JamoLBase <= i && i < JamoLEnd {
+ f.combinesForward = true
+ }
+ if JamoVBase <= i && i < JamoVEnd {
+ f.quickCheck[MComposed] = QCMaybe
+ f.combinesBackward = true
+ f.combinesForward = true
+ }
+ if JamoTBase <= i && i < JamoTEnd {
+ f.quickCheck[MComposed] = QCMaybe
+ f.combinesBackward = true
+ }
+ case !f.combinesBackward:
+ f.quickCheck[MComposed] = QCYes
+ default:
+ f.quickCheck[MComposed] = QCMaybe
+ }
+ }
+}
+
+func computeNonStarterCounts() {
+ // Phase 4: leading and trailing non-starter count
+ for i := range chars {
+ c := &chars[i]
+
+ runes := []rune{rune(i)}
+ // We always use FCompatibility so that the CGJ insertion points do not
+ // change for repeated normalizations with different forms.
+ if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
+ runes = exp
+ }
+ // We consider runes that combine backwards to be non-starters for the
+ // purpose of Stream-Safe Text Processing.
+ for _, r := range runes {
+ if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
+ break
+ }
+ c.nLeadingNonStarters++
+ }
+ for i := len(runes) - 1; i >= 0; i-- {
+ if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
+ break
+ }
+ c.nTrailingNonStarters++
+ }
+ if c.nTrailingNonStarters > 3 {
+ log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
+ }
+
+ if isHangul(rune(i)) {
+ c.nTrailingNonStarters = 2
+ if isHangulWithoutJamoT(rune(i)) {
+ c.nTrailingNonStarters = 1
+ }
+ }
+
+ if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
+ log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
+ }
+ if t := c.nTrailingNonStarters; t > 3 {
+ log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
+ }
+ }
+}
+
+func printBytes(w io.Writer, b []byte, name string) {
+ fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
+ fmt.Fprintf(w, "var %s = [...]byte {", name)
+ for i, c := range b {
+ switch {
+ case i%64 == 0:
+ fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
+ case i%8 == 0:
+ fmt.Fprintf(w, "\n")
+ }
+ fmt.Fprintf(w, "0x%.2X, ", c)
+ }
+ fmt.Fprint(w, "\n}\n\n")
+}
+
+// See forminfo.go for format.
+func makeEntry(f *FormInfo, c *Char) uint16 {
+ e := uint16(0)
+ if r := c.codePoint; HangulBase <= r && r < HangulEnd {
+ e |= 0x40
+ }
+ if f.combinesForward {
+ e |= 0x20
+ }
+ if f.quickCheck[MDecomposed] == QCNo {
+ e |= 0x4
+ }
+ switch f.quickCheck[MComposed] {
+ case QCYes:
+ case QCNo:
+ e |= 0x10
+ case QCMaybe:
+ e |= 0x18
+ default:
+ log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
+ }
+ e |= uint16(c.nTrailingNonStarters)
+ return e
+}
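+
+// To illustrate the packing: a non-Hangul rune that combines forward, has a
+// composed quick check of Maybe, a decomposed quick check of Yes, and one
+// trailing non-starter is encoded as 0x20 | 0x18 | 0x01 = 0x39.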
+
+// decompSet keeps track of unique decompositions, grouped by whether
+// the decomposition is followed by a trailing and/or leading CCC.
+type decompSet [7]map[string]bool
+
+const (
+ normalDecomp = iota
+ firstMulti
+ firstCCC
+ endMulti
+ firstLeadingCCC
+ firstCCCZeroExcept
+ firstStarterWithNLead
+ lastDecomp
+)
+
+var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
+
+func makeDecompSet() decompSet {
+ m := decompSet{}
+ for i := range m {
+ m[i] = make(map[string]bool)
+ }
+ return m
+}
+func (m *decompSet) insert(key int, s string) {
+ m[key][s] = true
+}
+
+func printCharInfoTables(w io.Writer) int {
+ mkstr := func(r rune, f *FormInfo) (int, string) {
+ d := f.expandedDecomp
+ s := string([]rune(d))
+ if max := 1 << 6; len(s) >= max {
+ const msg = "%U: too many bytes in decomposition: %d >= %d"
+ log.Fatalf(msg, r, len(s), max)
+ }
+ head := uint8(len(s))
+ if f.quickCheck[MComposed] != QCYes {
+ head |= 0x40
+ }
+ if f.combinesForward {
+ head |= 0x80
+ }
+ s = string([]byte{head}) + s
+
+ lccc := ccc(d[0])
+ tccc := ccc(d[len(d)-1])
+ cc := ccc(r)
+ if cc != 0 && lccc == 0 && tccc == 0 {
+ log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
+ }
+ if tccc < lccc && lccc != 0 {
+ const msg = "%U: lccc (%d) must be <= tcc (%d)"
+ log.Fatalf(msg, r, lccc, tccc)
+ }
+ index := normalDecomp
+ nTrail := chars[r].nTrailingNonStarters
+ nLead := chars[r].nLeadingNonStarters
+ if tccc > 0 || lccc > 0 || nTrail > 0 {
+ tccc <<= 2
+ tccc |= nTrail
+ s += string([]byte{tccc})
+ index = endMulti
+ for _, r := range d[1:] {
+ if ccc(r) == 0 {
+ index = firstCCC
+ }
+ }
+ if lccc > 0 || nLead > 0 {
+ s += string([]byte{lccc})
+ if index == firstCCC {
+ log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
+ }
+ index = firstLeadingCCC
+ }
+ if cc != lccc {
+ if cc != 0 {
+ log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
+ }
+ index = firstCCCZeroExcept
+ }
+ } else if len(d) > 1 {
+ index = firstMulti
+ }
+ return index, s
+ }
+
+ decompSet := makeDecompSet()
+ const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
+ decompSet.insert(firstStarterWithNLead, nLeadStr)
+
+ // Store the uniqued decompositions in a byte buffer,
+ // preceded by their byte length.
+ for _, c := range chars {
+ for _, f := range c.forms {
+ if len(f.expandedDecomp) == 0 {
+ continue
+ }
+ if f.combinesBackward {
+ log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
+ }
+ index, s := mkstr(c.codePoint, &f)
+ decompSet.insert(index, s)
+ }
+ }
+
+ decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
+ size := 0
+ positionMap := make(map[string]uint16)
+ decompositions.WriteString("\000")
+ fmt.Fprintln(w, "const (")
+ for i, m := range decompSet {
+ sa := []string{}
+ for s := range m {
+ sa = append(sa, s)
+ }
+ sort.Strings(sa)
+ for _, s := range sa {
+ p := decompositions.Len()
+ decompositions.WriteString(s)
+ positionMap[s] = uint16(p)
+ }
+ if cname[i] != "" {
+ fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
+ }
+ }
+ fmt.Fprintln(w, "maxDecomp = 0x8000")
+ fmt.Fprintln(w, ")")
+ b := decompositions.Bytes()
+ printBytes(w, b, "decomps")
+ size += len(b)
+
+ varnames := []string{"nfc", "nfkc"}
+ for i := 0; i < FNumberOfFormTypes; i++ {
+ trie := triegen.NewTrie(varnames[i])
+
+ for r, c := range chars {
+ f := c.forms[i]
+ d := f.expandedDecomp
+ if len(d) != 0 {
+ _, key := mkstr(c.codePoint, &f)
+ trie.Insert(rune(r), uint64(positionMap[key]))
+ if c.ccc != ccc(d[0]) {
+ // We assume the lead ccc of a decomposition !=0 in this case.
+ if ccc(d[0]) == 0 {
+ log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
+ }
+ }
+ } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
+ // Handle cases where it can't be detected that the nLead should be equal
+ // to nTrail.
+ trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
+ } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
+ trie.Insert(c.codePoint, uint64(0x8000|v))
+ }
+ }
+ sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
+ if err != nil {
+ log.Fatal(err)
+ }
+ size += sz
+ }
+ return size
+}
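+
+// In the generated tries, a rune with a decomposition stores its offset into
+// the decomps array (always below maxDecomp = 0x8000), while other runes
+// with non-trivial properties store 0x8000 | makeEntry(...)<<8 | compacted
+// ccc.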
+
+func contains(sa []string, s string) bool {
+ for _, a := range sa {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+func makeTables() {
+ w := &bytes.Buffer{}
+
+ size := 0
+ if *tablelist == "" {
+ return
+ }
+ list := strings.Split(*tablelist, ",")
+ if *tablelist == "all" {
+ list = []string{"recomp", "info"}
+ }
+
+ // Compute maximum decomposition size.
+ max := 0
+ for _, c := range chars {
+ if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
+ max = n
+ }
+ }
+ fmt.Fprintln(w, `import "sync"`)
+ fmt.Fprintln(w)
+
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
+ fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
+ fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
+ fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
+ fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
+ fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
+ fmt.Fprintln(w, ")\n")
+
+ // Print the CCC remap table.
+ size += len(cccMap)
+ fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
+ for i := 0; i < len(cccMap); i++ {
+ if i%8 == 0 {
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
+ }
+ fmt.Fprintln(w, "\n}\n")
+
+ if contains(list, "info") {
+ size += printCharInfoTables(w)
+ }
+
+ if contains(list, "recomp") {
+ // Note that we use 32 bit keys, instead of 64 bit.
+ // This clips the bits of three entries, but we know
+ // this won't cause a collision. The compiler will catch
+ // any changes made to UnicodeData.txt that introduces
+ // a collision.
+ // Note that the recomposition map for NFC and NFKC
+ // are identical.
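+ // For example, the canonical pair U+0041 LATIN CAPITAL LETTER A
+ // followed by U+0300 COMBINING GRAVE ACCENT is keyed as
+ // 0x0041<<16 | 0x0300 = 0x00410300 and maps back to U+00C0
+ // (LATIN CAPITAL LETTER A WITH GRAVE).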
+
+ // Recomposition map
+ nrentries := 0
+ for _, c := range chars {
+ f := c.forms[FCanonical]
+ if !f.isOneWay && len(f.decomp) > 0 {
+ nrentries++
+ }
+ }
+ sz := nrentries * 8
+ size += sz
+ fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
+ fmt.Fprintln(w, "var recompMap map[uint32]rune")
+ fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
+ fmt.Fprintln(w, `const recompMapPacked = "" +`)
+ var buf [8]byte
+ for i, c := range chars {
+ f := c.forms[FCanonical]
+ d := f.decomp
+ if !f.isOneWay && len(d) > 0 {
+ key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
+ binary.BigEndian.PutUint32(buf[:4], key)
+ binary.BigEndian.PutUint32(buf[4:], uint32(i))
+ fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
+ }
+ }
+ // hack so we don't have to special case the trailing plus sign
+ fmt.Fprintf(w, ` ""`)
+ fmt.Fprintln(w)
+ }
+
+ fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
+ gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
+}
+
+func printChars() {
+ if *verbose {
+ for _, c := range chars {
+ if !c.isValid() || c.state == SMissing {
+ continue
+ }
+ fmt.Println(c)
+ }
+ }
+}
+
+// verifyComputed does various consistency tests.
+func verifyComputed() {
+ for i, c := range chars {
+ for _, f := range c.forms {
+ isNo := (f.quickCheck[MDecomposed] == QCNo)
+ if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
+ log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
+ }
+
+ isMaybe := f.quickCheck[MComposed] == QCMaybe
+ if f.combinesBackward != isMaybe {
+ log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
+ }
+ if len(f.decomp) > 0 && f.combinesForward && isMaybe {
+ log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
+ }
+
+ if len(f.expandedDecomp) != 0 {
+ continue
+ }
+ if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
+ // We accept that these runes are treated differently (it only affects
+ // segment breaking in iteration, most likely on improper use), but
+ // reconsider if more characters are added.
+ // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
+ // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
+ // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
+ // U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
+ // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
+ // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
+ if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
+ log.Fatalf("%U: nLead was %v; want %v", i, a, b)
+ }
+ }
+ }
+ nfc := c.forms[FCanonical]
+ nfkc := c.forms[FCompatibility]
+ if nfc.combinesBackward != nfkc.combinesBackward {
+ log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
+ }
+ }
+}
+
+// Use values in DerivedNormalizationProps.txt to compare against the
+// values we computed.
+// DerivedNormalizationProps.txt has the form:
+// 00C0..00C5 ; NFD_QC; N # ...
+// 0374 ; NFD_QC; N # ...
+// See https://unicode.org/reports/tr44/ for a full explanation.
+func testDerived() {
+ f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
+ defer f.Close()
+ p := ucd.New(f)
+ for p.Next() {
+ r := p.Rune(0)
+ c := &chars[r]
+
+ var ftype, mode int
+ qt := p.String(1)
+ switch qt {
+ case "NFC_QC":
+ ftype, mode = FCanonical, MComposed
+ case "NFD_QC":
+ ftype, mode = FCanonical, MDecomposed
+ case "NFKC_QC":
+ ftype, mode = FCompatibility, MComposed
+ case "NFKD_QC":
+ ftype, mode = FCompatibility, MDecomposed
+ default:
+ continue
+ }
+ var qr QCResult
+ switch p.String(2) {
+ case "Y":
+ qr = QCYes
+ case "N":
+ qr = QCNo
+ case "M":
+ qr = QCMaybe
+ default:
+ log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
+ }
+ if got := c.forms[ftype].quickCheck[mode]; got != qr {
+ log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
+ }
+ c.forms[ftype].verified[mode] = true
+ }
+ if err := p.Err(); err != nil {
+ log.Fatal(err)
+ }
+ // Any unspecified value must be QCYes. Verify this.
+ for i, c := range chars {
+ for j, fd := range c.forms {
+ for k, qr := range fd.quickCheck {
+ if !fd.verified[k] && qr != QCYes {
+ m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
+ log.Printf(m, i, j, k, qr, c.name)
+ }
+ }
+ }
+ }
+}
+
+var testHeader = `const (
+ Yes = iota
+ No
+ Maybe
+)
+
+type formData struct {
+ qc uint8
+ combinesForward bool
+ decomposition string
+}
+
+type runeData struct {
+ r rune
+ ccc uint8
+ nLead uint8
+ nTrail uint8
+ f [2]formData // 0: canonical; 1: compatibility
+}
+
+func f(qc uint8, cf bool, dec string) [2]formData {
+ return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
+}
+
+func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
+ return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
+}
+
+var testData = []runeData{
+`
+
+func printTestdata() {
+ type lastInfo struct {
+ ccc uint8
+ nLead uint8
+ nTrail uint8
+ f string
+ }
+
+ last := lastInfo{}
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, testHeader)
+ for r, c := range chars {
+ f := c.forms[FCanonical]
+ qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
+ f = c.forms[FCompatibility]
+ qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
+ s := ""
+ if d == dk && qc == qck && cf == cfk {
+ s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
+ } else {
+ s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
+ }
+ current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
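+ // Emit a row only when the data differs from the previous rune's,
+ // which keeps the generated test table compact.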
+ if last != current {
+ fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
+ last = current
+ }
+ }
+ fmt.Fprintln(w, "}")
+ gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
+}
diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
index c48a97b0c..26fbd55a1 100644
--- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
+++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.10
+// +build go1.10,!go1.13
package norm
diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
new file mode 100644
index 000000000..7297cce32
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
@@ -0,0 +1,7693 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.13
+
+package norm
+
+import "sync"
+
+const (
+ // Version is the Unicode edition from which the tables are derived.
+ Version = "11.0.0"
+
+ // MaxTransformChunkSize indicates the maximum number of bytes that Transform
+ // may need to write atomically for any Form. Making a destination buffer at
+ // least this size ensures that Transform can always make progress and that
+ // the user does not need to grow the buffer on an ErrShortDst.
+ MaxTransformChunkSize = 35 + maxNonStarters*4
+)
+
+var ccc = [55]uint8{
+ 0, 1, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 84, 91, 103, 107, 118, 122, 129, 130,
+ 132, 202, 214, 216, 218, 220, 222, 224,
+ 226, 228, 230, 232, 233, 234, 240,
+}
+
+const (
+ firstMulti = 0x186D
+ firstCCC = 0x2C9E
+ endMulti = 0x2F60
+ firstLeadingCCC = 0x49AE
+ firstCCCZeroExcept = 0x4A78
+ firstStarterWithNLead = 0x4A9F
+ lastDecomp = 0x4AA1
+ maxDecomp = 0x8000
+)
+
+// decomps: 19105 bytes
+var decomps = [...]byte{
+ // Bytes 0 - 3f
+ 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41,
+ 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41,
+ 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41,
+ 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41,
+ 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41,
+ 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41,
+ 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41,
+ 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41,
+ // Bytes 40 - 7f
+ 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41,
+ 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41,
+ 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41,
+ 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41,
+ 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41,
+ 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41,
+ 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41,
+ 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41,
+ // Bytes 80 - bf
+ 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41,
+ 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41,
+ 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41,
+ 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41,
+ 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41,
+ 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41,
+ 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41,
+ 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42,
+ // Bytes c0 - ff
+ 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5,
+ 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2,
+ 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42,
+ 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1,
+ 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6,
+ 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42,
+ 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90,
+ 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9,
+ // Bytes 100 - 13f
+ 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42,
+ 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F,
+ 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9,
+ 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42,
+ 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB,
+ 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9,
+ 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42,
+ 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5,
+ // Bytes 140 - 17f
+ 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9,
+ 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42,
+ 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A,
+ 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA,
+ 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42,
+ 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F,
+ 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE,
+ 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42,
+ // Bytes 180 - 1bf
+ 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97,
+ 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE,
+ 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42,
+ 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F,
+ 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE,
+ 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42,
+ 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8,
+ 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE,
+ // Bytes 1c0 - 1ff
+ 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42,
+ 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7,
+ 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE,
+ 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42,
+ 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF,
+ 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF,
+ 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42,
+ 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87,
+ // Bytes 200 - 23f
+ 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF,
+ 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42,
+ 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90,
+ 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7,
+ 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42,
+ 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2,
+ 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8,
+ 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42,
+ // Bytes 240 - 27f
+ 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB,
+ 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8,
+ 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42,
+ 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3,
+ 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8,
+ 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42,
+ 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81,
+ 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9,
+ // Bytes 280 - 2bf
+ 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42,
+ 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89,
+ 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9,
+ 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42,
+ 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE,
+ 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA,
+ 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42,
+ 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C,
+ // Bytes 2c0 - 2ff
+ 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA,
+ 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42,
+ 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9,
+ 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA,
+ 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42,
+ 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81,
+ 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB,
+ 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42,
+ // Bytes 300 - 33f
+ 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90,
+ 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43,
+ 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43,
+ 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43,
+ 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43,
+ 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43,
+ 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43,
+ 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43,
+ // Bytes 340 - 37f
+ 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43,
+ 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43,
+ 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43,
+ 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43,
+ 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43,
+ 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43,
+ 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43,
+ 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43,
+ // Bytes 380 - 3bf
+ 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43,
+ 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43,
+ 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43,
+ 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43,
+ 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43,
+ 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43,
+ 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43,
+ 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43,
+ // Bytes 3c0 - 3ff
+ 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43,
+ 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43,
+ 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43,
+ 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43,
+ 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43,
+ 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43,
+ 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43,
+ 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43,
+ // Bytes 400 - 43f
+ 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43,
+ 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43,
+ 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43,
+ 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43,
+ 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43,
+ 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43,
+ 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43,
+ 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43,
+ // Bytes 440 - 47f
+ 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43,
+ 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43,
+ 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43,
+ 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43,
+ 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43,
+ 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43,
+ 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43,
+ 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43,
+ // Bytes 480 - 4bf
+ 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43,
+ 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43,
+ 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43,
+ 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43,
+ 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43,
+ 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43,
+ 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43,
+ 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43,
+ // Bytes 4c0 - 4ff
+ 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43,
+ 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43,
+ 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43,
+ 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43,
+ 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43,
+ 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43,
+ 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43,
+ 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43,
+ // Bytes 500 - 53f
+ 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43,
+ 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43,
+ 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43,
+ 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43,
+ 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43,
+ 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43,
+ 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43,
+ 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43,
+ // Bytes 540 - 57f
+ 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43,
+ 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43,
+ 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43,
+ 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43,
+ 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43,
+ 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43,
+ 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43,
+ 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43,
+ // Bytes 580 - 5bf
+ 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43,
+ 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43,
+ 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43,
+ 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43,
+ 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43,
+ 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43,
+ 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43,
+ 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43,
+ // Bytes 5c0 - 5ff
+ 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43,
+ 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43,
+ 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43,
+ 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43,
+ 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43,
+ 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43,
+ 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43,
+ 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43,
+ // Bytes 600 - 63f
+ 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43,
+ 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43,
+ 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43,
+ 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43,
+ 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43,
+ 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43,
+ 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43,
+ 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43,
+ // Bytes 640 - 67f
+ 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43,
+ 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43,
+ 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43,
+ 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43,
+ 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43,
+ 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43,
+ 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43,
+ 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43,
+ // Bytes 680 - 6bf
+ 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43,
+ 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43,
+ 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43,
+ 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43,
+ 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43,
+ 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43,
+ 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43,
+ 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43,
+ // Bytes 6c0 - 6ff
+ 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43,
+ 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43,
+ 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43,
+ 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43,
+ 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43,
+ 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43,
+ 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43,
+ 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43,
+ // Bytes 700 - 73f
+ 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43,
+ 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43,
+ 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43,
+ 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43,
+ 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43,
+ 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43,
+ 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43,
+ 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43,
+ // Bytes 740 - 77f
+ 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43,
+ 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43,
+ 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43,
+ 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43,
+ 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43,
+ 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43,
+ 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43,
+ 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43,
+ // Bytes 780 - 7bf
+ 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43,
+ 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43,
+ 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43,
+ 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43,
+ 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43,
+ 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43,
+ 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43,
+ 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43,
+ // Bytes 7c0 - 7ff
+ 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43,
+ 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43,
+ 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43,
+ 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43,
+ 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43,
+ 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43,
+ 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43,
+ 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43,
+ // Bytes 800 - 83f
+ 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43,
+ 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43,
+ 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43,
+ 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43,
+ 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43,
+ 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43,
+ 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43,
+ 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43,
+ // Bytes 840 - 87f
+ 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43,
+ 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43,
+ 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43,
+ 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43,
+ 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43,
+ 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43,
+ 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43,
+ 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43,
+ // Bytes 880 - 8bf
+ 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43,
+ 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43,
+ 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43,
+ 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43,
+ 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43,
+ 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43,
+ 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43,
+ 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43,
+ // Bytes 8c0 - 8ff
+ 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43,
+ 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43,
+ 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43,
+ 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43,
+ 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43,
+ 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43,
+ 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43,
+ 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43,
+ // Bytes 900 - 93f
+ 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43,
+ 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43,
+ 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43,
+ 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43,
+ 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43,
+ 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43,
+ 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43,
+ 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43,
+ // Bytes 940 - 97f
+ 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43,
+ 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43,
+ 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43,
+ 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43,
+ 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43,
+ 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43,
+ 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43,
+ 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43,
+ // Bytes 980 - 9bf
+ 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43,
+ 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43,
+ 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43,
+ 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43,
+ 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43,
+ 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43,
+ 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43,
+ 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43,
+ // Bytes 9c0 - 9ff
+ 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43,
+ 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43,
+ 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43,
+ 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43,
+ 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43,
+ 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43,
+ 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43,
+ 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43,
+ // Bytes a00 - a3f
+ 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43,
+ 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43,
+ 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43,
+ 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43,
+ 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43,
+ 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43,
+ 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43,
+ 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43,
+ // Bytes a40 - a7f
+ 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43,
+ 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43,
+ 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43,
+ 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43,
+ 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43,
+ 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43,
+ 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43,
+ 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43,
+ // Bytes a80 - abf
+ 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43,
+ 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43,
+ 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43,
+ 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43,
+ 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43,
+ 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43,
+ 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43,
+ 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43,
+ // Bytes ac0 - aff
+ 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43,
+ 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43,
+ 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43,
+ 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43,
+ 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43,
+ 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43,
+ 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43,
+ 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43,
+ // Bytes b00 - b3f
+ 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43,
+ 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43,
+ 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43,
+ 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43,
+ 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43,
+ 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43,
+ 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43,
+ 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43,
+ // Bytes b40 - b7f
+ 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43,
+ 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43,
+ 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43,
+ 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43,
+ 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43,
+ 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43,
+ 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43,
+ 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43,
+ // Bytes b80 - bbf
+ 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43,
+ 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43,
+ 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43,
+ 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43,
+ 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43,
+ 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43,
+ 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43,
+ 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43,
+ // Bytes bc0 - bff
+ 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43,
+ 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43,
+ 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43,
+ 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43,
+ 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43,
+ 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43,
+ 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43,
+ 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43,
+ // Bytes c00 - c3f
+ 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43,
+ 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43,
+ 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43,
+ 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43,
+ 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43,
+ 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43,
+ 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43,
+ 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43,
+ // Bytes c40 - c7f
+ 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43,
+ 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43,
+ 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43,
+ 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43,
+ 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43,
+ 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43,
+ 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43,
+ 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43,
+ // Bytes c80 - cbf
+ 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43,
+ 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43,
+ 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43,
+ 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43,
+ 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43,
+ 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43,
+ 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43,
+ 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43,
+ // Bytes cc0 - cff
+ 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43,
+ 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43,
+ 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43,
+ 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43,
+ 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43,
+ 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43,
+ 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43,
+ 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43,
+ // Bytes d00 - d3f
+ 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43,
+ 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43,
+ 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43,
+ 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43,
+ 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43,
+ 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43,
+ 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43,
+ 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43,
+ // Bytes d40 - d7f
+ 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43,
+ 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43,
+ 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43,
+ 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43,
+ 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43,
+ 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43,
+ 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43,
+ 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43,
+ // Bytes d80 - dbf
+ 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43,
+ 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43,
+ 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43,
+ 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43,
+ 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43,
+ 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43,
+ 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43,
+ 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43,
+ // Bytes dc0 - dff
+ 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43,
+ 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43,
+ 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43,
+ 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43,
+ 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43,
+ 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43,
+ 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43,
+ 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43,
+ // Bytes e00 - e3f
+ 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43,
+ 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43,
+ 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43,
+ 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43,
+ 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43,
+ 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43,
+ 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43,
+ 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43,
+ // Bytes e40 - e7f
+ 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43,
+ 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43,
+ 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43,
+ 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43,
+ 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43,
+ 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43,
+ 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43,
+ 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43,
+ // Bytes e80 - ebf
+ 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43,
+ 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43,
+ 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43,
+ 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43,
+ 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43,
+ 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43,
+ 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43,
+ 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43,
+ // Bytes ec0 - eff
+ 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43,
+ 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43,
+ 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43,
+ 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43,
+ 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43,
+ 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43,
+ 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43,
+ 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43,
+ // Bytes f00 - f3f
+ 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43,
+ 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43,
+ 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43,
+ 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43,
+ 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43,
+ 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43,
+ 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43,
+ 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43,
+ // Bytes f40 - f7f
+ 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43,
+ 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43,
+ 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43,
+ 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43,
+ 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43,
+ 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43,
+ 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43,
+ 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43,
+ // Bytes f80 - fbf
+ 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43,
+ 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43,
+ 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43,
+ 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43,
+ 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43,
+ 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43,
+ 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43,
+ 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43,
+ // Bytes fc0 - fff
+ 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43,
+ 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43,
+ 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43,
+ 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43,
+ 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43,
+ 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43,
+ 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43,
+ 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43,
+ // Bytes 1000 - 103f
+ 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43,
+ 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43,
+ 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43,
+ 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43,
+ 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43,
+ 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43,
+ 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43,
+ 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43,
+ // Bytes 1040 - 107f
+ 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43,
+ 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43,
+ 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43,
+ 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43,
+ 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43,
+ 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43,
+ 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43,
+ 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43,
+ // Bytes 1080 - 10bf
+ 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43,
+ 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43,
+ 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43,
+ 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43,
+ 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43,
+ 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43,
+ 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43,
+ 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43,
+ // Bytes 10c0 - 10ff
+ 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43,
+ 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43,
+ 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43,
+ 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43,
+ 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43,
+ 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43,
+ 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43,
+ 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43,
+ // Bytes 1100 - 113f
+ 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43,
+ 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43,
+ 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43,
+ 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43,
+ 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43,
+ 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43,
+ 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43,
+ 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43,
+ // Bytes 1140 - 117f
+ 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43,
+ 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43,
+ 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43,
+ 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43,
+ 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43,
+ 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43,
+ 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43,
+ 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43,
+ // Bytes 1180 - 11bf
+ 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43,
+ 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43,
+ 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43,
+ 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43,
+ 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43,
+ 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43,
+ 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43,
+ 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43,
+ // Bytes 11c0 - 11ff
+ 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43,
+ 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43,
+ 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43,
+ 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43,
+ 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43,
+ 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43,
+ 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43,
+ 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43,
+ // Bytes 1200 - 123f
+ 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43,
+ 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43,
+ 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43,
+ 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43,
+ 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43,
+ 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43,
+ 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43,
+ 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43,
+ // Bytes 1240 - 127f
+ 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43,
+ 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43,
+ 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43,
+ 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43,
+ 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43,
+ 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43,
+ 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43,
+ 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43,
+ // Bytes 1280 - 12bf
+ 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43,
+ 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43,
+ 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43,
+ 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43,
+ 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43,
+ 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43,
+ 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43,
+ 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43,
+ // Bytes 12c0 - 12ff
+ 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43,
+ 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43,
+ 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43,
+ 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43,
+ 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43,
+ 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43,
+ 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43,
+ 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43,
+ // Bytes 1300 - 133f
+ 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43,
+ 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43,
+ 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43,
+ 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43,
+ 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43,
+ 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43,
+ 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43,
+ 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43,
+ // Bytes 1340 - 137f
+ 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43,
+ 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43,
+ 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43,
+ 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43,
+ 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43,
+ 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43,
+ 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43,
+ 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43,
+ // Bytes 1380 - 13bf
+ 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43,
+ 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43,
+ 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43,
+ 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43,
+ 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43,
+ 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43,
+ 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43,
+ 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43,
+ // Bytes 13c0 - 13ff
+ 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43,
+ 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43,
+ 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43,
+ 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43,
+ 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43,
+ 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43,
+ 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43,
+ 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43,
+ // Bytes 1400 - 143f
+ 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43,
+ 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43,
+ 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43,
+ 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43,
+ 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43,
+ 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43,
+ 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43,
+ 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43,
+ // Bytes 1440 - 147f
+ 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43,
+ 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43,
+ 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43,
+ 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43,
+ 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43,
+ 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43,
+ 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43,
+ 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43,
+ // Bytes 1480 - 14bf
+ 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43,
+ 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43,
+ 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43,
+ 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43,
+ 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43,
+ 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43,
+ 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43,
+ 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43,
+ // Bytes 14c0 - 14ff
+ 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43,
+ 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43,
+ 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43,
+ 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43,
+ 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43,
+ 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43,
+ 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43,
+ 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43,
+ // Bytes 1500 - 153f
+ 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43,
+ 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43,
+ 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43,
+ 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43,
+ 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43,
+ 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43,
+ 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43,
+ 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43,
+ // Bytes 1540 - 157f
+ 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43,
+ 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43,
+ 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43,
+ 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43,
+ 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43,
+ 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43,
+ 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43,
+ 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43,
+ // Bytes 1580 - 15bf
+ 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43,
+ 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43,
+ 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43,
+ 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43,
+ 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43,
+ 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43,
+ 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43,
+ 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43,
+ // Bytes 15c0 - 15ff
+ 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43,
+ 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43,
+ 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43,
+ 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43,
+ 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43,
+ 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43,
+ 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43,
+ 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43,
+ // Bytes 1600 - 163f
+ 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43,
+ 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43,
+ 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43,
+ 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43,
+ 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43,
+ 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43,
+ 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43,
+ 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43,
+ // Bytes 1640 - 167f
+ 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44,
+ 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94,
+ 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0,
+ 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA,
+ 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0,
+ 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44,
+ 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93,
+ 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0,
+ // Bytes 1680 - 16bf
+ 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88,
+ 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1,
+ 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44,
+ 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86,
+ 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0,
+ 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94,
+ 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2,
+ 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44,
+ // Bytes 16c0 - 16ff
+ 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80,
+ 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0,
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93,
+ 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3,
+ 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44,
+ 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A,
+ 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0,
+ 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA,
+ // Bytes 1700 - 173f
+ 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3,
+ 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44,
+ 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE,
+ 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0,
+ 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB,
+ 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4,
+ 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44,
+ 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2,
+ // Bytes 1740 - 177f
+ 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0,
+ 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84,
+ 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5,
+ 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44,
+ 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89,
+ 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0,
+ 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A,
+ 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5,
+ // Bytes 1780 - 17bf
+ 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44,
+ 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2,
+ 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0,
+ 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A,
+ 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6,
+ 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44,
+ 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93,
+ 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0,
+ // Bytes 17c0 - 17ff
+ 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7,
+ 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6,
+ 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44,
+ 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5,
+ 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0,
+ 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92,
+ 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7,
+ 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44,
+ // Bytes 1800 - 183f
+ 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2,
+ 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0,
+ 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92,
+ 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8,
+ 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44,
+ 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85,
+ 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0,
+ 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A,
+ // Bytes 1840 - 187f
+ 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9,
+ 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44,
+ 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84,
+ 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0,
+ 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92,
+ 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21,
+ 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30,
+ 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42,
+ // Bytes 1880 - 18bf
+ 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31,
+ 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31,
+ 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42,
+ 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39,
+ 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32,
+ 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42,
+ 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35,
+ 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32,
+ // Bytes 18c0 - 18ff
+ 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42,
+ 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31,
+ 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33,
+ 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42,
+ 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39,
+ 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34,
+ 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42,
+ 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35,
+ // Bytes 1900 - 193f
+ 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34,
+ 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42,
+ 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C,
+ 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37,
+ 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42,
+ 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D,
+ 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41,
+ 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42,
+ // Bytes 1940 - 197f
+ 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A,
+ 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48,
+ 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42,
+ 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A,
+ 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49,
+ 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42,
+ 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A,
+ 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D,
+ // Bytes 1980 - 19bf
+ 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42,
+ 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F,
+ 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50,
+ 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42,
+ 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76,
+ 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57,
+ 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42,
+ 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64,
+ // Bytes 19c0 - 19ff
+ 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64,
+ 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42,
+ 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66,
+ 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66,
+ 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42,
+ 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76,
+ 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B,
+ 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42,
+ // Bytes 1a00 - 1a3f
+ 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74,
+ 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C,
+ 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42,
+ 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56,
+ 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D,
+ 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42,
+ 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46,
+ 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E,
+ // Bytes 1a40 - 1a7f
+ 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42,
+ 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46,
+ 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70,
+ 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42,
+ 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69,
+ 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29,
+ 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29,
+ 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29,
+ // Bytes 1a80 - 1abf
+ 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29,
+ 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29,
+ 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29,
+ 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29,
+ 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29,
+ 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29,
+ 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29,
+ 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29,
+ // Bytes 1ac0 - 1aff
+ 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29,
+ 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29,
+ 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29,
+ 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29,
+ 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29,
+ 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29,
+ 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29,
+ 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29,
+ // Bytes 1b00 - 1b3f
+ 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29,
+ 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29,
+ 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29,
+ 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29,
+ 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29,
+ 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29,
+ 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29,
+ 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29,
+ // Bytes 1b40 - 1b7f
+ 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29,
+ 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29,
+ 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29,
+ 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E,
+ 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E,
+ 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E,
+ 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E,
+ 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E,
+ // Bytes 1b80 - 1bbf
+ 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E,
+ 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D,
+ 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E,
+ 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A,
+ 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49,
+ 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7,
+ 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61,
+ 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D,
+ // Bytes 1bc0 - 1bff
+ 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45,
+ 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A,
+ 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49,
+ 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73,
+ 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72,
+ 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75,
+ 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32,
+ 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32,
+ // Bytes 1c00 - 1c3f
+ 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67,
+ 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C,
+ 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61,
+ 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A,
+ 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32,
+ 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9,
+ 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7,
+ 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32,
+ // Bytes 1c40 - 1c7f
+ 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C,
+ 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69,
+ 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43,
+ 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E,
+ 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46,
+ 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57,
+ 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C,
+ 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73,
+ // Bytes 1c80 - 1cbf
+ 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31,
+ 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44,
+ 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34,
+ 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28,
+ 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29,
+ 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31,
+ 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44,
+ 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81,
+ // Bytes 1cc0 - 1cff
+ 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31,
+ 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9,
+ 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6,
+ 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44,
+ 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C,
+ 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34,
+ 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88,
+ 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6,
+ // Bytes 1d00 - 1d3f
+ 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44,
+ 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97,
+ 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36,
+ 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5,
+ 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7,
+ 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44,
+ 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82,
+ 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39,
+ // Bytes 1d40 - 1d7f
+ 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9,
+ 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E,
+ 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44,
+ 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69,
+ 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5,
+ 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB,
+ 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4,
+ 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44,
+ // Bytes 1d80 - 1dbf
+ 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9,
+ 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8,
+ 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE,
+ 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8,
+ 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44,
+ 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9,
+ 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8,
+ 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC,
+ // Bytes 1dc0 - 1dff
+ 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA,
+ 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44,
+ 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9,
+ 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8,
+ 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89,
+ 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB,
+ 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44,
+ 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9,
+ // Bytes 1e00 - 1e3f
+ 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8,
+ 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89,
+ 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC,
+ 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44,
+ 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9,
+ 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8,
+ 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89,
+ 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE,
+ // Bytes 1e40 - 1e7f
+ 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44,
+ 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9,
+ 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8,
+ 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD,
+ 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3,
+ 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44,
+ 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9,
+ 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8,
+ // Bytes 1e80 - 1ebf
+ 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD,
+ 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4,
+ 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44,
+ 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9,
+ 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8,
+ 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE,
+ 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5,
+ 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44,
+ // Bytes 1ec0 - 1eff
+ 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8,
+ 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8,
+ 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1,
+ 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6,
+ 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44,
+ 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9,
+ 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8,
+ 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85,
+ // Bytes 1f00 - 1f3f
+ 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9,
+ 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44,
+ 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8,
+ 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8,
+ 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A,
+ 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81,
+ 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44,
+ 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9,
+ // Bytes 1f40 - 1f7f
+ 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9,
+ 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85,
+ 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82,
+ 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44,
+ 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8,
+ 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9,
+ 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85,
+ 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83,
+ // Bytes 1f80 - 1fbf
+ 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44,
+ 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8,
+ 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9,
+ 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87,
+ 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84,
+ 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44,
+ 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8,
+ 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9,
+ // Bytes 1fc0 - 1fff
+ 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89,
+ 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86,
+ 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44,
+ 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8,
+ 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9,
+ 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86,
+ 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86,
+ 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44,
+ // Bytes 2000 - 203f
+ 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9,
+ 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9,
+ 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4,
+ 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A,
+ 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44,
+ 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8,
+ 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9,
+ 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87,
+ // Bytes 2040 - 207f
+ 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A,
+ 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44,
+ 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84,
+ 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29,
+ 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28,
+ 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84,
+ 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29,
+ 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28,
+ // Bytes 2080 - 20bf
+ 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84,
+ 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29,
+ 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28,
+ 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84,
+ 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29,
+ 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28,
+ 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8,
+ 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29,
+ // Bytes 20c0 - 20ff
+ 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28,
+ 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB,
+ 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29,
+ 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28,
+ 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85,
+ 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29,
+ 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28,
+ 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90,
+ // Bytes 2100 - 213f
+ 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29,
+ 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28,
+ 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD,
+ 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29,
+ 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28,
+ 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C,
+ 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29,
+ 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28,
+ // Bytes 2140 - 217f
+ 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89,
+ 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29,
+ 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28,
+ 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5,
+ 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29,
+ 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28,
+ 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3,
+ 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29,
+ // Bytes 2180 - 21bf
+ 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31,
+ 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6,
+ 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9,
+ 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31,
+ 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7,
+ 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5,
+ 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31,
+ 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6,
+ // Bytes 21c0 - 21ff
+ 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9,
+ 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31,
+ 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6,
+ 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9,
+ 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31,
+ 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6,
+ 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9,
+ 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31,
+ // Bytes 2200 - 223f
+ 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6,
+ 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9,
+ 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31,
+ 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81,
+ 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35,
+ 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31,
+ 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81,
+ 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39,
+ // Bytes 2240 - 227f
+ 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32,
+ 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6,
+ 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9,
+ 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32,
+ 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6,
+ 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9,
+ 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32,
+ 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6,
+ // Bytes 2280 - 22bf
+ 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5,
+ 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32,
+ 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6,
+ 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33,
+ 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33,
+ 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6,
+ 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34,
+ 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33,
+ // Bytes 22c0 - 22ff
+ 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81,
+ 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36,
+ 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37,
+ 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88,
+ 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D,
+ 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31,
+ 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2,
+ 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88,
+ // Bytes 2300 - 233f
+ 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD,
+ 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9,
+ 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85,
+ 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46,
+ 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8,
+ 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA,
+ 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8,
+ 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE,
+ // Bytes 2340 - 237f
+ 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9,
+ 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC,
+ 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46,
+ 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8,
+ 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA,
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8,
+ 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD,
+ 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8,
+ // Bytes 2380 - 23bf
+ 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89,
+ 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8,
+ 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD,
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8,
+ 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC,
+ 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8,
+ 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89,
+ // Bytes 23c0 - 23ff
+ 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46,
+ 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8,
+ 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3,
+ 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8,
+ 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD,
+ 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9,
+ 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE,
+ 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46,
+ // Bytes 2400 - 243f
+ 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8,
+ 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5,
+ 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9,
+ 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85,
+ 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9,
+ 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A,
+ 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46,
+ 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8,
+ // Bytes 2440 - 247f
+ 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7,
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8,
+ 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85,
+ 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9,
+ 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A,
+ 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46,
+ 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8,
+ 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81,
+ // Bytes 2480 - 24bf
+ 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9,
+ 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84,
+ 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8,
+ 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85,
+ 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9,
+ 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84,
+ 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8,
+ // Bytes 24c0 - 24ff
+ 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9,
+ 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89,
+ 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46,
+ 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9,
+ 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84,
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8,
+ 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC,
+ // Bytes 2500 - 253f
+ 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9,
+ 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A,
+ 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46,
+ 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9,
+ 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85,
+ 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8,
+ 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9,
+ // Bytes 2540 - 257f
+ 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD,
+ 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46,
+ 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9,
+ 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86,
+ 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8,
+ 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9,
+ 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A,
+ // Bytes 2580 - 25bf
+ 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46,
+ 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9,
+ 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A,
+ 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9,
+ 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8,
+ 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC,
+ 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46,
+ // Bytes 25c0 - 25ff
+ 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9,
+ 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9,
+ 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94,
+ 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9,
+ 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88,
+ 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46,
+ 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9,
+ // Bytes 2600 - 263f
+ 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9,
+ 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94,
+ 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB,
+ 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2,
+ 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46,
+ 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0,
+ 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD,
+ // Bytes 2640 - 267f
+ 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82,
+ 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0,
+ 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE,
+ 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7,
+ 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46,
+ 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0,
+ 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE,
+ 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1,
+ // Bytes 2680 - 26bf
+ 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0,
+ 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE,
+ 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2,
+ 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46,
+ 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2,
+ 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81,
+ 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88,
+ 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3,
+ // Bytes 26c0 - 26ff
+ 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82,
+ 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88,
+ 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46,
+ 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3,
+ 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83,
+ 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA,
+ 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3,
+ 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD,
+ // Bytes 2700 - 273f
+ 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90,
+ 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46,
+ 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72,
+ 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3,
+ 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28,
+ 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48,
+ 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29,
+ 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1,
+ // Bytes 2740 - 277f
+ 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85,
+ 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1,
+ 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87,
+ 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84,
+ 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1,
+ 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28,
+ 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48,
+ 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29,
+ // Bytes 2780 - 27bf
+ 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1,
+ 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85,
+ 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1,
+ 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91,
+ 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84,
+ 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61,
+ 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8,
+ 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48,
+ // Bytes 27c0 - 27ff
+ 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87,
+ 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9,
+ 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7,
+ 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8,
+ 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84,
+ 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8,
+ 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88,
+ 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2,
+ // Bytes 2800 - 283f
+ 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2,
+ 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2,
+ 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88,
+ 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE,
+ 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3,
+ 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95,
+ 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3,
+ 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B,
+ // Bytes 2840 - 287f
+ 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94,
+ 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3,
+ 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95,
+ 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3,
+ 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C,
+ 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94,
+ 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3,
+ 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95,
+ // Bytes 2880 - 28bf
+ 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6,
+ 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3,
+ 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9,
+ 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1,
+ // Bytes 28c0 - 28ff
+ 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3,
+ 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A,
+ 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3,
+ 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86,
+ 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3,
+ 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB,
+ 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3,
+ // Bytes 2900 - 293f
+ 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92,
+ 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3,
+ 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3,
+ 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3,
+ 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82,
+ 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98,
+ 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3,
+ // Bytes 2940 - 297f
+ 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB,
+ 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E,
+ 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3,
+ 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF,
+ 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82,
+ // Bytes 2980 - 29bf
+ 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF,
+ 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2,
+ 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2,
+ 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2,
+ 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB,
+ 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3,
+ 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82,
+ 0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3,
+ // Bytes 29c0 - 29ff
+ 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C,
+ 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB,
+ 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83,
+ 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD,
+ 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3,
+ 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B,
+ // Bytes 2a00 - 2a3f
+ 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3,
+ 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC,
+ 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3,
+ 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82,
+ 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C,
+ 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ // Bytes 2a40 - 2a7f
+ 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F,
+ 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83,
+ 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A,
+ 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3,
+ 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC,
+ 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3,
+ 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF,
+ 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3,
+ // Bytes 2a80 - 2abf
+ 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83,
+ 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3,
+ 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C,
+ 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82,
+ 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F,
+ 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83,
+ 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC,
+ // Bytes 2ac0 - 2aff
+ 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3,
+ 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88,
+ 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3,
+ 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC,
+ 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4,
+ 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1,
+ 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92,
+ 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9,
+ // Bytes 2b00 - 2b3f
+ 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7,
+ 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2,
+ 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2,
+ 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82,
+ 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD,
+ 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83,
+ 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5,
+ // Bytes 2b40 - 2b7f
+ 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F,
+ 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83,
+ 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98,
+ 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B,
+ 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E,
+ // Bytes 2b80 - 2bbf
+ 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83,
+ 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1,
+ 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB,
+ 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84,
+ 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1,
+ 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3,
+ // Bytes 2bc0 - 2bff
+ 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB,
+ 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD,
+ 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD,
+ 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52,
+ 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ // Bytes 2c00 - 2c3f
+ 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3,
+ 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83,
+ 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3,
+ 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83,
+ 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88,
+ 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3,
+ 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88,
+ // Bytes 2c40 - 2c7f
+ 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3,
+ 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7,
+ 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3,
+ 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F,
+ 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83,
+ 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3,
+ 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9,
+ // Bytes 2c80 - 2cbf
+ 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84,
+ 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9,
+ 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88,
+ 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0,
+ 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0,
+ 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0,
+ 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0,
+ 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0,
+ // Bytes 2cc0 - 2cff
+ 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0,
+ 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0,
+ 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0,
+ 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0,
+ 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0,
+ 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0,
+ 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0,
+ 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0,
+ // Bytes 2d00 - 2d3f
+ 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0,
+ 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0,
+ 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0,
+ 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1,
+ 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1,
+ 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ // Bytes 2d40 - 2d7f
+ 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1,
+ 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0,
+ // Bytes 2d80 - 2dbf
+ 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01,
+ 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84,
+ 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0,
+ 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D,
+ 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0,
+ 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01,
+ 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92,
+ 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0,
+ // Bytes 2dc0 - 2dff
+ 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96,
+ 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0,
+ 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01,
+ 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0,
+ 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0,
+ 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x12, 0x44, 0x44,
+ 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC,
+ 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9,
+ // Bytes 2e00 - 2e3f
+ 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9,
+ 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9,
+ 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5,
+ 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01,
+ // Bytes 2e40 - 2e7f
+ 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01,
+ 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01,
+ // Bytes 2e80 - 2ebf
+ 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01,
+ 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01,
+ 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3,
+ 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1,
+ 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4,
+ 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99,
+ 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C,
+ 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83,
+ // Bytes 2ec0 - 2eff
+ 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83,
+ 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3,
+ 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1,
+ 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80,
+ 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4,
+ 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82,
+ 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82,
+ 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3,
+ // Bytes 2f00 - 2f3f
+ 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3,
+ 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC,
+ 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F,
+ 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D,
+ 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3,
+ 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88,
+ 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95,
+ // Bytes 2f40 - 2f7f
+ 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83,
+ 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D,
+ 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01,
+ 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01,
+ 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC,
+ 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03,
+ 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81,
+ 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41,
+ // Bytes 2f80 - 2fbf
+ 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9,
+ 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC,
+ 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03,
+ 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8,
+ 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5,
+ 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC,
+ 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03,
+ // Bytes 2fc0 - 2fff
+ 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87,
+ 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC,
+ 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03,
+ 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83,
+ 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45,
+ 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9,
+ // Bytes 3000 - 303f
+ 0x03, 0x45, 0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03,
+ 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8,
+ 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45,
+ 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9,
+ 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC,
+ 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03,
+ 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87,
+ // Bytes 3040 - 307f
+ 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47,
+ 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9,
+ 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC,
+ 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49,
+ 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9,
+ 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC,
+ // Bytes 3080 - 30bf
+ 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03,
+ 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87,
+ 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9,
+ 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03,
+ 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82,
+ 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B,
+ // Bytes 30c0 - 30ff
+ 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC,
+ 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03,
+ 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9,
+ 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03,
+ // Bytes 3100 - 313f
+ 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83,
+ 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC,
+ 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03,
+ 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81,
+ 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F,
+ 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9,
+ // Bytes 3140 - 317f
+ 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC,
+ 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03,
+ 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87,
+ 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52,
+ 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC,
+ 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03,
+ 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82,
+ // Bytes 3180 - 31bf
+ 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53,
+ 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03,
+ 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9,
+ 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC,
+ // Bytes 31c0 - 31ff
+ 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03,
+ 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A,
+ 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9,
+ 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03,
+ 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD,
+ 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56,
+ // Bytes 3200 - 323f
+ 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC,
+ 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03,
+ 0x57, 0xCC, 0x87, 0xC9, 0x03, 0x57, 0xCC, 0x88,
+ 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58,
+ 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9,
+ 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC,
+ 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03,
+ // Bytes 3240 - 327f
+ 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84,
+ 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59,
+ 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9,
+ 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC,
+ 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03,
+ 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9,
+ // Bytes 3280 - 32bf
+ 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC,
+ 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03,
+ 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61,
+ 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5,
+ 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC,
+ 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03,
+ 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81,
+ // Bytes 32c0 - 32ff
+ 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63,
+ 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03,
+ 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD,
+ 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65,
+ 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9,
+ 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC,
+ // Bytes 3300 - 333f
+ 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03,
+ 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89,
+ 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65,
+ 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9,
+ 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC,
+ 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03,
+ 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81,
+ 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67,
+ // Bytes 3340 - 337f
+ 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9,
+ 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03,
+ 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87,
+ 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC,
+ 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03,
+ // Bytes 3380 - 33bf
+ 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81,
+ 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69,
+ 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9,
+ 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC,
+ 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91,
+ 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69,
+ 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5,
+ // Bytes 33c0 - 33ff
+ 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03,
+ 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9,
+ 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC,
+ 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03,
+ 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81,
+ // Bytes 3400 - 343f
+ 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9,
+ 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC,
+ 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03,
+ 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E,
+ 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5,
+ 0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC,
+ // Bytes 3440 - 347f
+ 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03,
+ 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B,
+ 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F,
+ 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9,
+ 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC,
+ 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03,
+ 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72,
+ // Bytes 3480 - 34bf
+ 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC,
+ 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03,
+ 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74,
+ 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC,
+ 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03,
+ // Bytes 34c0 - 34ff
+ 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1,
+ 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75,
+ 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9,
+ 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC,
+ 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03,
+ 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75,
+ 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5,
+ // Bytes 3500 - 353f
+ 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC,
+ 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03,
+ 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83,
+ 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77,
+ 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9,
+ 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC,
+ 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03,
+ 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3,
+ // Bytes 3540 - 357f
+ 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78,
+ 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9,
+ 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC,
+ 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03,
+ 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87,
+ 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79,
+ 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9,
+ 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC,
+ // Bytes 3580 - 35bf
+ 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03,
+ 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A,
+ 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80,
+ 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04,
+ 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86,
+ 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84,
+ 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04,
+ // Bytes 35c0 - 35ff
+ 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6,
+ 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81,
+ 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04,
+ 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92,
+ 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80,
+ 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04,
+ 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91,
+ 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85,
+ // Bytes 3600 - 363f
+ 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04,
+ 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97,
+ 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81,
+ 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04,
+ 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99,
+ 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84,
+ 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04,
+ 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F,
+ // Bytes 3640 - 367f
+ 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81,
+ 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04,
+ 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5,
+ 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84,
+ 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04,
+ 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9,
+ 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81,
+ 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04,
+ // Bytes 3680 - 36bf
+ 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1,
+ 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85,
+ 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04,
+ 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7,
+ 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80,
+ 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04,
+ 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9,
+ 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82,
+ // Bytes 36c0 - 36ff
+ 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04,
+ 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81,
+ 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94,
+ 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04,
+ 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85,
+ 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86,
+ 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04,
+ 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92,
+ // Bytes 3700 - 373f
+ 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88,
+ 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81,
+ 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04,
+ 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86,
+ 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04,
+ // Bytes 3740 - 377f
+ 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98,
+ 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84,
+ 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04,
+ 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A,
+ 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88,
+ 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04,
+ 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B,
+ // Bytes 3780 - 37bf
+ 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86,
+ 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5,
+ 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86,
+ 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6,
+ // Bytes 37c0 - 37ff
+ 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88,
+ 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04,
+ 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8,
+ 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88,
+ 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04,
+ 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83,
+ 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86,
+ 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04,
+ // Bytes 3800 - 383f
+ 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88,
+ 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4,
+ 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F,
+ 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04,
+ 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8,
+ 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88,
+ // Bytes 3840 - 387f
+ 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04,
+ 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7,
+ 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94,
+ 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04,
+ 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92,
+ 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x95, 0xD9, 0x94,
+ 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA,
+ 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05,
+ // Bytes 3880 - 38bf
+ 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41,
+ 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC,
+ 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86,
+ 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC,
+ 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89,
+ 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA,
+ 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05,
+ 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41,
+ // Bytes 38c0 - 38ff
+ 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC,
+ 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7,
+ 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC,
+ 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81,
+ 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA,
+ 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05,
+ 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45,
+ 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC,
+ // Bytes 3900 - 393f
+ 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7,
+ 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC,
+ 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84,
+ 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA,
+ 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05,
+ 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F,
+ 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC,
+ 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83,
+ // Bytes 3940 - 397f
+ 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC,
+ 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80,
+ 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA,
+ 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05,
+ 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F,
+ 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC,
+ 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B,
+ 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC,
+ // Bytes 3980 - 39bf
+ 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3,
+ 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA,
+ 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05,
+ 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53,
+ 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC,
+ 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3,
+ 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC,
+ 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88,
+ // Bytes 39c0 - 39ff
+ 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA,
+ 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05,
+ 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55,
+ 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC,
+ 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B,
+ 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC,
+ 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89,
+ 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6,
+ // Bytes 3a00 - 3a3f
+ 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05,
+ 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61,
+ 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC,
+ 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86,
+ 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC,
+ 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83,
+ 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA,
+ 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05,
+ // Bytes 3a40 - 3a7f
+ 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61,
+ 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC,
+ 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3,
+ 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC,
+ 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80,
+ 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA,
+ 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05,
+ 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65,
+ // Bytes 3a80 - 3abf
+ 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC,
+ 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3,
+ 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC,
+ 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81,
+ 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA,
+ 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05,
+ 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F,
+ 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC,
+ // Bytes 3ac0 - 3aff
+ 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83,
+ 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC,
+ 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88,
+ 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA,
+ 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05,
+ 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F,
+ 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC,
+ 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B,
+ // Bytes 3b00 - 3b3f
+ 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC,
+ 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89,
+ 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6,
+ 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05,
+ 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72,
+ 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC,
+ 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C,
+ 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC,
+ // Bytes 3b40 - 3b7f
+ 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81,
+ 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA,
+ 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05,
+ 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75,
+ 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC,
+ 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B,
+ 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC,
+ 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83,
+ // Bytes 3b80 - 3bbf
+ 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA,
+ 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05,
+ 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1,
+ 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE,
+ 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE,
+ 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC,
+ 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82,
+ 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05,
+ // Bytes 3bc0 - 3bff
+ 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87,
+ 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05,
+ // Bytes 3c00 - 3c3f
+ 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89,
+ 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ // Bytes 3c40 - 3c7f
+ 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89,
+ 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A,
+ // Bytes 3c80 - 3cbf
+ 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A,
+ 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2,
+ // Bytes 3cc0 - 3cff
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05,
+ 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ // Bytes 3d00 - 3d3f
+ 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ // Bytes 3d40 - 3d7f
+ 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA,
+ // Bytes 3d80 - 3dbf
+ 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ // Bytes 3dc0 - 3dff
+ 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA,
+ // Bytes 3e00 - 3e3f
+ 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ // Bytes 3e40 - 3e7f
+ 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA,
+ // Bytes 3e80 - 3ebf
+ 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA,
+ 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA,
+ 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA,
+ // Bytes 3ec0 - 3eff
+ 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA,
+ 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA,
+ 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09,
+ 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09,
+ 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09,
+ 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85,
+ 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11,
+ // Bytes 3f00 - 3f3f
+ 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 3f40 - 3f7f
+ 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 3f80 - 3fbf
+ 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D,
+ // Bytes 3fc0 - 3fff
+ 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 4000 - 403f
+ 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 4040 - 407f
+ 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 4080 - 40bf
+ 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 40c0 - 40ff
+ 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D,
+ 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D,
+ 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC,
+ 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC,
+ 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD,
+ // Bytes 4100 - 413f
+ 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD,
+ 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC,
+ 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC,
+ 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC,
+ 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ // Bytes 4140 - 417f
+ 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC,
+ 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC,
+ 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD,
+ // Bytes 4180 - 41bf
+ 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC,
+ 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC,
+ 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC,
+ 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB,
+ // Bytes 41c0 - 41ff
+ 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC,
+ 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC,
+ 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE,
+ 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD,
+ 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC,
+ // Bytes 4200 - 423f
+ 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF,
+ 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB,
+ 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD,
+ 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC,
+ 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC,
+ 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF,
+ 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB,
+ 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82,
+ // Bytes 4240 - 427f
+ 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0,
+ 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82,
+ 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2,
+ 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43,
+ 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84,
+ 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20,
+ 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9,
+ 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC,
+ // Bytes 4280 - 42bf
+ 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43,
+ 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94,
+ 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20,
+ 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5,
+ 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD,
+ 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43,
+ 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D,
+ 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20,
+ // Bytes 42c0 - 42ff
+ 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D,
+ 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9,
+ 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43,
+ 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82,
+ 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE,
+ 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC,
+ 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9,
+ // Bytes 4300 - 433f
+ 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE,
+ 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC,
+ 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9,
+ 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE,
+ 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC,
+ 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9,
+ 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE,
+ 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC,
+ // Bytes 4340 - 437f
+ 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9,
+ 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7,
+ 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41,
+ 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7,
+ 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41,
+ 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7,
+ // Bytes 4380 - 43bf
+ 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41,
+ 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7,
+ 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41,
+ 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7,
+ 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41,
+ // Bytes 43c0 - 43ff
+ 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7,
+ 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49,
+ 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7,
+ 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6,
+ 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41,
+ 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7,
+ 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6,
+ // Bytes 4400 - 443f
+ 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31,
+ 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8,
+ 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9,
+ 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5,
+ 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8,
+ 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9,
+ 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65,
+ 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9,
+ // Bytes 4440 - 447f
+ 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9,
+ 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75,
+ 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9,
+ 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9,
+ 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9,
+ 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB,
+ 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88,
+ 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC,
+ // Bytes 4480 - 44bf
+ 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82,
+ 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA,
+ 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45,
+ 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20,
+ 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC,
+ 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94,
+ 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9,
+ 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91,
+ // Bytes 44c0 - 44ff
+ 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72,
+ 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45,
+ 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20,
+ 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB,
+ 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC,
+ 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC,
+ 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6,
+ 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6,
+ // Bytes 4500 - 453f
+ 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9,
+ 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9,
+ 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9,
+ 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1,
+ // Bytes 4540 - 457f
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF,
+ 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1,
+ 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2,
+ 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF,
+ 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96,
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97,
+ // Bytes 4580 - 45bf
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C,
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB,
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2,
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8,
+ 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1,
+ 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2,
+ 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2,
+ 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3,
+ // Bytes 45c0 - 45ff
+ 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86,
+ 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85,
+ 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0,
+ 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD,
+ 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85,
+ 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0,
+ 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2,
+ 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49,
+ // Bytes 4600 - 463f
+ 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE,
+ 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0,
+ 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE,
+ 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85,
+ 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0,
+ 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0,
+ 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85,
+ 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85,
+ // Bytes 4640 - 467f
+ 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0,
+ 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE,
+ 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85,
+ 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0,
+ 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0,
+ 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86,
+ 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85,
+ 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0,
+ // Bytes 4680 - 46bf
+ 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE,
+ 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC,
+ 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83,
+ 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A,
+ 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43,
+ 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9,
+ 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC,
+ 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83,
+ // Bytes 46c0 - 46ff
+ 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3,
+ 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F,
+ 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9,
+ 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC,
+ 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83,
+ 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8,
+ 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53,
+ 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9,
+ // Bytes 4700 - 473f
+ 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC,
+ 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83,
+ 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B,
+ 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61,
+ 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9,
+ 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC,
+ 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83,
+ 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82,
+ // Bytes 4740 - 477f
+ 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65,
+ 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5,
+ 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC,
+ 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83,
+ 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84,
+ 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F,
+ 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD,
+ 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC,
+ // Bytes 4780 - 47bf
+ 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83,
+ 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C,
+ 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75,
+ 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9,
+ 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC,
+ 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9,
+ 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE,
+ 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC,
+ // Bytes 47c0 - 47ff
+ 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9,
+ 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE,
+ 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC,
+ 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9,
+ 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE,
+ 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC,
+ 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9,
+ 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE,
+ // Bytes 4800 - 483f
+ 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC,
+ 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9,
+ 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE,
+ 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC,
+ 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9,
+ 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE,
+ 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC,
+ 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9,
+ // Bytes 4840 - 487f
+ 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE,
+ 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC,
+ 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9,
+ 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF,
+ 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC,
+ 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9,
+ 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF,
+ 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC,
+ // Bytes 4880 - 48bf
+ 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9,
+ 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE,
+ 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ // Bytes 48c0 - 48ff
+ 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ // Bytes 4900 - 493f
+ 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ // Bytes 4940 - 497f
+ 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE,
+ 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF,
+ // Bytes 4980 - 49bf
+ 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF,
+ 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF,
+ 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF,
+ 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF,
+ 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF,
+ 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC,
+ 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32,
+ 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85,
+ // Bytes 49c0 - 49ff
+ 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01,
+ 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43,
+ 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85,
+ 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01,
+ 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43,
+ 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85,
+ 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01,
+ 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43,
+ // Bytes 4a00 - 4a3f
+ 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85,
+ 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01,
+ 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43,
+ 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85,
+ 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01,
+ 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43,
+ 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85,
+ 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01,
+ // Bytes 4a40 - 4a7f
+ 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43,
+ 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86,
+ 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01,
+ 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43,
+ 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86,
+ 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01,
+ 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32,
+ 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3,
+ // Bytes 4a80 - 4abf
+ 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1,
+ 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD,
+ 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0,
+ 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00,
+ 0x01,
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
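+
+// Illustrative sketch, not part of the generated tables: one way a caller
+// could walk a byte slice with lookup, advancing by the returned width. The
+// helper name and callback are assumptions for exposition only. Illegal
+// encodings come back with value 0 and a small non-zero size, so the scan
+// still makes progress; a size of 0 means the trailing bytes are a truncated
+// encoding and more input is needed.
+func scanNFCValues(t *nfcTrie, b []byte, visit func(v uint16, sz int)) {
+	for len(b) > 0 {
+		v, sz := t.lookup(b)
+		if sz == 0 {
+			return // incomplete final encoding
+		}
+		visit(v, sz)
+		b = b[sz:]
+	}
+}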
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfcTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfcValues[c0]
+ }
+ i := nfcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfcTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfcTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfcValues[c0]
+ }
+ i := nfcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
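+
+// Illustrative sketch (hypothetical helper, not generated output): the
+// *Unsafe variants drop every length and continuation-byte check, so they
+// are only appropriate when the caller already knows s starts with a
+// complete, valid UTF-8 rune. When in doubt, the checked form degrades
+// gracefully instead of reading past the end of s.
+func firstValue(t *nfcTrie, s string, validated bool) uint16 {
+	if len(s) == 0 {
+		return 0 // both forms require at least one byte
+	}
+	if validated {
+		return t.lookupStringUnsafe(s) // caller guarantees a full, valid leading rune
+	}
+	v, _ := t.lookupString(s) // checked form tolerates truncated or illegal input
+	return v
+}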
+
+// nfcTrie. Total size: 10586 bytes (10.34 KiB). Checksum: dd926e82067bee11.
+type nfcTrie struct{}
+
+func newNfcTrie(i int) *nfcTrie {
+ return &nfcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 46:
+ return uint16(nfcValues[n<<6+uint32(b)])
+ default:
+ n -= 46
+ return uint16(nfcSparse.lookup(n, b))
+ }
+}
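+
+// Illustrative sketch (an assumption, not generated output): how the checked
+// lookups above feed lookupValue for a 2-byte rune such as U+00E9 ("é",
+// bytes 0xC3 0xA9). The lead byte selects a block index via nfcIndex; the
+// continuation byte then selects the entry inside that block, read either
+// from the dense nfcValues array or from nfcSparse.
+func lookupTwoByteSketch(t *nfcTrie) uint16 {
+	const s = "\u00e9"                    // 0xC3, 0xA9 in UTF-8
+	i := nfcIndex[s[0]]                   // stage 1: block index for the lead byte
+	return t.lookupValue(uint32(i), s[1]) // stage 2: entry for the continuation byte
+}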
+
+// nfcValues: 48 blocks, 3072 entries, 6144 bytes
+// The third block is the zero block.
+var nfcValues = [3072]uint16{
+ // Block 0x0, offset 0x0
+ 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000,
+ // Block 0x1, offset 0x40
+ 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000,
+ 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000,
+ 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000,
+ 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000,
+ 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000,
+ 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000,
+ 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000,
+ 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000,
+ 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000,
+ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c,
+ 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb,
+ 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104,
+ 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd,
+ 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235,
+ 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285,
+ 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3,
+ 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750,
+ 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f,
+ 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3,
+ 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569,
+ // Block 0x4, offset 0x100
+ 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8,
+ 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6,
+ 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5,
+ 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302,
+ 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339,
+ 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352,
+ 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e,
+ 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6,
+ 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0,
+ 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc,
+ 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8,
+ // Block 0x5, offset 0x140
+ 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118,
+ 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f,
+ 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c,
+ 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483,
+ 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d,
+ 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba,
+ 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796,
+ 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2,
+ 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528,
+ 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267,
+ 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000,
+ // Block 0x6, offset 0x180
+ 0x184: 0x8100, 0x185: 0x8100,
+ 0x186: 0x8100,
+ 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140,
+ 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8,
+ 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50,
+ 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5,
+ 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf,
+ 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd,
+ 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334,
+ 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46,
+ 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316,
+ 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac,
+ 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479,
+ 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6,
+ 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5,
+ 0x1de: 0x305a, 0x1df: 0x3366,
+ 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b,
+ 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769,
+ 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132,
+ 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932,
+ 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932,
+ 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d,
+ 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d,
+ 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d,
+ 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d,
+ 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d,
+ 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101,
+ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d,
+ 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132,
+ // Block 0x9, offset 0x240
+ 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936,
+ 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132,
+ 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132,
+ 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132,
+ 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135,
+ 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132,
+ 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132,
+ 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132,
+ 0x274: 0x0170,
+ 0x27a: 0x8100,
+ 0x27e: 0x0037,
+ // Block 0xa, offset 0x280
+ 0x284: 0x8100, 0x285: 0x35a1,
+ 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625,
+ 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000,
+ 0x295: 0xa000, 0x297: 0xa000,
+ 0x299: 0xa000,
+ 0x29f: 0xa000, 0x2a1: 0xa000,
+ 0x2a5: 0xa000, 0x2a9: 0xa000,
+ 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9,
+ 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000,
+ 0x2b7: 0xa000, 0x2b9: 0xa000,
+ 0x2bf: 0xa000,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b,
+ 0x2c6: 0xa000, 0x2c7: 0x3709,
+ 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000,
+ 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000,
+ 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000,
+ 0x2de: 0xa000, 0x2e3: 0xa000,
+ 0x2e7: 0xa000,
+ 0x2eb: 0xa000, 0x2ed: 0xa000,
+ 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000,
+ 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000,
+ 0x2fe: 0xa000,
+ // Block 0xc, offset 0x300
+ 0x301: 0x3733, 0x302: 0x37b7,
+ 0x310: 0x370f, 0x311: 0x3793,
+ 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab,
+ 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd,
+ 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf,
+ 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000,
+ 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed,
+ 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805,
+ 0x338: 0x3787, 0x339: 0x380b,
+ // Block 0xd, offset 0x340
+ 0x351: 0x812d,
+ 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132,
+ 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132,
+ 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d,
+ 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132,
+ 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132,
+ 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a,
+ 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f,
+ 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112,
+ // Block 0xe, offset 0x380
+ 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116,
+ 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c,
+ 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132,
+ 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132,
+ 0x39e: 0x8132, 0x39f: 0x812d,
+ 0x3b0: 0x811e,
+ // Block 0xf, offset 0x3c0
+ 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132,
+ 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132,
+ 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d,
+ 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d,
+ 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d,
+ 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132,
+ 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132,
+ 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132,
+ // Block 0x10, offset 0x400
+ 0x405: 0xa000,
+ 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000,
+ 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000,
+ 0x412: 0x2d4e,
+ 0x434: 0x8102, 0x435: 0x9900,
+ 0x43a: 0xa000, 0x43b: 0x2d56,
+ 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000,
+ // Block 0x11, offset 0x440
+ 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132,
+ 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132,
+ 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132,
+ 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132,
+ 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132,
+ 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132,
+ 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132,
+ 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132,
+ 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132,
+ 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132,
+ 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d,
+ // Block 0x12, offset 0x480
+ 0x480: 0x2f97, 0x481: 0x32a3, 0x482: 0x2fa1, 0x483: 0x32ad, 0x484: 0x2fa6, 0x485: 0x32b2,
+ 0x486: 0x2fab, 0x487: 0x32b7, 0x488: 0x38cc, 0x489: 0x3a5b, 0x48a: 0x2fc4, 0x48b: 0x32d0,
+ 0x48c: 0x2fce, 0x48d: 0x32da, 0x48e: 0x2fdd, 0x48f: 0x32e9, 0x490: 0x2fd3, 0x491: 0x32df,
+ 0x492: 0x2fd8, 0x493: 0x32e4, 0x494: 0x38ef, 0x495: 0x3a7e, 0x496: 0x38f6, 0x497: 0x3a85,
+ 0x498: 0x3019, 0x499: 0x3325, 0x49a: 0x301e, 0x49b: 0x332a, 0x49c: 0x3904, 0x49d: 0x3a93,
+ 0x49e: 0x3023, 0x49f: 0x332f, 0x4a0: 0x3032, 0x4a1: 0x333e, 0x4a2: 0x3050, 0x4a3: 0x335c,
+ 0x4a4: 0x305f, 0x4a5: 0x336b, 0x4a6: 0x3055, 0x4a7: 0x3361, 0x4a8: 0x3064, 0x4a9: 0x3370,
+ 0x4aa: 0x3069, 0x4ab: 0x3375, 0x4ac: 0x30af, 0x4ad: 0x33bb, 0x4ae: 0x390b, 0x4af: 0x3a9a,
+ 0x4b0: 0x30b9, 0x4b1: 0x33ca, 0x4b2: 0x30c3, 0x4b3: 0x33d4, 0x4b4: 0x30cd, 0x4b5: 0x33de,
+ 0x4b6: 0x46c4, 0x4b7: 0x4755, 0x4b8: 0x3912, 0x4b9: 0x3aa1, 0x4ba: 0x30e6, 0x4bb: 0x33f7,
+ 0x4bc: 0x30e1, 0x4bd: 0x33f2, 0x4be: 0x30eb, 0x4bf: 0x33fc,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x30f0, 0x4c1: 0x3401, 0x4c2: 0x30f5, 0x4c3: 0x3406, 0x4c4: 0x3109, 0x4c5: 0x341a,
+ 0x4c6: 0x3113, 0x4c7: 0x3424, 0x4c8: 0x3122, 0x4c9: 0x3433, 0x4ca: 0x311d, 0x4cb: 0x342e,
+ 0x4cc: 0x3935, 0x4cd: 0x3ac4, 0x4ce: 0x3943, 0x4cf: 0x3ad2, 0x4d0: 0x394a, 0x4d1: 0x3ad9,
+ 0x4d2: 0x3951, 0x4d3: 0x3ae0, 0x4d4: 0x314f, 0x4d5: 0x3460, 0x4d6: 0x3154, 0x4d7: 0x3465,
+ 0x4d8: 0x315e, 0x4d9: 0x346f, 0x4da: 0x46f1, 0x4db: 0x4782, 0x4dc: 0x3997, 0x4dd: 0x3b26,
+ 0x4de: 0x3177, 0x4df: 0x3488, 0x4e0: 0x3181, 0x4e1: 0x3492, 0x4e2: 0x4700, 0x4e3: 0x4791,
+ 0x4e4: 0x399e, 0x4e5: 0x3b2d, 0x4e6: 0x39a5, 0x4e7: 0x3b34, 0x4e8: 0x39ac, 0x4e9: 0x3b3b,
+ 0x4ea: 0x3190, 0x4eb: 0x34a1, 0x4ec: 0x319a, 0x4ed: 0x34b0, 0x4ee: 0x31ae, 0x4ef: 0x34c4,
+ 0x4f0: 0x31a9, 0x4f1: 0x34bf, 0x4f2: 0x31ea, 0x4f3: 0x3500, 0x4f4: 0x31f9, 0x4f5: 0x350f,
+ 0x4f6: 0x31f4, 0x4f7: 0x350a, 0x4f8: 0x39b3, 0x4f9: 0x3b42, 0x4fa: 0x39ba, 0x4fb: 0x3b49,
+ 0x4fc: 0x31fe, 0x4fd: 0x3514, 0x4fe: 0x3203, 0x4ff: 0x3519,
+ // Block 0x14, offset 0x500
+ 0x500: 0x3208, 0x501: 0x351e, 0x502: 0x320d, 0x503: 0x3523, 0x504: 0x321c, 0x505: 0x3532,
+ 0x506: 0x3217, 0x507: 0x352d, 0x508: 0x3221, 0x509: 0x353c, 0x50a: 0x3226, 0x50b: 0x3541,
+ 0x50c: 0x322b, 0x50d: 0x3546, 0x50e: 0x3249, 0x50f: 0x3564, 0x510: 0x3262, 0x511: 0x3582,
+ 0x512: 0x3271, 0x513: 0x3591, 0x514: 0x3276, 0x515: 0x3596, 0x516: 0x337a, 0x517: 0x34a6,
+ 0x518: 0x3537, 0x519: 0x3573, 0x51b: 0x35d1,
+ 0x520: 0x46a1, 0x521: 0x4732, 0x522: 0x2f83, 0x523: 0x328f,
+ 0x524: 0x3878, 0x525: 0x3a07, 0x526: 0x3871, 0x527: 0x3a00, 0x528: 0x3886, 0x529: 0x3a15,
+ 0x52a: 0x387f, 0x52b: 0x3a0e, 0x52c: 0x38be, 0x52d: 0x3a4d, 0x52e: 0x3894, 0x52f: 0x3a23,
+ 0x530: 0x388d, 0x531: 0x3a1c, 0x532: 0x38a2, 0x533: 0x3a31, 0x534: 0x389b, 0x535: 0x3a2a,
+ 0x536: 0x38c5, 0x537: 0x3a54, 0x538: 0x46b5, 0x539: 0x4746, 0x53a: 0x3000, 0x53b: 0x330c,
+ 0x53c: 0x2fec, 0x53d: 0x32f8, 0x53e: 0x38da, 0x53f: 0x3a69,
+ // Block 0x15, offset 0x540
+ 0x540: 0x38d3, 0x541: 0x3a62, 0x542: 0x38e8, 0x543: 0x3a77, 0x544: 0x38e1, 0x545: 0x3a70,
+ 0x546: 0x38fd, 0x547: 0x3a8c, 0x548: 0x3091, 0x549: 0x339d, 0x54a: 0x30a5, 0x54b: 0x33b1,
+ 0x54c: 0x46e7, 0x54d: 0x4778, 0x54e: 0x3136, 0x54f: 0x3447, 0x550: 0x3920, 0x551: 0x3aaf,
+ 0x552: 0x3919, 0x553: 0x3aa8, 0x554: 0x392e, 0x555: 0x3abd, 0x556: 0x3927, 0x557: 0x3ab6,
+ 0x558: 0x3989, 0x559: 0x3b18, 0x55a: 0x396d, 0x55b: 0x3afc, 0x55c: 0x3966, 0x55d: 0x3af5,
+ 0x55e: 0x397b, 0x55f: 0x3b0a, 0x560: 0x3974, 0x561: 0x3b03, 0x562: 0x3982, 0x563: 0x3b11,
+ 0x564: 0x31e5, 0x565: 0x34fb, 0x566: 0x31c7, 0x567: 0x34dd, 0x568: 0x39e4, 0x569: 0x3b73,
+ 0x56a: 0x39dd, 0x56b: 0x3b6c, 0x56c: 0x39f2, 0x56d: 0x3b81, 0x56e: 0x39eb, 0x56f: 0x3b7a,
+ 0x570: 0x39f9, 0x571: 0x3b88, 0x572: 0x3230, 0x573: 0x354b, 0x574: 0x3258, 0x575: 0x3578,
+ 0x576: 0x3253, 0x577: 0x356e, 0x578: 0x323f, 0x579: 0x355a,
+ // Block 0x16, offset 0x580
+ 0x580: 0x4804, 0x581: 0x480a, 0x582: 0x491e, 0x583: 0x4936, 0x584: 0x4926, 0x585: 0x493e,
+ 0x586: 0x492e, 0x587: 0x4946, 0x588: 0x47aa, 0x589: 0x47b0, 0x58a: 0x488e, 0x58b: 0x48a6,
+ 0x58c: 0x4896, 0x58d: 0x48ae, 0x58e: 0x489e, 0x58f: 0x48b6, 0x590: 0x4816, 0x591: 0x481c,
+ 0x592: 0x3db8, 0x593: 0x3dc8, 0x594: 0x3dc0, 0x595: 0x3dd0,
+ 0x598: 0x47b6, 0x599: 0x47bc, 0x59a: 0x3ce8, 0x59b: 0x3cf8, 0x59c: 0x3cf0, 0x59d: 0x3d00,
+ 0x5a0: 0x482e, 0x5a1: 0x4834, 0x5a2: 0x494e, 0x5a3: 0x4966,
+ 0x5a4: 0x4956, 0x5a5: 0x496e, 0x5a6: 0x495e, 0x5a7: 0x4976, 0x5a8: 0x47c2, 0x5a9: 0x47c8,
+ 0x5aa: 0x48be, 0x5ab: 0x48d6, 0x5ac: 0x48c6, 0x5ad: 0x48de, 0x5ae: 0x48ce, 0x5af: 0x48e6,
+ 0x5b0: 0x4846, 0x5b1: 0x484c, 0x5b2: 0x3e18, 0x5b3: 0x3e30, 0x5b4: 0x3e20, 0x5b5: 0x3e38,
+ 0x5b6: 0x3e28, 0x5b7: 0x3e40, 0x5b8: 0x47ce, 0x5b9: 0x47d4, 0x5ba: 0x3d18, 0x5bb: 0x3d30,
+ 0x5bc: 0x3d20, 0x5bd: 0x3d38, 0x5be: 0x3d28, 0x5bf: 0x3d40,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x4852, 0x5c1: 0x4858, 0x5c2: 0x3e48, 0x5c3: 0x3e58, 0x5c4: 0x3e50, 0x5c5: 0x3e60,
+ 0x5c8: 0x47da, 0x5c9: 0x47e0, 0x5ca: 0x3d48, 0x5cb: 0x3d58,
+ 0x5cc: 0x3d50, 0x5cd: 0x3d60, 0x5d0: 0x4864, 0x5d1: 0x486a,
+ 0x5d2: 0x3e80, 0x5d3: 0x3e98, 0x5d4: 0x3e88, 0x5d5: 0x3ea0, 0x5d6: 0x3e90, 0x5d7: 0x3ea8,
+ 0x5d9: 0x47e6, 0x5db: 0x3d68, 0x5dd: 0x3d70,
+ 0x5df: 0x3d78, 0x5e0: 0x487c, 0x5e1: 0x4882, 0x5e2: 0x497e, 0x5e3: 0x4996,
+ 0x5e4: 0x4986, 0x5e5: 0x499e, 0x5e6: 0x498e, 0x5e7: 0x49a6, 0x5e8: 0x47ec, 0x5e9: 0x47f2,
+ 0x5ea: 0x48ee, 0x5eb: 0x4906, 0x5ec: 0x48f6, 0x5ed: 0x490e, 0x5ee: 0x48fe, 0x5ef: 0x4916,
+ 0x5f0: 0x47f8, 0x5f1: 0x431e, 0x5f2: 0x3691, 0x5f3: 0x4324, 0x5f4: 0x4822, 0x5f5: 0x432a,
+ 0x5f6: 0x36a3, 0x5f7: 0x4330, 0x5f8: 0x36c1, 0x5f9: 0x4336, 0x5fa: 0x36d9, 0x5fb: 0x433c,
+ 0x5fc: 0x4870, 0x5fd: 0x4342,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3da0, 0x601: 0x3da8, 0x602: 0x4184, 0x603: 0x41a2, 0x604: 0x418e, 0x605: 0x41ac,
+ 0x606: 0x4198, 0x607: 0x41b6, 0x608: 0x3cd8, 0x609: 0x3ce0, 0x60a: 0x40d0, 0x60b: 0x40ee,
+ 0x60c: 0x40da, 0x60d: 0x40f8, 0x60e: 0x40e4, 0x60f: 0x4102, 0x610: 0x3de8, 0x611: 0x3df0,
+ 0x612: 0x41c0, 0x613: 0x41de, 0x614: 0x41ca, 0x615: 0x41e8, 0x616: 0x41d4, 0x617: 0x41f2,
+ 0x618: 0x3d08, 0x619: 0x3d10, 0x61a: 0x410c, 0x61b: 0x412a, 0x61c: 0x4116, 0x61d: 0x4134,
+ 0x61e: 0x4120, 0x61f: 0x413e, 0x620: 0x3ec0, 0x621: 0x3ec8, 0x622: 0x41fc, 0x623: 0x421a,
+ 0x624: 0x4206, 0x625: 0x4224, 0x626: 0x4210, 0x627: 0x422e, 0x628: 0x3d80, 0x629: 0x3d88,
+ 0x62a: 0x4148, 0x62b: 0x4166, 0x62c: 0x4152, 0x62d: 0x4170, 0x62e: 0x415c, 0x62f: 0x417a,
+ 0x630: 0x3685, 0x631: 0x367f, 0x632: 0x3d90, 0x633: 0x368b, 0x634: 0x3d98,
+ 0x636: 0x4810, 0x637: 0x3db0, 0x638: 0x35f5, 0x639: 0x35ef, 0x63a: 0x35e3, 0x63b: 0x42ee,
+ 0x63c: 0x35fb, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100,
+ // Block 0x19, offset 0x640
+ 0x640: 0x8100, 0x641: 0x35a7, 0x642: 0x3dd8, 0x643: 0x369d, 0x644: 0x3de0,
+ 0x646: 0x483a, 0x647: 0x3df8, 0x648: 0x3601, 0x649: 0x42f4, 0x64a: 0x360d, 0x64b: 0x42fa,
+ 0x64c: 0x3619, 0x64d: 0x3b8f, 0x64e: 0x3b96, 0x64f: 0x3b9d, 0x650: 0x36b5, 0x651: 0x36af,
+ 0x652: 0x3e00, 0x653: 0x44e4, 0x656: 0x36bb, 0x657: 0x3e10,
+ 0x658: 0x3631, 0x659: 0x362b, 0x65a: 0x361f, 0x65b: 0x4300, 0x65d: 0x3ba4,
+ 0x65e: 0x3bab, 0x65f: 0x3bb2, 0x660: 0x36eb, 0x661: 0x36e5, 0x662: 0x3e68, 0x663: 0x44ec,
+ 0x664: 0x36cd, 0x665: 0x36d3, 0x666: 0x36f1, 0x667: 0x3e78, 0x668: 0x3661, 0x669: 0x365b,
+ 0x66a: 0x364f, 0x66b: 0x430c, 0x66c: 0x3649, 0x66d: 0x359b, 0x66e: 0x42e8, 0x66f: 0x0081,
+ 0x672: 0x3eb0, 0x673: 0x36f7, 0x674: 0x3eb8,
+ 0x676: 0x4888, 0x677: 0x3ed0, 0x678: 0x363d, 0x679: 0x4306, 0x67a: 0x366d, 0x67b: 0x4318,
+ 0x67c: 0x3679, 0x67d: 0x4256, 0x67e: 0xa100,
+ // Block 0x1a, offset 0x680
+ 0x681: 0x3c06, 0x683: 0xa000, 0x684: 0x3c0d, 0x685: 0xa000,
+ 0x687: 0x3c14, 0x688: 0xa000, 0x689: 0x3c1b,
+ 0x68d: 0xa000,
+ 0x6a0: 0x2f65, 0x6a1: 0xa000, 0x6a2: 0x3c29,
+ 0x6a4: 0xa000, 0x6a5: 0xa000,
+ 0x6ad: 0x3c22, 0x6ae: 0x2f60, 0x6af: 0x2f6a,
+ 0x6b0: 0x3c30, 0x6b1: 0x3c37, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c3e, 0x6b5: 0x3c45,
+ 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4c, 0x6b9: 0x3c53, 0x6ba: 0xa000, 0x6bb: 0xa000,
+ 0x6bc: 0xa000, 0x6bd: 0xa000,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3c5a, 0x6c1: 0x3c61, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c76, 0x6c5: 0x3c7d,
+ 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c84, 0x6c9: 0x3c8b,
+ 0x6d1: 0xa000,
+ 0x6d2: 0xa000,
+ 0x6e2: 0xa000,
+ 0x6e8: 0xa000, 0x6e9: 0xa000,
+ 0x6eb: 0xa000, 0x6ec: 0x3ca0, 0x6ed: 0x3ca7, 0x6ee: 0x3cae, 0x6ef: 0x3cb5,
+ 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000,
+ // Block 0x1c, offset 0x700
+ 0x706: 0xa000, 0x70b: 0xa000,
+ 0x70c: 0x3f08, 0x70d: 0xa000, 0x70e: 0x3f10, 0x70f: 0xa000, 0x710: 0x3f18, 0x711: 0xa000,
+ 0x712: 0x3f20, 0x713: 0xa000, 0x714: 0x3f28, 0x715: 0xa000, 0x716: 0x3f30, 0x717: 0xa000,
+ 0x718: 0x3f38, 0x719: 0xa000, 0x71a: 0x3f40, 0x71b: 0xa000, 0x71c: 0x3f48, 0x71d: 0xa000,
+ 0x71e: 0x3f50, 0x71f: 0xa000, 0x720: 0x3f58, 0x721: 0xa000, 0x722: 0x3f60,
+ 0x724: 0xa000, 0x725: 0x3f68, 0x726: 0xa000, 0x727: 0x3f70, 0x728: 0xa000, 0x729: 0x3f78,
+ 0x72f: 0xa000,
+ 0x730: 0x3f80, 0x731: 0x3f88, 0x732: 0xa000, 0x733: 0x3f90, 0x734: 0x3f98, 0x735: 0xa000,
+ 0x736: 0x3fa0, 0x737: 0x3fa8, 0x738: 0xa000, 0x739: 0x3fb0, 0x73a: 0x3fb8, 0x73b: 0xa000,
+ 0x73c: 0x3fc0, 0x73d: 0x3fc8,
+ // Block 0x1d, offset 0x740
+ 0x754: 0x3f00,
+ 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000,
+ 0x75e: 0x3fd0,
+ 0x766: 0xa000,
+ 0x76b: 0xa000, 0x76c: 0x3fe0, 0x76d: 0xa000, 0x76e: 0x3fe8, 0x76f: 0xa000,
+ 0x770: 0x3ff0, 0x771: 0xa000, 0x772: 0x3ff8, 0x773: 0xa000, 0x774: 0x4000, 0x775: 0xa000,
+ 0x776: 0x4008, 0x777: 0xa000, 0x778: 0x4010, 0x779: 0xa000, 0x77a: 0x4018, 0x77b: 0xa000,
+ 0x77c: 0x4020, 0x77d: 0xa000, 0x77e: 0x4028, 0x77f: 0xa000,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x4030, 0x781: 0xa000, 0x782: 0x4038, 0x784: 0xa000, 0x785: 0x4040,
+ 0x786: 0xa000, 0x787: 0x4048, 0x788: 0xa000, 0x789: 0x4050,
+ 0x78f: 0xa000, 0x790: 0x4058, 0x791: 0x4060,
+ 0x792: 0xa000, 0x793: 0x4068, 0x794: 0x4070, 0x795: 0xa000, 0x796: 0x4078, 0x797: 0x4080,
+ 0x798: 0xa000, 0x799: 0x4088, 0x79a: 0x4090, 0x79b: 0xa000, 0x79c: 0x4098, 0x79d: 0x40a0,
+ 0x7af: 0xa000,
+ 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fd8,
+ 0x7b7: 0x40a8, 0x7b8: 0x40b0, 0x7b9: 0x40b8, 0x7ba: 0x40c0,
+ 0x7bd: 0xa000, 0x7be: 0x40c8,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb,
+ 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943,
+ 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3,
+ 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43,
+ 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87,
+ 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283,
+ 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f,
+ 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853,
+ 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b,
+ 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b,
+ 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b,
+ 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f,
+ 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7,
+ 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127,
+ 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357,
+ 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873,
+ 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3,
+ 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b,
+ 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57,
+ 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb,
+ 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f,
+ 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3,
+ 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83,
+ 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193,
+ 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b,
+ 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b,
+ 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f,
+ 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b,
+ 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753,
+ 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777,
+ 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3,
+ 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47,
+ 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af,
+ 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df,
+ 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817,
+ 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3,
+ 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457,
+ 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b,
+ 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27,
+ 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f,
+ 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03,
+ 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27,
+ 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af,
+ 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3,
+ 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb,
+ 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353,
+ 0x8e5: 0x1407, 0x8e6: 0x1433,
+ 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7,
+ 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897,
+ 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93,
+ 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b,
+ 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f,
+ 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f,
+ 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f,
+ 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff,
+ 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f,
+ 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f,
+ 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3,
+ 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7,
+ 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963,
+ 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b,
+ 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb,
+ 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf,
+ 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f,
+ 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013,
+ 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f,
+ 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b,
+ 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b,
+ 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb,
+ 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343,
+ 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f,
+ // Block 0x26, offset 0x980
+ 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b,
+ 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b,
+ 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2,
+ 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809,
+ 0x998: 0x1617, 0x999: 0x1627,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757,
+ 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773,
+ 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3,
+ 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf,
+ 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff,
+ 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f,
+ 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867,
+ 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af,
+ 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93,
+ 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3,
+ 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f,
+ 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983,
+ 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf,
+ 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3,
+ 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef,
+ 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23,
+ 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37,
+ 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63,
+ 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f,
+ 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692,
+ 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb,
+ 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f,
+ 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6,
+ 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9,
+ 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83,
+ 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3,
+ 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf,
+ 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7,
+ 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f,
+ 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b,
+ 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87,
+ 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb,
+ 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7,
+ 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663,
+ 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd,
+ 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7,
+ 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b,
+ 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f,
+ 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7,
+ 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700,
+ 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23,
+ 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53,
+ 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714,
+ 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b,
+ 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719,
+ 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728,
+ 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37,
+ 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57,
+ 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737,
+ 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741,
+ 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff,
+ 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637,
+ 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f,
+ 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093,
+ 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782,
+ 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3,
+ 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7,
+ 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133,
+ 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa,
+ 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4,
+ 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7,
+ 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7,
+ 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b,
+ 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd,
+ 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f,
+ 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f,
+ 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273,
+ 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677,
+ 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7,
+ 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb,
+ 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5,
+ 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa,
+ 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b,
+ 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7,
+ 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665,
+ 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f,
+ 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477,
+ 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693,
+ 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb,
+ 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b,
+ 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567,
+ 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7,
+ 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7,
+ 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef,
+ 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868,
+}
+
+// nfcIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var nfcIndex = [1408]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04,
+ 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32,
+ 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35,
+ 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05,
+ 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a,
+ 0xf0: 0x13,
+ // Block 0x4, offset 0x100
+ 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40,
+ 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47,
+ 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d,
+ 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55,
+ // Block 0x5, offset 0x140
+ 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b,
+ 0x14d: 0x5c,
+ 0x15c: 0x5d, 0x15f: 0x5e,
+ 0x162: 0x5f, 0x164: 0x60,
+ 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66,
+ 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f,
+ 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17,
+ // Block 0x6, offset 0x180
+ 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d,
+ 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70,
+ 0x1ab: 0x71,
+ 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77,
+ 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a,
+ // Block 0x8, offset 0x200
+ 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d,
+ 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83,
+ 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86,
+ 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87,
+ 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88,
+ // Block 0x9, offset 0x240
+ 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89,
+ 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a,
+ 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b,
+ 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c,
+ 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d,
+ 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87,
+ 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88,
+ 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89,
+ // Block 0xa, offset 0x280
+ 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a,
+ 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b,
+ 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c,
+ 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d,
+ 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87,
+ 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88,
+ 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89,
+ 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b,
+ 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c,
+ 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d,
+ 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e,
+ // Block 0xc, offset 0x300
+ 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20,
+ 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91,
+ 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95,
+ 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b,
+ // Block 0xd, offset 0x340
+ 0x347: 0x9c,
+ 0x34b: 0x9d, 0x34d: 0x9e,
+ 0x368: 0x9f, 0x36b: 0xa0,
+ 0x374: 0xa1,
+ 0x37d: 0xa2,
+ // Block 0xe, offset 0x380
+ 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6,
+ 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa,
+ 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf,
+ 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1,
+ 0x3a0: 0xb2,
+ 0x3a8: 0xb3, 0x3a9: 0xb4, 0x3aa: 0xb5,
+ 0x3b0: 0x73, 0x3b5: 0xb6, 0x3b6: 0xb7,
+ // Block 0xf, offset 0x3c0
+ 0x3eb: 0xb8, 0x3ec: 0xb9,
+ // Block 0x10, offset 0x400
+ 0x432: 0xba,
+ // Block 0x11, offset 0x440
+ 0x445: 0xbb, 0x446: 0xbc, 0x447: 0xbd,
+ 0x449: 0xbe,
+ // Block 0x12, offset 0x480
+ 0x480: 0xbf,
+ 0x4a3: 0xc0, 0x4a5: 0xc1,
+ // Block 0x13, offset 0x4c0
+ 0x4c8: 0xc2,
+ // Block 0x14, offset 0x500
+ 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c,
+ 0x528: 0x2d,
+ // Block 0x15, offset 0x540
+ 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d,
+ 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11,
+ 0x56f: 0x12,
+}
+
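+// Layout note (illustrative, not part of the generated tables): nfcIndex is
+// the block index of a multi-level trie. Assuming the nfc trie mirrors the
+// nfkcTrie lookup functions later in this file, the entry for a lead byte is
+// either a value-block handle (2-byte sequences, resolved by lookupValue) or
+// an index-block handle that is combined with the following continuation
+// bytes before the final value lookup. Handles beyond the dense value blocks
+// are resolved through the sparse tables below.
+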
+// nfcSparseOffset: 149 entries, 298 bytes
+var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xcf, 0xd1, 0xd6, 0xe7, 0xf3, 0xf5, 0xfb, 0xfd, 0xff, 0x101, 0x103, 0x105, 0x107, 0x10a, 0x10d, 0x10f, 0x112, 0x115, 0x119, 0x11e, 0x127, 0x129, 0x12c, 0x12e, 0x139, 0x13d, 0x14b, 0x14e, 0x154, 0x15a, 0x165, 0x169, 0x16b, 0x16d, 0x16f, 0x171, 0x173, 0x179, 0x17d, 0x17f, 0x181, 0x189, 0x18d, 0x190, 0x192, 0x194, 0x196, 0x199, 0x19b, 0x19d, 0x19f, 0x1a1, 0x1a7, 0x1aa, 0x1ac, 0x1b3, 0x1b9, 0x1bf, 0x1c7, 0x1cd, 0x1d3, 0x1d9, 0x1dd, 0x1eb, 0x1f4, 0x1f7, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x205, 0x20a, 0x20c, 0x20e, 0x213, 0x219, 0x21b, 0x21d, 0x21f, 0x225, 0x228, 0x22a, 0x230, 0x233, 0x23b, 0x242, 0x245, 0x248, 0x24a, 0x24d, 0x255, 0x259, 0x260, 0x263, 0x269, 0x26b, 0x26e, 0x270, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x284, 0x291, 0x29b, 0x29d, 0x29f, 0x2a5, 0x2a7, 0x2aa}
+
+// nfcSparseValues: 684 entries, 2736 bytes
+var nfcSparseValues = [684]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x04},
+ {value: 0xa100, lo: 0xa8, hi: 0xa8},
+ {value: 0x8100, lo: 0xaf, hi: 0xaf},
+ {value: 0x8100, lo: 0xb4, hi: 0xb4},
+ {value: 0x8100, lo: 0xb8, hi: 0xb8},
+ // Block 0x1, offset 0x5
+ {value: 0x0091, lo: 0x03},
+ {value: 0x46e2, lo: 0xa0, hi: 0xa1},
+ {value: 0x4714, lo: 0xaf, hi: 0xb0},
+ {value: 0xa000, lo: 0xb7, hi: 0xb7},
+ // Block 0x2, offset 0x9
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ // Block 0x3, offset 0xb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x98, hi: 0x9d},
+ // Block 0x4, offset 0xd
+ {value: 0x0006, lo: 0x0a},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x85, hi: 0x85},
+ {value: 0xa000, lo: 0x89, hi: 0x89},
+ {value: 0x4840, lo: 0x8a, hi: 0x8a},
+ {value: 0x485e, lo: 0x8b, hi: 0x8b},
+ {value: 0x36c7, lo: 0x8c, hi: 0x8c},
+ {value: 0x36df, lo: 0x8d, hi: 0x8d},
+ {value: 0x4876, lo: 0x8e, hi: 0x8e},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x36fd, lo: 0x93, hi: 0x94},
+ // Block 0x5, offset 0x18
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0xa000, lo: 0x8d, hi: 0x8d},
+ {value: 0x37a5, lo: 0x90, hi: 0x90},
+ {value: 0x37b1, lo: 0x91, hi: 0x91},
+ {value: 0x379f, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x96, hi: 0x96},
+ {value: 0x3817, lo: 0x97, hi: 0x97},
+ {value: 0x37e1, lo: 0x9c, hi: 0x9c},
+ {value: 0x37c9, lo: 0x9d, hi: 0x9d},
+ {value: 0x37f3, lo: 0x9e, hi: 0x9e},
+ {value: 0xa000, lo: 0xb4, hi: 0xb5},
+ {value: 0x381d, lo: 0xb6, hi: 0xb6},
+ {value: 0x3823, lo: 0xb7, hi: 0xb7},
+ // Block 0x6, offset 0x28
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x83, hi: 0x87},
+ // Block 0x7, offset 0x2a
+ {value: 0x0001, lo: 0x04},
+ {value: 0x8113, lo: 0x81, hi: 0x82},
+ {value: 0x8132, lo: 0x84, hi: 0x84},
+ {value: 0x812d, lo: 0x85, hi: 0x85},
+ {value: 0x810d, lo: 0x87, hi: 0x87},
+ // Block 0x8, offset 0x2f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x97},
+ {value: 0x8119, lo: 0x98, hi: 0x98},
+ {value: 0x811a, lo: 0x99, hi: 0x99},
+ {value: 0x811b, lo: 0x9a, hi: 0x9a},
+ {value: 0x3841, lo: 0xa2, hi: 0xa2},
+ {value: 0x3847, lo: 0xa3, hi: 0xa3},
+ {value: 0x3853, lo: 0xa4, hi: 0xa4},
+ {value: 0x384d, lo: 0xa5, hi: 0xa5},
+ {value: 0x3859, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xa7, hi: 0xa7},
+ // Block 0x9, offset 0x3a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x386b, lo: 0x80, hi: 0x80},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0x385f, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x3865, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x95, hi: 0x95},
+ {value: 0x8132, lo: 0x96, hi: 0x9c},
+ {value: 0x8132, lo: 0x9f, hi: 0xa2},
+ {value: 0x812d, lo: 0xa3, hi: 0xa3},
+ {value: 0x8132, lo: 0xa4, hi: 0xa4},
+ {value: 0x8132, lo: 0xa7, hi: 0xa8},
+ {value: 0x812d, lo: 0xaa, hi: 0xaa},
+ {value: 0x8132, lo: 0xab, hi: 0xac},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ // Block 0xa, offset 0x49
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x811f, lo: 0x91, hi: 0x91},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x812d, lo: 0xb1, hi: 0xb1},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb5, hi: 0xb6},
+ {value: 0x812d, lo: 0xb7, hi: 0xb9},
+ {value: 0x8132, lo: 0xba, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbc},
+ {value: 0x8132, lo: 0xbd, hi: 0xbd},
+ {value: 0x812d, lo: 0xbe, hi: 0xbe},
+ {value: 0x8132, lo: 0xbf, hi: 0xbf},
+ // Block 0xb, offset 0x56
+ {value: 0x0005, lo: 0x07},
+ {value: 0x8132, lo: 0x80, hi: 0x80},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x812d, lo: 0x82, hi: 0x83},
+ {value: 0x812d, lo: 0x84, hi: 0x85},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x812d, lo: 0x88, hi: 0x89},
+ {value: 0x8132, lo: 0x8a, hi: 0x8a},
+ // Block 0xc, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xab, hi: 0xb1},
+ {value: 0x812d, lo: 0xb2, hi: 0xb2},
+ {value: 0x8132, lo: 0xb3, hi: 0xb3},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0xd, offset 0x63
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0x96, hi: 0x99},
+ {value: 0x8132, lo: 0x9b, hi: 0xa3},
+ {value: 0x8132, lo: 0xa5, hi: 0xa7},
+ {value: 0x8132, lo: 0xa9, hi: 0xad},
+ // Block 0xe, offset 0x68
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x99, hi: 0x9b},
+ // Block 0xf, offset 0x6a
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0xa8, hi: 0xa8},
+ {value: 0x3ed8, lo: 0xa9, hi: 0xa9},
+ {value: 0xa000, lo: 0xb0, hi: 0xb0},
+ {value: 0x3ee0, lo: 0xb1, hi: 0xb1},
+ {value: 0xa000, lo: 0xb3, hi: 0xb3},
+ {value: 0x3ee8, lo: 0xb4, hi: 0xb4},
+ {value: 0x9902, lo: 0xbc, hi: 0xbc},
+ // Block 0x10, offset 0x72
+ {value: 0x0008, lo: 0x06},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x91, hi: 0x91},
+ {value: 0x812d, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x93, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x94},
+ {value: 0x451c, lo: 0x98, hi: 0x9f},
+ // Block 0x11, offset 0x79
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x12, offset 0x7c
+ {value: 0x0008, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2c9e, lo: 0x8b, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x455c, lo: 0x9c, hi: 0x9d},
+ {value: 0x456c, lo: 0x9f, hi: 0x9f},
+ {value: 0x8132, lo: 0xbe, hi: 0xbe},
+ // Block 0x13, offset 0x84
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4594, lo: 0xb3, hi: 0xb3},
+ {value: 0x459c, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x14, offset 0x88
+ {value: 0x0008, lo: 0x03},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x4574, lo: 0x99, hi: 0x9b},
+ {value: 0x458c, lo: 0x9e, hi: 0x9e},
+ // Block 0x15, offset 0x8c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x16, offset 0x8e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ // Block 0x17, offset 0x90
+ {value: 0x0000, lo: 0x08},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2cb6, lo: 0x88, hi: 0x88},
+ {value: 0x2cae, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cbe, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x96, hi: 0x97},
+ {value: 0x45a4, lo: 0x9c, hi: 0x9c},
+ {value: 0x45ac, lo: 0x9d, hi: 0x9d},
+ // Block 0x18, offset 0x99
+ {value: 0x0000, lo: 0x03},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x2cc6, lo: 0x94, hi: 0x94},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x19, offset 0x9d
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cce, lo: 0x8a, hi: 0x8a},
+ {value: 0x2cde, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cd6, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1a, offset 0xa4
+ {value: 0x1801, lo: 0x04},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x3ef0, lo: 0x88, hi: 0x88},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8120, lo: 0x95, hi: 0x96},
+ // Block 0x1b, offset 0xa9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0xa000, lo: 0xbf, hi: 0xbf},
+ // Block 0x1c, offset 0xac
+ {value: 0x0000, lo: 0x09},
+ {value: 0x2ce6, lo: 0x80, hi: 0x80},
+ {value: 0x9900, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x2cee, lo: 0x87, hi: 0x87},
+ {value: 0x2cf6, lo: 0x88, hi: 0x88},
+ {value: 0x2f50, lo: 0x8a, hi: 0x8a},
+ {value: 0x2dd8, lo: 0x8b, hi: 0x8b},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x95, hi: 0x96},
+ // Block 0x1d, offset 0xb6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x1e, offset 0xb9
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cfe, lo: 0x8a, hi: 0x8a},
+ {value: 0x2d0e, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d06, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1f, offset 0xc0
+ {value: 0x6bea, lo: 0x07},
+ {value: 0x9904, lo: 0x8a, hi: 0x8a},
+ {value: 0x9900, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x3ef8, lo: 0x9a, hi: 0x9a},
+ {value: 0x2f58, lo: 0x9c, hi: 0x9c},
+ {value: 0x2de3, lo: 0x9d, hi: 0x9d},
+ {value: 0x2d16, lo: 0x9e, hi: 0x9f},
+ // Block 0x20, offset 0xc8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8122, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x21, offset 0xcb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8123, lo: 0x88, hi: 0x8b},
+ // Block 0x22, offset 0xcd
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8124, lo: 0xb8, hi: 0xb9},
+ // Block 0x23, offset 0xcf
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8125, lo: 0x88, hi: 0x8b},
+ // Block 0x24, offset 0xd1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x812d, lo: 0x98, hi: 0x99},
+ {value: 0x812d, lo: 0xb5, hi: 0xb5},
+ {value: 0x812d, lo: 0xb7, hi: 0xb7},
+ {value: 0x812b, lo: 0xb9, hi: 0xb9},
+ // Block 0x25, offset 0xd6
+ {value: 0x0000, lo: 0x10},
+ {value: 0x2644, lo: 0x83, hi: 0x83},
+ {value: 0x264b, lo: 0x8d, hi: 0x8d},
+ {value: 0x2652, lo: 0x92, hi: 0x92},
+ {value: 0x2659, lo: 0x97, hi: 0x97},
+ {value: 0x2660, lo: 0x9c, hi: 0x9c},
+ {value: 0x263d, lo: 0xa9, hi: 0xa9},
+ {value: 0x8126, lo: 0xb1, hi: 0xb1},
+ {value: 0x8127, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a84, lo: 0xb3, hi: 0xb3},
+ {value: 0x8128, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a8d, lo: 0xb5, hi: 0xb5},
+ {value: 0x45b4, lo: 0xb6, hi: 0xb6},
+ {value: 0x8200, lo: 0xb7, hi: 0xb7},
+ {value: 0x45bc, lo: 0xb8, hi: 0xb8},
+ {value: 0x8200, lo: 0xb9, hi: 0xb9},
+ {value: 0x8127, lo: 0xba, hi: 0xbd},
+ // Block 0x26, offset 0xe7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x8127, lo: 0x80, hi: 0x80},
+ {value: 0x4a96, lo: 0x81, hi: 0x81},
+ {value: 0x8132, lo: 0x82, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0x86, hi: 0x87},
+ {value: 0x266e, lo: 0x93, hi: 0x93},
+ {value: 0x2675, lo: 0x9d, hi: 0x9d},
+ {value: 0x267c, lo: 0xa2, hi: 0xa2},
+ {value: 0x2683, lo: 0xa7, hi: 0xa7},
+ {value: 0x268a, lo: 0xac, hi: 0xac},
+ {value: 0x2667, lo: 0xb9, hi: 0xb9},
+ // Block 0x27, offset 0xf3
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x86, hi: 0x86},
+ // Block 0x28, offset 0xf5
+ {value: 0x0000, lo: 0x05},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x2d1e, lo: 0xa6, hi: 0xa6},
+ {value: 0x9900, lo: 0xae, hi: 0xae},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x29, offset 0xfb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ // Block 0x2a, offset 0xfd
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x80, hi: 0x92},
+ // Block 0x2b, offset 0xff
+ {value: 0x0000, lo: 0x01},
+ {value: 0xb900, lo: 0xa1, hi: 0xb5},
+ // Block 0x2c, offset 0x101
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0xa8, hi: 0xbf},
+ // Block 0x2d, offset 0x103
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0x80, hi: 0x82},
+ // Block 0x2e, offset 0x105
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9d, hi: 0x9f},
+ // Block 0x2f, offset 0x107
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x94, hi: 0x94},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x30, offset 0x10a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x9d, hi: 0x9d},
+ // Block 0x31, offset 0x10d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8131, lo: 0xa9, hi: 0xa9},
+ // Block 0x32, offset 0x10f
+ {value: 0x0004, lo: 0x02},
+ {value: 0x812e, lo: 0xb9, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbb},
+ // Block 0x33, offset 0x112
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x97, hi: 0x97},
+ {value: 0x812d, lo: 0x98, hi: 0x98},
+ // Block 0x34, offset 0x115
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ {value: 0x8132, lo: 0xb5, hi: 0xbc},
+ {value: 0x812d, lo: 0xbf, hi: 0xbf},
+ // Block 0x35, offset 0x119
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ {value: 0x812d, lo: 0xb5, hi: 0xba},
+ {value: 0x8132, lo: 0xbb, hi: 0xbc},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x36, offset 0x11e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x2d66, lo: 0x80, hi: 0x80},
+ {value: 0x2d6e, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x82, hi: 0x82},
+ {value: 0x2d76, lo: 0x83, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xab, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xac},
+ {value: 0x8132, lo: 0xad, hi: 0xb3},
+ // Block 0x37, offset 0x127
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xaa, hi: 0xab},
+ // Block 0x38, offset 0x129
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xa6, hi: 0xa6},
+ {value: 0x8104, lo: 0xb2, hi: 0xb3},
+ // Block 0x39, offset 0x12c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x3a, offset 0x12e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x92},
+ {value: 0x8101, lo: 0x94, hi: 0x94},
+ {value: 0x812d, lo: 0x95, hi: 0x99},
+ {value: 0x8132, lo: 0x9a, hi: 0x9b},
+ {value: 0x812d, lo: 0x9c, hi: 0x9f},
+ {value: 0x8132, lo: 0xa0, hi: 0xa0},
+ {value: 0x8101, lo: 0xa2, hi: 0xa8},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ {value: 0x8132, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb8, hi: 0xb9},
+ // Block 0x3b, offset 0x139
+ {value: 0x0004, lo: 0x03},
+ {value: 0x0433, lo: 0x80, hi: 0x81},
+ {value: 0x8100, lo: 0x97, hi: 0x97},
+ {value: 0x8100, lo: 0xbe, hi: 0xbe},
+ // Block 0x3c, offset 0x13d
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x8132, lo: 0x90, hi: 0x91},
+ {value: 0x8101, lo: 0x92, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x97},
+ {value: 0x8101, lo: 0x98, hi: 0x9a},
+ {value: 0x8132, lo: 0x9b, hi: 0x9c},
+ {value: 0x8132, lo: 0xa1, hi: 0xa1},
+ {value: 0x8101, lo: 0xa5, hi: 0xa6},
+ {value: 0x8132, lo: 0xa7, hi: 0xa7},
+ {value: 0x812d, lo: 0xa8, hi: 0xa8},
+ {value: 0x8132, lo: 0xa9, hi: 0xa9},
+ {value: 0x8101, lo: 0xaa, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xaf},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ // Block 0x3d, offset 0x14b
+ {value: 0x427b, lo: 0x02},
+ {value: 0x01b8, lo: 0xa6, hi: 0xa6},
+ {value: 0x0057, lo: 0xaa, hi: 0xab},
+ // Block 0x3e, offset 0x14e
+ {value: 0x0007, lo: 0x05},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ {value: 0x3bb9, lo: 0x9a, hi: 0x9b},
+ {value: 0x3bc7, lo: 0xae, hi: 0xae},
+ // Block 0x3f, offset 0x154
+ {value: 0x000e, lo: 0x05},
+ {value: 0x3bce, lo: 0x8d, hi: 0x8e},
+ {value: 0x3bd5, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ // Block 0x40, offset 0x15a
+ {value: 0x6408, lo: 0x0a},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0x3be3, lo: 0x84, hi: 0x84},
+ {value: 0xa000, lo: 0x88, hi: 0x88},
+ {value: 0x3bea, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0x3bf1, lo: 0x8c, hi: 0x8c},
+ {value: 0xa000, lo: 0xa3, hi: 0xa3},
+ {value: 0x3bf8, lo: 0xa4, hi: 0xa5},
+ {value: 0x3bff, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xbc, hi: 0xbc},
+ // Block 0x41, offset 0x165
+ {value: 0x0007, lo: 0x03},
+ {value: 0x3c68, lo: 0xa0, hi: 0xa1},
+ {value: 0x3c92, lo: 0xa2, hi: 0xa3},
+ {value: 0x3cbc, lo: 0xaa, hi: 0xad},
+ // Block 0x42, offset 0x169
+ {value: 0x0004, lo: 0x01},
+ {value: 0x048b, lo: 0xa9, hi: 0xaa},
+ // Block 0x43, offset 0x16b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x44dd, lo: 0x9c, hi: 0x9c},
+ // Block 0x44, offset 0x16d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xaf, hi: 0xb1},
+ // Block 0x45, offset 0x16f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x46, offset 0x171
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa0, hi: 0xbf},
+ // Block 0x47, offset 0x173
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812c, lo: 0xaa, hi: 0xaa},
+ {value: 0x8131, lo: 0xab, hi: 0xab},
+ {value: 0x8133, lo: 0xac, hi: 0xac},
+ {value: 0x812e, lo: 0xad, hi: 0xad},
+ {value: 0x812f, lo: 0xae, hi: 0xaf},
+ // Block 0x48, offset 0x179
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4a9f, lo: 0xb3, hi: 0xb3},
+ {value: 0x4a9f, lo: 0xb5, hi: 0xb6},
+ {value: 0x4a9f, lo: 0xba, hi: 0xbf},
+ // Block 0x49, offset 0x17d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x4a9f, lo: 0x8f, hi: 0xa3},
+ // Block 0x4a, offset 0x17f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xae, hi: 0xbe},
+ // Block 0x4b, offset 0x181
+ {value: 0x0000, lo: 0x07},
+ {value: 0x8100, lo: 0x84, hi: 0x84},
+ {value: 0x8100, lo: 0x87, hi: 0x87},
+ {value: 0x8100, lo: 0x90, hi: 0x90},
+ {value: 0x8100, lo: 0x9e, hi: 0x9e},
+ {value: 0x8100, lo: 0xa1, hi: 0xa1},
+ {value: 0x8100, lo: 0xb2, hi: 0xb2},
+ {value: 0x8100, lo: 0xbb, hi: 0xbb},
+ // Block 0x4c, offset 0x189
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8100, lo: 0x80, hi: 0x80},
+ {value: 0x8100, lo: 0x8b, hi: 0x8b},
+ {value: 0x8100, lo: 0x8e, hi: 0x8e},
+ // Block 0x4d, offset 0x18d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xaf, hi: 0xaf},
+ {value: 0x8132, lo: 0xb4, hi: 0xbd},
+ // Block 0x4e, offset 0x190
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9e, hi: 0x9f},
+ // Block 0x4f, offset 0x192
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb1},
+ // Block 0x50, offset 0x194
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ // Block 0x51, offset 0x196
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xa0, hi: 0xb1},
+ // Block 0x52, offset 0x199
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xab, hi: 0xad},
+ // Block 0x53, offset 0x19b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x93, hi: 0x93},
+ // Block 0x54, offset 0x19d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb3, hi: 0xb3},
+ // Block 0x55, offset 0x19f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ // Block 0x56, offset 0x1a1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb7, hi: 0xb8},
+ {value: 0x8132, lo: 0xbe, hi: 0xbf},
+ // Block 0x57, offset 0x1a7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ // Block 0x58, offset 0x1aa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xad, hi: 0xad},
+ // Block 0x59, offset 0x1ac
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe500, lo: 0x80, hi: 0x80},
+ {value: 0xc600, lo: 0x81, hi: 0x9b},
+ {value: 0xe500, lo: 0x9c, hi: 0x9c},
+ {value: 0xc600, lo: 0x9d, hi: 0xb7},
+ {value: 0xe500, lo: 0xb8, hi: 0xb8},
+ {value: 0xc600, lo: 0xb9, hi: 0xbf},
+ // Block 0x5a, offset 0x1b3
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x93},
+ {value: 0xe500, lo: 0x94, hi: 0x94},
+ {value: 0xc600, lo: 0x95, hi: 0xaf},
+ {value: 0xe500, lo: 0xb0, hi: 0xb0},
+ {value: 0xc600, lo: 0xb1, hi: 0xbf},
+ // Block 0x5b, offset 0x1b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8b},
+ {value: 0xe500, lo: 0x8c, hi: 0x8c},
+ {value: 0xc600, lo: 0x8d, hi: 0xa7},
+ {value: 0xe500, lo: 0xa8, hi: 0xa8},
+ {value: 0xc600, lo: 0xa9, hi: 0xbf},
+ // Block 0x5c, offset 0x1bf
+ {value: 0x0000, lo: 0x07},
+ {value: 0xc600, lo: 0x80, hi: 0x83},
+ {value: 0xe500, lo: 0x84, hi: 0x84},
+ {value: 0xc600, lo: 0x85, hi: 0x9f},
+ {value: 0xe500, lo: 0xa0, hi: 0xa0},
+ {value: 0xc600, lo: 0xa1, hi: 0xbb},
+ {value: 0xe500, lo: 0xbc, hi: 0xbc},
+ {value: 0xc600, lo: 0xbd, hi: 0xbf},
+ // Block 0x5d, offset 0x1c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x97},
+ {value: 0xe500, lo: 0x98, hi: 0x98},
+ {value: 0xc600, lo: 0x99, hi: 0xb3},
+ {value: 0xe500, lo: 0xb4, hi: 0xb4},
+ {value: 0xc600, lo: 0xb5, hi: 0xbf},
+ // Block 0x5e, offset 0x1cd
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8f},
+ {value: 0xe500, lo: 0x90, hi: 0x90},
+ {value: 0xc600, lo: 0x91, hi: 0xab},
+ {value: 0xe500, lo: 0xac, hi: 0xac},
+ {value: 0xc600, lo: 0xad, hi: 0xbf},
+ // Block 0x5f, offset 0x1d3
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ {value: 0xe500, lo: 0xa4, hi: 0xa4},
+ {value: 0xc600, lo: 0xa5, hi: 0xbf},
+ // Block 0x60, offset 0x1d9
+ {value: 0x0000, lo: 0x03},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ // Block 0x61, offset 0x1dd
+ {value: 0x0006, lo: 0x0d},
+ {value: 0x4390, lo: 0x9d, hi: 0x9d},
+ {value: 0x8115, lo: 0x9e, hi: 0x9e},
+ {value: 0x4402, lo: 0x9f, hi: 0x9f},
+ {value: 0x43f0, lo: 0xaa, hi: 0xab},
+ {value: 0x44f4, lo: 0xac, hi: 0xac},
+ {value: 0x44fc, lo: 0xad, hi: 0xad},
+ {value: 0x4348, lo: 0xae, hi: 0xb1},
+ {value: 0x4366, lo: 0xb2, hi: 0xb4},
+ {value: 0x437e, lo: 0xb5, hi: 0xb6},
+ {value: 0x438a, lo: 0xb8, hi: 0xb8},
+ {value: 0x4396, lo: 0xb9, hi: 0xbb},
+ {value: 0x43ae, lo: 0xbc, hi: 0xbc},
+ {value: 0x43b4, lo: 0xbe, hi: 0xbe},
+ // Block 0x62, offset 0x1eb
+ {value: 0x0006, lo: 0x08},
+ {value: 0x43ba, lo: 0x80, hi: 0x81},
+ {value: 0x43c6, lo: 0x83, hi: 0x84},
+ {value: 0x43d8, lo: 0x86, hi: 0x89},
+ {value: 0x43fc, lo: 0x8a, hi: 0x8a},
+ {value: 0x4378, lo: 0x8b, hi: 0x8b},
+ {value: 0x4360, lo: 0x8c, hi: 0x8c},
+ {value: 0x43a8, lo: 0x8d, hi: 0x8d},
+ {value: 0x43d2, lo: 0x8e, hi: 0x8e},
+ // Block 0x63, offset 0x1f4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0xa4, hi: 0xa5},
+ {value: 0x8100, lo: 0xb0, hi: 0xb1},
+ // Block 0x64, offset 0x1f7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0x9b, hi: 0x9d},
+ {value: 0x8200, lo: 0x9e, hi: 0xa3},
+ // Block 0x65, offset 0x1fa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x90, hi: 0x90},
+ // Block 0x66, offset 0x1fc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0x99, hi: 0x99},
+ {value: 0x8200, lo: 0xb2, hi: 0xb4},
+ // Block 0x67, offset 0x1ff
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xbc, hi: 0xbd},
+ // Block 0x68, offset 0x201
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8132, lo: 0xa0, hi: 0xa6},
+ {value: 0x812d, lo: 0xa7, hi: 0xad},
+ {value: 0x8132, lo: 0xae, hi: 0xaf},
+ // Block 0x69, offset 0x205
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8100, lo: 0x89, hi: 0x8c},
+ {value: 0x8100, lo: 0xb0, hi: 0xb2},
+ {value: 0x8100, lo: 0xb4, hi: 0xb4},
+ {value: 0x8100, lo: 0xb6, hi: 0xbf},
+ // Block 0x6a, offset 0x20a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x81, hi: 0x8c},
+ // Block 0x6b, offset 0x20c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xb5, hi: 0xba},
+ // Block 0x6c, offset 0x20e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x4a9f, lo: 0x9e, hi: 0x9f},
+ {value: 0x4a9f, lo: 0xa3, hi: 0xa3},
+ {value: 0x4a9f, lo: 0xa5, hi: 0xa6},
+ {value: 0x4a9f, lo: 0xaa, hi: 0xaf},
+ // Block 0x6d, offset 0x213
+ {value: 0x0000, lo: 0x05},
+ {value: 0x4a9f, lo: 0x82, hi: 0x87},
+ {value: 0x4a9f, lo: 0x8a, hi: 0x8f},
+ {value: 0x4a9f, lo: 0x92, hi: 0x97},
+ {value: 0x4a9f, lo: 0x9a, hi: 0x9c},
+ {value: 0x8100, lo: 0xa3, hi: 0xa3},
+ // Block 0x6e, offset 0x219
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x6f, offset 0x21b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xa0, hi: 0xa0},
+ // Block 0x70, offset 0x21d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb6, hi: 0xba},
+ // Block 0x71, offset 0x21f
+ {value: 0x002c, lo: 0x05},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x8f, hi: 0x8f},
+ {value: 0x8132, lo: 0xb8, hi: 0xb8},
+ {value: 0x8101, lo: 0xb9, hi: 0xba},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x72, offset 0x225
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xa5, hi: 0xa5},
+ {value: 0x812d, lo: 0xa6, hi: 0xa6},
+ // Block 0x73, offset 0x228
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa4, hi: 0xa7},
+ // Block 0x74, offset 0x22a
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x8132, lo: 0x88, hi: 0x8a},
+ {value: 0x812d, lo: 0x8b, hi: 0x8b},
+ {value: 0x8132, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x8d, hi: 0x90},
+ // Block 0x75, offset 0x230
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x76, offset 0x233
+ {value: 0x17fe, lo: 0x07},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x4238, lo: 0x9a, hi: 0x9a},
+ {value: 0xa000, lo: 0x9b, hi: 0x9b},
+ {value: 0x4242, lo: 0x9c, hi: 0x9c},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x424c, lo: 0xab, hi: 0xab},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x77, offset 0x23b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x8132, lo: 0x80, hi: 0x82},
+ {value: 0x9900, lo: 0xa7, hi: 0xa7},
+ {value: 0x2d7e, lo: 0xae, hi: 0xae},
+ {value: 0x2d88, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb1, hi: 0xb2},
+ {value: 0x8104, lo: 0xb3, hi: 0xb4},
+ // Block 0x78, offset 0x242
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x79, offset 0x245
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb5, hi: 0xb5},
+ {value: 0x8102, lo: 0xb6, hi: 0xb6},
+ // Block 0x7a, offset 0x248
+ {value: 0x0002, lo: 0x01},
+ {value: 0x8102, lo: 0xa9, hi: 0xaa},
+ // Block 0x7b, offset 0x24a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x7c, offset 0x24d
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2d92, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d9c, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x8132, lo: 0xa6, hi: 0xac},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ // Block 0x7d, offset 0x255
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x86, hi: 0x86},
+ {value: 0x8132, lo: 0x9e, hi: 0x9e},
+ // Block 0x7e, offset 0x259
+ {value: 0x6b5a, lo: 0x06},
+ {value: 0x9900, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xb9, hi: 0xb9},
+ {value: 0x9900, lo: 0xba, hi: 0xba},
+ {value: 0x2db0, lo: 0xbb, hi: 0xbb},
+ {value: 0x2da6, lo: 0xbc, hi: 0xbd},
+ {value: 0x2dba, lo: 0xbe, hi: 0xbe},
+ // Block 0x7f, offset 0x260
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x83, hi: 0x83},
+ // Block 0x80, offset 0x263
+ {value: 0x0000, lo: 0x05},
+ {value: 0x9900, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb8, hi: 0xb9},
+ {value: 0x2dc4, lo: 0xba, hi: 0xba},
+ {value: 0x2dce, lo: 0xbb, hi: 0xbb},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x81, offset 0x269
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0x80, hi: 0x80},
+ // Block 0x82, offset 0x26b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x83, offset 0x26e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xab, hi: 0xab},
+ // Block 0x84, offset 0x270
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb9, hi: 0xb9},
+ {value: 0x8102, lo: 0xba, hi: 0xba},
+ // Block 0x85, offset 0x273
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x86, offset 0x275
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x87, hi: 0x87},
+ // Block 0x87, offset 0x277
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x99, hi: 0x99},
+ // Block 0x88, offset 0x279
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0x82, hi: 0x82},
+ {value: 0x8104, lo: 0x84, hi: 0x85},
+ // Block 0x89, offset 0x27c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x97, hi: 0x97},
+ // Block 0x8a, offset 0x27e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0xb0, hi: 0xb4},
+ // Block 0x8b, offset 0x280
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb6},
+ // Block 0x8c, offset 0x282
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0x9e, hi: 0x9e},
+ // Block 0x8d, offset 0x284
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x45cc, lo: 0x9e, hi: 0x9e},
+ {value: 0x45d6, lo: 0x9f, hi: 0x9f},
+ {value: 0x460a, lo: 0xa0, hi: 0xa0},
+ {value: 0x4618, lo: 0xa1, hi: 0xa1},
+ {value: 0x4626, lo: 0xa2, hi: 0xa2},
+ {value: 0x4634, lo: 0xa3, hi: 0xa3},
+ {value: 0x4642, lo: 0xa4, hi: 0xa4},
+ {value: 0x812b, lo: 0xa5, hi: 0xa6},
+ {value: 0x8101, lo: 0xa7, hi: 0xa9},
+ {value: 0x8130, lo: 0xad, hi: 0xad},
+ {value: 0x812b, lo: 0xae, hi: 0xb2},
+ {value: 0x812d, lo: 0xbb, hi: 0xbf},
+ // Block 0x8e, offset 0x291
+ {value: 0x0000, lo: 0x09},
+ {value: 0x812d, lo: 0x80, hi: 0x82},
+ {value: 0x8132, lo: 0x85, hi: 0x89},
+ {value: 0x812d, lo: 0x8a, hi: 0x8b},
+ {value: 0x8132, lo: 0xaa, hi: 0xad},
+ {value: 0x45e0, lo: 0xbb, hi: 0xbb},
+ {value: 0x45ea, lo: 0xbc, hi: 0xbc},
+ {value: 0x4650, lo: 0xbd, hi: 0xbd},
+ {value: 0x466c, lo: 0xbe, hi: 0xbe},
+ {value: 0x465e, lo: 0xbf, hi: 0xbf},
+ // Block 0x8f, offset 0x29b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x467a, lo: 0x80, hi: 0x80},
+ // Block 0x90, offset 0x29d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x82, hi: 0x84},
+ // Block 0x91, offset 0x29f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0x80, hi: 0x86},
+ {value: 0x8132, lo: 0x88, hi: 0x98},
+ {value: 0x8132, lo: 0x9b, hi: 0xa1},
+ {value: 0x8132, lo: 0xa3, hi: 0xa4},
+ {value: 0x8132, lo: 0xa6, hi: 0xaa},
+ // Block 0x92, offset 0x2a5
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x90, hi: 0x96},
+ // Block 0x93, offset 0x2a7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x84, hi: 0x89},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x94, offset 0x2aa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x93, hi: 0x93},
+}
+
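+// The sparse blocks above serve block handles that do not fit in the dense
+// value array. The real helper that consults them lives elsewhere in this
+// file; the function below is only an illustrative sketch of the layout
+// visible here: the first entry of block n (values[offsets[n]], e.g. via
+// nfcSparseOffset and nfcSparseValues above) is a header whose lo field holds
+// the number of ranges in the block and whose value field holds a per-byte
+// stride, followed by that many {value, lo, hi} ranges searched for byte b.
+func sparseLookupSketch(offsets []uint16, values []valueRange, n uint32, b byte) uint16 {
+	offset := offsets[n]
+	header := values[offset]
+	lo := offset + 1
+	hi := lo + uint16(header.lo) // header.lo = number of ranges in this block
+	for lo < hi {
+		m := lo + (hi-lo)/2 // binary search over the sorted, disjoint ranges
+		r := values[m]
+		if r.lo <= b && b <= r.hi {
+			return r.value + uint16(b-r.lo)*header.value
+		}
+		if b < r.lo {
+			hi = m
+		} else {
+			lo = m + 1
+		}
+	}
+	return 0
+}
+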
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfkcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfkcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
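+// Illustrative usage sketch, not part of the generated file: callers in this
+// package typically step through the input one UTF-8 sequence at a time,
+// treating a returned size of 0 as an incomplete encoding at the end of the
+// input. lookupString below behaves identically for strings.
+func walkNfkcSketch(src []byte) {
+	t := newNfkcTrie(0)
+	for i := 0; i < len(src); {
+		v, sz := t.lookup(src[i:])
+		if sz == 0 {
+			break // src ends in the middle of a multi-byte encoding
+		}
+		_ = v // trie value for the sequence starting at src[i]
+		i += sz
+	}
+}
+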
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfkcValues[c0]
+ }
+ i := nfkcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfkcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfkcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfkcValues[c0]
+ }
+ i := nfkcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// nfkcTrie. Total size: 17248 bytes (16.84 KiB). Checksum: 4fb368372b6b1b27.
+type nfkcTrie struct{}
+
+func newNfkcTrie(i int) *nfkcTrie {
+ return &nfkcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 92:
+ return uint16(nfkcValues[n<<6+uint32(b)])
+ default:
+ n -= 92
+ return uint16(nfkcSparse.lookup(n, b))
+ }
+}
+
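+// Layout note (illustrative, not part of the generated file): handles below
+// 92 address a dense 64-entry block of nfkcValues directly via n<<6+b, while
+// larger handles are shifted down by 92 and resolved through nfkcSparse,
+// which presumably follows the same header-plus-ranges layout sketched after
+// nfcSparseValues above.
+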
+// nfkcValues: 94 blocks, 6016 entries, 12032 bytes
+// The third block is the zero block.
+var nfkcValues = [6016]uint16{
+ // Block 0x0, offset 0x0
+ 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000,
+ // Block 0x1, offset 0x40
+ 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000,
+ 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000,
+ 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000,
+ 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000,
+ 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000,
+ 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000,
+ 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000,
+ 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000,
+ 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000,
+ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c,
+ 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb,
+ 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104,
+ 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd,
+ 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235,
+ 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285,
+ 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3,
+ 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750,
+ 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f,
+ 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3,
+ 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569,
+ // Block 0x4, offset 0x100
+ 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8,
+ 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6,
+ 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5,
+ 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302,
+ 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339,
+ 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352,
+ 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e,
+ 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6,
+ 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0,
+ 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc,
+ 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac,
+ // Block 0x5, offset 0x140
+ 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118,
+ 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c,
+ 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c,
+ 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483,
+ 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d,
+ 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba,
+ 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796,
+ 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2,
+ 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528,
+ 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267,
+ 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7,
+ // Block 0x6, offset 0x180
+ 0x184: 0x2dee, 0x185: 0x2df4,
+ 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a,
+ 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140,
+ 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8,
+ 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50,
+ 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5,
+ 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf,
+ 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd,
+ 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334,
+ 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46,
+ 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316,
+ 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac,
+ 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479,
+ 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6,
+ 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5,
+ 0x1de: 0x305a, 0x1df: 0x3366,
+ 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b,
+ 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769,
+ 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132,
+ 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932,
+ 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932,
+ 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d,
+ 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d,
+ 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d,
+ 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d,
+ 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d,
+ 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101,
+ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d,
+ 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132,
+ // Block 0x9, offset 0x240
+ 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936,
+ 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132,
+ 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132,
+ 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132,
+ 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135,
+ 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132,
+ 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132,
+ 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132,
+ 0x274: 0x0170,
+ 0x27a: 0x42a5,
+ 0x27e: 0x0037,
+ // Block 0xa, offset 0x280
+ 0x284: 0x425a, 0x285: 0x447b,
+ 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625,
+ 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000,
+ 0x295: 0xa000, 0x297: 0xa000,
+ 0x299: 0xa000,
+ 0x29f: 0xa000, 0x2a1: 0xa000,
+ 0x2a5: 0xa000, 0x2a9: 0xa000,
+ 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9,
+ 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000,
+ 0x2b7: 0xa000, 0x2b9: 0xa000,
+ 0x2bf: 0xa000,
+ // Block 0xb, offset 0x2c0
+ 0x2c1: 0xa000, 0x2c5: 0xa000,
+ 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e,
+ 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0,
+ 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8,
+ 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7,
+ 0x2f9: 0x01a6,
+ // Block 0xc, offset 0x300
+ 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b,
+ 0x306: 0xa000, 0x307: 0x3709,
+ 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000,
+ 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000,
+ 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000,
+ 0x31e: 0xa000, 0x323: 0xa000,
+ 0x327: 0xa000,
+ 0x32b: 0xa000, 0x32d: 0xa000,
+ 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000,
+ 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000,
+ 0x33e: 0xa000,
+ // Block 0xd, offset 0x340
+ 0x341: 0x3733, 0x342: 0x37b7,
+ 0x350: 0x370f, 0x351: 0x3793,
+ 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab,
+ 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd,
+ 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf,
+ 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000,
+ 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed,
+ 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805,
+ 0x378: 0x3787, 0x379: 0x380b,
+ // Block 0xe, offset 0x380
+ 0x387: 0x1d61,
+ 0x391: 0x812d,
+ 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132,
+ 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132,
+ 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d,
+ 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132,
+ 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132,
+ 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a,
+ 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f,
+ 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112,
+ // Block 0xf, offset 0x3c0
+ 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116,
+ 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c,
+ 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132,
+ 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132,
+ 0x3de: 0x8132, 0x3df: 0x812d,
+ 0x3f0: 0x811e, 0x3f5: 0x1d84,
+ 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a,
+ // Block 0x10, offset 0x400
+ 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132,
+ 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132,
+ 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d,
+ 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d,
+ 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d,
+ 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 0x8132, 0x434: 0x8132, 0x435: 0x8132,
+ 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132,
+ 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132,
+ // Block 0x11, offset 0x440
+ 0x445: 0xa000,
+ 0x446: 0x2d26, 0x447: 0xa000, 0x448: 0x2d2e, 0x449: 0xa000, 0x44a: 0x2d36, 0x44b: 0xa000,
+ 0x44c: 0x2d3e, 0x44d: 0xa000, 0x44e: 0x2d46, 0x451: 0xa000,
+ 0x452: 0x2d4e,
+ 0x474: 0x8102, 0x475: 0x9900,
+ 0x47a: 0xa000, 0x47b: 0x2d56,
+ 0x47c: 0xa000, 0x47d: 0x2d5e, 0x47e: 0xa000, 0x47f: 0xa000,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8,
+ 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107,
+ 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0,
+ 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9,
+ 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be,
+ 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5,
+ 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa,
+ 0x4aa: 0x01fd,
+ 0x4b8: 0x020c,
+ // Block 0x13, offset 0x4c0
+ 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101,
+ 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116,
+ 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128,
+ 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137,
+ 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec,
+ 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5,
+ 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0,
+ // Block 0x14, offset 0x500
+ 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132,
+ 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132,
+ 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132,
+ 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132,
+ 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132,
+ 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132,
+ 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132,
+ 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132,
+ 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132,
+ 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132,
+ 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d,
+ // Block 0x15, offset 0x540
+ 0x540: 0x2f97, 0x541: 0x32a3, 0x542: 0x2fa1, 0x543: 0x32ad, 0x544: 0x2fa6, 0x545: 0x32b2,
+ 0x546: 0x2fab, 0x547: 0x32b7, 0x548: 0x38cc, 0x549: 0x3a5b, 0x54a: 0x2fc4, 0x54b: 0x32d0,
+ 0x54c: 0x2fce, 0x54d: 0x32da, 0x54e: 0x2fdd, 0x54f: 0x32e9, 0x550: 0x2fd3, 0x551: 0x32df,
+ 0x552: 0x2fd8, 0x553: 0x32e4, 0x554: 0x38ef, 0x555: 0x3a7e, 0x556: 0x38f6, 0x557: 0x3a85,
+ 0x558: 0x3019, 0x559: 0x3325, 0x55a: 0x301e, 0x55b: 0x332a, 0x55c: 0x3904, 0x55d: 0x3a93,
+ 0x55e: 0x3023, 0x55f: 0x332f, 0x560: 0x3032, 0x561: 0x333e, 0x562: 0x3050, 0x563: 0x335c,
+ 0x564: 0x305f, 0x565: 0x336b, 0x566: 0x3055, 0x567: 0x3361, 0x568: 0x3064, 0x569: 0x3370,
+ 0x56a: 0x3069, 0x56b: 0x3375, 0x56c: 0x30af, 0x56d: 0x33bb, 0x56e: 0x390b, 0x56f: 0x3a9a,
+ 0x570: 0x30b9, 0x571: 0x33ca, 0x572: 0x30c3, 0x573: 0x33d4, 0x574: 0x30cd, 0x575: 0x33de,
+ 0x576: 0x46c4, 0x577: 0x4755, 0x578: 0x3912, 0x579: 0x3aa1, 0x57a: 0x30e6, 0x57b: 0x33f7,
+ 0x57c: 0x30e1, 0x57d: 0x33f2, 0x57e: 0x30eb, 0x57f: 0x33fc,
+ // Block 0x16, offset 0x580
+ 0x580: 0x30f0, 0x581: 0x3401, 0x582: 0x30f5, 0x583: 0x3406, 0x584: 0x3109, 0x585: 0x341a,
+ 0x586: 0x3113, 0x587: 0x3424, 0x588: 0x3122, 0x589: 0x3433, 0x58a: 0x311d, 0x58b: 0x342e,
+ 0x58c: 0x3935, 0x58d: 0x3ac4, 0x58e: 0x3943, 0x58f: 0x3ad2, 0x590: 0x394a, 0x591: 0x3ad9,
+ 0x592: 0x3951, 0x593: 0x3ae0, 0x594: 0x314f, 0x595: 0x3460, 0x596: 0x3154, 0x597: 0x3465,
+ 0x598: 0x315e, 0x599: 0x346f, 0x59a: 0x46f1, 0x59b: 0x4782, 0x59c: 0x3997, 0x59d: 0x3b26,
+ 0x59e: 0x3177, 0x59f: 0x3488, 0x5a0: 0x3181, 0x5a1: 0x3492, 0x5a2: 0x4700, 0x5a3: 0x4791,
+ 0x5a4: 0x399e, 0x5a5: 0x3b2d, 0x5a6: 0x39a5, 0x5a7: 0x3b34, 0x5a8: 0x39ac, 0x5a9: 0x3b3b,
+ 0x5aa: 0x3190, 0x5ab: 0x34a1, 0x5ac: 0x319a, 0x5ad: 0x34b0, 0x5ae: 0x31ae, 0x5af: 0x34c4,
+ 0x5b0: 0x31a9, 0x5b1: 0x34bf, 0x5b2: 0x31ea, 0x5b3: 0x3500, 0x5b4: 0x31f9, 0x5b5: 0x350f,
+ 0x5b6: 0x31f4, 0x5b7: 0x350a, 0x5b8: 0x39b3, 0x5b9: 0x3b42, 0x5ba: 0x39ba, 0x5bb: 0x3b49,
+ 0x5bc: 0x31fe, 0x5bd: 0x3514, 0x5be: 0x3203, 0x5bf: 0x3519,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x3208, 0x5c1: 0x351e, 0x5c2: 0x320d, 0x5c3: 0x3523, 0x5c4: 0x321c, 0x5c5: 0x3532,
+ 0x5c6: 0x3217, 0x5c7: 0x352d, 0x5c8: 0x3221, 0x5c9: 0x353c, 0x5ca: 0x3226, 0x5cb: 0x3541,
+ 0x5cc: 0x322b, 0x5cd: 0x3546, 0x5ce: 0x3249, 0x5cf: 0x3564, 0x5d0: 0x3262, 0x5d1: 0x3582,
+ 0x5d2: 0x3271, 0x5d3: 0x3591, 0x5d4: 0x3276, 0x5d5: 0x3596, 0x5d6: 0x337a, 0x5d7: 0x34a6,
+ 0x5d8: 0x3537, 0x5d9: 0x3573, 0x5da: 0x1be0, 0x5db: 0x42d7,
+ 0x5e0: 0x46a1, 0x5e1: 0x4732, 0x5e2: 0x2f83, 0x5e3: 0x328f,
+ 0x5e4: 0x3878, 0x5e5: 0x3a07, 0x5e6: 0x3871, 0x5e7: 0x3a00, 0x5e8: 0x3886, 0x5e9: 0x3a15,
+ 0x5ea: 0x387f, 0x5eb: 0x3a0e, 0x5ec: 0x38be, 0x5ed: 0x3a4d, 0x5ee: 0x3894, 0x5ef: 0x3a23,
+ 0x5f0: 0x388d, 0x5f1: 0x3a1c, 0x5f2: 0x38a2, 0x5f3: 0x3a31, 0x5f4: 0x389b, 0x5f5: 0x3a2a,
+ 0x5f6: 0x38c5, 0x5f7: 0x3a54, 0x5f8: 0x46b5, 0x5f9: 0x4746, 0x5fa: 0x3000, 0x5fb: 0x330c,
+ 0x5fc: 0x2fec, 0x5fd: 0x32f8, 0x5fe: 0x38da, 0x5ff: 0x3a69,
+ // Block 0x18, offset 0x600
+ 0x600: 0x38d3, 0x601: 0x3a62, 0x602: 0x38e8, 0x603: 0x3a77, 0x604: 0x38e1, 0x605: 0x3a70,
+ 0x606: 0x38fd, 0x607: 0x3a8c, 0x608: 0x3091, 0x609: 0x339d, 0x60a: 0x30a5, 0x60b: 0x33b1,
+ 0x60c: 0x46e7, 0x60d: 0x4778, 0x60e: 0x3136, 0x60f: 0x3447, 0x610: 0x3920, 0x611: 0x3aaf,
+ 0x612: 0x3919, 0x613: 0x3aa8, 0x614: 0x392e, 0x615: 0x3abd, 0x616: 0x3927, 0x617: 0x3ab6,
+ 0x618: 0x3989, 0x619: 0x3b18, 0x61a: 0x396d, 0x61b: 0x3afc, 0x61c: 0x3966, 0x61d: 0x3af5,
+ 0x61e: 0x397b, 0x61f: 0x3b0a, 0x620: 0x3974, 0x621: 0x3b03, 0x622: 0x3982, 0x623: 0x3b11,
+ 0x624: 0x31e5, 0x625: 0x34fb, 0x626: 0x31c7, 0x627: 0x34dd, 0x628: 0x39e4, 0x629: 0x3b73,
+ 0x62a: 0x39dd, 0x62b: 0x3b6c, 0x62c: 0x39f2, 0x62d: 0x3b81, 0x62e: 0x39eb, 0x62f: 0x3b7a,
+ 0x630: 0x39f9, 0x631: 0x3b88, 0x632: 0x3230, 0x633: 0x354b, 0x634: 0x3258, 0x635: 0x3578,
+ 0x636: 0x3253, 0x637: 0x356e, 0x638: 0x323f, 0x639: 0x355a,
+ // Block 0x19, offset 0x640
+ 0x640: 0x4804, 0x641: 0x480a, 0x642: 0x491e, 0x643: 0x4936, 0x644: 0x4926, 0x645: 0x493e,
+ 0x646: 0x492e, 0x647: 0x4946, 0x648: 0x47aa, 0x649: 0x47b0, 0x64a: 0x488e, 0x64b: 0x48a6,
+ 0x64c: 0x4896, 0x64d: 0x48ae, 0x64e: 0x489e, 0x64f: 0x48b6, 0x650: 0x4816, 0x651: 0x481c,
+ 0x652: 0x3db8, 0x653: 0x3dc8, 0x654: 0x3dc0, 0x655: 0x3dd0,
+ 0x658: 0x47b6, 0x659: 0x47bc, 0x65a: 0x3ce8, 0x65b: 0x3cf8, 0x65c: 0x3cf0, 0x65d: 0x3d00,
+ 0x660: 0x482e, 0x661: 0x4834, 0x662: 0x494e, 0x663: 0x4966,
+ 0x664: 0x4956, 0x665: 0x496e, 0x666: 0x495e, 0x667: 0x4976, 0x668: 0x47c2, 0x669: 0x47c8,
+ 0x66a: 0x48be, 0x66b: 0x48d6, 0x66c: 0x48c6, 0x66d: 0x48de, 0x66e: 0x48ce, 0x66f: 0x48e6,
+ 0x670: 0x4846, 0x671: 0x484c, 0x672: 0x3e18, 0x673: 0x3e30, 0x674: 0x3e20, 0x675: 0x3e38,
+ 0x676: 0x3e28, 0x677: 0x3e40, 0x678: 0x47ce, 0x679: 0x47d4, 0x67a: 0x3d18, 0x67b: 0x3d30,
+ 0x67c: 0x3d20, 0x67d: 0x3d38, 0x67e: 0x3d28, 0x67f: 0x3d40,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x4852, 0x681: 0x4858, 0x682: 0x3e48, 0x683: 0x3e58, 0x684: 0x3e50, 0x685: 0x3e60,
+ 0x688: 0x47da, 0x689: 0x47e0, 0x68a: 0x3d48, 0x68b: 0x3d58,
+ 0x68c: 0x3d50, 0x68d: 0x3d60, 0x690: 0x4864, 0x691: 0x486a,
+ 0x692: 0x3e80, 0x693: 0x3e98, 0x694: 0x3e88, 0x695: 0x3ea0, 0x696: 0x3e90, 0x697: 0x3ea8,
+ 0x699: 0x47e6, 0x69b: 0x3d68, 0x69d: 0x3d70,
+ 0x69f: 0x3d78, 0x6a0: 0x487c, 0x6a1: 0x4882, 0x6a2: 0x497e, 0x6a3: 0x4996,
+ 0x6a4: 0x4986, 0x6a5: 0x499e, 0x6a6: 0x498e, 0x6a7: 0x49a6, 0x6a8: 0x47ec, 0x6a9: 0x47f2,
+ 0x6aa: 0x48ee, 0x6ab: 0x4906, 0x6ac: 0x48f6, 0x6ad: 0x490e, 0x6ae: 0x48fe, 0x6af: 0x4916,
+ 0x6b0: 0x47f8, 0x6b1: 0x431e, 0x6b2: 0x3691, 0x6b3: 0x4324, 0x6b4: 0x4822, 0x6b5: 0x432a,
+ 0x6b6: 0x36a3, 0x6b7: 0x4330, 0x6b8: 0x36c1, 0x6b9: 0x4336, 0x6ba: 0x36d9, 0x6bb: 0x433c,
+ 0x6bc: 0x4870, 0x6bd: 0x4342,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3da0, 0x6c1: 0x3da8, 0x6c2: 0x4184, 0x6c3: 0x41a2, 0x6c4: 0x418e, 0x6c5: 0x41ac,
+ 0x6c6: 0x4198, 0x6c7: 0x41b6, 0x6c8: 0x3cd8, 0x6c9: 0x3ce0, 0x6ca: 0x40d0, 0x6cb: 0x40ee,
+ 0x6cc: 0x40da, 0x6cd: 0x40f8, 0x6ce: 0x40e4, 0x6cf: 0x4102, 0x6d0: 0x3de8, 0x6d1: 0x3df0,
+ 0x6d2: 0x41c0, 0x6d3: 0x41de, 0x6d4: 0x41ca, 0x6d5: 0x41e8, 0x6d6: 0x41d4, 0x6d7: 0x41f2,
+ 0x6d8: 0x3d08, 0x6d9: 0x3d10, 0x6da: 0x410c, 0x6db: 0x412a, 0x6dc: 0x4116, 0x6dd: 0x4134,
+ 0x6de: 0x4120, 0x6df: 0x413e, 0x6e0: 0x3ec0, 0x6e1: 0x3ec8, 0x6e2: 0x41fc, 0x6e3: 0x421a,
+ 0x6e4: 0x4206, 0x6e5: 0x4224, 0x6e6: 0x4210, 0x6e7: 0x422e, 0x6e8: 0x3d80, 0x6e9: 0x3d88,
+ 0x6ea: 0x4148, 0x6eb: 0x4166, 0x6ec: 0x4152, 0x6ed: 0x4170, 0x6ee: 0x415c, 0x6ef: 0x417a,
+ 0x6f0: 0x3685, 0x6f1: 0x367f, 0x6f2: 0x3d90, 0x6f3: 0x368b, 0x6f4: 0x3d98,
+ 0x6f6: 0x4810, 0x6f7: 0x3db0, 0x6f8: 0x35f5, 0x6f9: 0x35ef, 0x6fa: 0x35e3, 0x6fb: 0x42ee,
+ 0x6fc: 0x35fb, 0x6fd: 0x4287, 0x6fe: 0x01d3, 0x6ff: 0x4287,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x42a0, 0x701: 0x4482, 0x702: 0x3dd8, 0x703: 0x369d, 0x704: 0x3de0,
+ 0x706: 0x483a, 0x707: 0x3df8, 0x708: 0x3601, 0x709: 0x42f4, 0x70a: 0x360d, 0x70b: 0x42fa,
+ 0x70c: 0x3619, 0x70d: 0x4489, 0x70e: 0x4490, 0x70f: 0x4497, 0x710: 0x36b5, 0x711: 0x36af,
+ 0x712: 0x3e00, 0x713: 0x44e4, 0x716: 0x36bb, 0x717: 0x3e10,
+ 0x718: 0x3631, 0x719: 0x362b, 0x71a: 0x361f, 0x71b: 0x4300, 0x71d: 0x449e,
+ 0x71e: 0x44a5, 0x71f: 0x44ac, 0x720: 0x36eb, 0x721: 0x36e5, 0x722: 0x3e68, 0x723: 0x44ec,
+ 0x724: 0x36cd, 0x725: 0x36d3, 0x726: 0x36f1, 0x727: 0x3e78, 0x728: 0x3661, 0x729: 0x365b,
+ 0x72a: 0x364f, 0x72b: 0x430c, 0x72c: 0x3649, 0x72d: 0x4474, 0x72e: 0x447b, 0x72f: 0x0081,
+ 0x732: 0x3eb0, 0x733: 0x36f7, 0x734: 0x3eb8,
+ 0x736: 0x4888, 0x737: 0x3ed0, 0x738: 0x363d, 0x739: 0x4306, 0x73a: 0x366d, 0x73b: 0x4318,
+ 0x73c: 0x3679, 0x73d: 0x425a, 0x73e: 0x428c,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x1bd8, 0x741: 0x1bdc, 0x742: 0x0047, 0x743: 0x1c54, 0x745: 0x1be8,
+ 0x746: 0x1bec, 0x747: 0x00e9, 0x749: 0x1c58, 0x74a: 0x008f, 0x74b: 0x0051,
+ 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053,
+ 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x198d,
+ 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065,
+ 0x760: 0x199f, 0x761: 0x1bc8, 0x762: 0x19a8,
+ 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075,
+ 0x76a: 0x0057, 0x76b: 0x42d2, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b,
+ 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215,
+ 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b98,
+ 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0463, 0x785: 0x0049,
+ 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095,
+ 0x790: 0x222e, 0x791: 0x223a,
+ 0x792: 0x22ee, 0x793: 0x2216, 0x794: 0x229a, 0x795: 0x2222, 0x796: 0x22a0, 0x797: 0x22b8,
+ 0x798: 0x22c4, 0x799: 0x2228, 0x79a: 0x22ca, 0x79b: 0x2234, 0x79c: 0x22be, 0x79d: 0x22d0,
+ 0x79e: 0x22d6, 0x79f: 0x1cbc, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba4, 0x7a3: 0x1963,
+ 0x7a4: 0x006d, 0x7a5: 0x19ab, 0x7a6: 0x1bd0, 0x7a7: 0x1d48, 0x7a8: 0x1966, 0x7a9: 0x0071,
+ 0x7aa: 0x19b7, 0x7ab: 0x1bd4, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b,
+ 0x7b0: 0x0093, 0x7b1: 0x19e4, 0x7b2: 0x1c18, 0x7b3: 0x19ed, 0x7b4: 0x00ad, 0x7b5: 0x1a62,
+ 0x7b6: 0x1c4c, 0x7b7: 0x1d5c, 0x7b8: 0x19f0, 0x7b9: 0x00b1, 0x7ba: 0x1a65, 0x7bb: 0x1c50,
+ 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b,
+ // Block 0x1f, offset 0x7c0
+ 0x7c1: 0x3c06, 0x7c3: 0xa000, 0x7c4: 0x3c0d, 0x7c5: 0xa000,
+ 0x7c7: 0x3c14, 0x7c8: 0xa000, 0x7c9: 0x3c1b,
+ 0x7cd: 0xa000,
+ 0x7e0: 0x2f65, 0x7e1: 0xa000, 0x7e2: 0x3c29,
+ 0x7e4: 0xa000, 0x7e5: 0xa000,
+ 0x7ed: 0x3c22, 0x7ee: 0x2f60, 0x7ef: 0x2f6a,
+ 0x7f0: 0x3c30, 0x7f1: 0x3c37, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c3e, 0x7f5: 0x3c45,
+ 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4c, 0x7f9: 0x3c53, 0x7fa: 0xa000, 0x7fb: 0xa000,
+ 0x7fc: 0xa000, 0x7fd: 0xa000,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3c5a, 0x801: 0x3c61, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c76, 0x805: 0x3c7d,
+ 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c84, 0x809: 0x3c8b,
+ 0x811: 0xa000,
+ 0x812: 0xa000,
+ 0x822: 0xa000,
+ 0x828: 0xa000, 0x829: 0xa000,
+ 0x82b: 0xa000, 0x82c: 0x3ca0, 0x82d: 0x3ca7, 0x82e: 0x3cae, 0x82f: 0x3cb5,
+ 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000,
+ // Block 0x21, offset 0x840
+ 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029,
+ 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882,
+ 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894,
+ 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a68, 0x875: 0x1a6c,
+ 0x876: 0x1a70, 0x877: 0x1a74, 0x878: 0x1a78, 0x879: 0x1a7c, 0x87a: 0x1a80, 0x87b: 0x1a84,
+ 0x87c: 0x1a88, 0x87d: 0x1c80, 0x87e: 0x1c85, 0x87f: 0x1c8a,
+ // Block 0x22, offset 0x880
+ 0x880: 0x1c8f, 0x881: 0x1c94, 0x882: 0x1c99, 0x883: 0x1c9e, 0x884: 0x1ca3, 0x885: 0x1ca8,
+ 0x886: 0x1cad, 0x887: 0x1cb2, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb,
+ 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b60,
+ 0x892: 0x1b64, 0x893: 0x1b68, 0x894: 0x1b6c, 0x895: 0x1b70, 0x896: 0x1b74, 0x897: 0x1b78,
+ 0x898: 0x1b7c, 0x899: 0x1b80, 0x89a: 0x1b84, 0x89b: 0x1b88, 0x89c: 0x1af4, 0x89d: 0x1af8,
+ 0x89e: 0x1afc, 0x89f: 0x1b00, 0x8a0: 0x1b04, 0x8a1: 0x1b08, 0x8a2: 0x1b0c, 0x8a3: 0x1b10,
+ 0x8a4: 0x1b14, 0x8a5: 0x1b18, 0x8a6: 0x1b1c, 0x8a7: 0x1b20, 0x8a8: 0x1b24, 0x8a9: 0x1b28,
+ 0x8aa: 0x1b2c, 0x8ab: 0x1b30, 0x8ac: 0x1b34, 0x8ad: 0x1b38, 0x8ae: 0x1b3c, 0x8af: 0x1b40,
+ 0x8b0: 0x1b44, 0x8b1: 0x1b48, 0x8b2: 0x1b4c, 0x8b3: 0x1b50, 0x8b4: 0x1b54, 0x8b5: 0x1b58,
+ 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d,
+ 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713,
+ 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab,
+ 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803,
+ 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887,
+ 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db,
+ 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb,
+ 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b,
+ 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7,
+ 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33,
+ 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63,
+ 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb,
+ 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b,
+ 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb,
+ 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3,
+ 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f,
+ 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83,
+ 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7,
+ 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f,
+ 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf,
+ 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f,
+ 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187,
+ // Block 0x25, offset 0x940
+ 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3,
+ 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb,
+ 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b,
+ 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b,
+ 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf,
+ 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f,
+ 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f,
+ 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503,
+ 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f,
+ 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f,
+ 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593,
+ // Block 0x26, offset 0x980
+ 0x986: 0xa000, 0x98b: 0xa000,
+ 0x98c: 0x3f08, 0x98d: 0xa000, 0x98e: 0x3f10, 0x98f: 0xa000, 0x990: 0x3f18, 0x991: 0xa000,
+ 0x992: 0x3f20, 0x993: 0xa000, 0x994: 0x3f28, 0x995: 0xa000, 0x996: 0x3f30, 0x997: 0xa000,
+ 0x998: 0x3f38, 0x999: 0xa000, 0x99a: 0x3f40, 0x99b: 0xa000, 0x99c: 0x3f48, 0x99d: 0xa000,
+ 0x99e: 0x3f50, 0x99f: 0xa000, 0x9a0: 0x3f58, 0x9a1: 0xa000, 0x9a2: 0x3f60,
+ 0x9a4: 0xa000, 0x9a5: 0x3f68, 0x9a6: 0xa000, 0x9a7: 0x3f70, 0x9a8: 0xa000, 0x9a9: 0x3f78,
+ 0x9af: 0xa000,
+ 0x9b0: 0x3f80, 0x9b1: 0x3f88, 0x9b2: 0xa000, 0x9b3: 0x3f90, 0x9b4: 0x3f98, 0x9b5: 0xa000,
+ 0x9b6: 0x3fa0, 0x9b7: 0x3fa8, 0x9b8: 0xa000, 0x9b9: 0x3fb0, 0x9ba: 0x3fb8, 0x9bb: 0xa000,
+ 0x9bc: 0x3fc0, 0x9bd: 0x3fc8,
+ // Block 0x27, offset 0x9c0
+ 0x9d4: 0x3f00,
+ 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42dc, 0x9dc: 0x42e2, 0x9dd: 0xa000,
+ 0x9de: 0x3fd0, 0x9df: 0x26b4,
+ 0x9e6: 0xa000,
+ 0x9eb: 0xa000, 0x9ec: 0x3fe0, 0x9ed: 0xa000, 0x9ee: 0x3fe8, 0x9ef: 0xa000,
+ 0x9f0: 0x3ff0, 0x9f1: 0xa000, 0x9f2: 0x3ff8, 0x9f3: 0xa000, 0x9f4: 0x4000, 0x9f5: 0xa000,
+ 0x9f6: 0x4008, 0x9f7: 0xa000, 0x9f8: 0x4010, 0x9f9: 0xa000, 0x9fa: 0x4018, 0x9fb: 0xa000,
+ 0x9fc: 0x4020, 0x9fd: 0xa000, 0x9fe: 0x4028, 0x9ff: 0xa000,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x4030, 0xa01: 0xa000, 0xa02: 0x4038, 0xa04: 0xa000, 0xa05: 0x4040,
+ 0xa06: 0xa000, 0xa07: 0x4048, 0xa08: 0xa000, 0xa09: 0x4050,
+ 0xa0f: 0xa000, 0xa10: 0x4058, 0xa11: 0x4060,
+ 0xa12: 0xa000, 0xa13: 0x4068, 0xa14: 0x4070, 0xa15: 0xa000, 0xa16: 0x4078, 0xa17: 0x4080,
+ 0xa18: 0xa000, 0xa19: 0x4088, 0xa1a: 0x4090, 0xa1b: 0xa000, 0xa1c: 0x4098, 0xa1d: 0x40a0,
+ 0xa2f: 0xa000,
+ 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fd8,
+ 0xa37: 0x40a8, 0xa38: 0x40b0, 0xa39: 0x40b8, 0xa3a: 0x40c0,
+ 0xa3d: 0xa000, 0xa3e: 0x40c8, 0xa3f: 0x26c9,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337,
+ 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f,
+ 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49bd, 0xa50: 0x49c3, 0xa51: 0x49c9,
+ 0xa52: 0x49cf, 0xa53: 0x49d5, 0xa54: 0x49db, 0xa55: 0x49e1, 0xa56: 0x49e7, 0xa57: 0x49ed,
+ 0xa58: 0x49f3, 0xa59: 0x49f9, 0xa5a: 0x49ff, 0xa5b: 0x4a05, 0xa5c: 0x4a0b, 0xa5d: 0x4a11,
+ 0xa5e: 0x4a17, 0xa5f: 0x4a1d, 0xa60: 0x4a23, 0xa61: 0x4a29, 0xa62: 0x4a2f, 0xa63: 0x4a35,
+ 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef,
+ 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403,
+ 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383,
+ 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b,
+ 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb,
+ 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7,
+ 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3,
+ 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7,
+ 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff,
+ 0xa9e: 0x098f, 0xa9f: 0x072f,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x2054, 0xac1: 0x205a, 0xac2: 0x2060, 0xac3: 0x2066, 0xac4: 0x206c, 0xac5: 0x2072,
+ 0xac6: 0x2078, 0xac7: 0x207e, 0xac8: 0x2084, 0xac9: 0x208a, 0xaca: 0x2090, 0xacb: 0x2096,
+ 0xacc: 0x209c, 0xacd: 0x20a2, 0xace: 0x2726, 0xacf: 0x272f, 0xad0: 0x2738, 0xad1: 0x2741,
+ 0xad2: 0x274a, 0xad3: 0x2753, 0xad4: 0x275c, 0xad5: 0x2765, 0xad6: 0x276e, 0xad7: 0x2780,
+ 0xad8: 0x2789, 0xad9: 0x2792, 0xada: 0x279b, 0xadb: 0x27a4, 0xadc: 0x2777, 0xadd: 0x2bac,
+ 0xade: 0x2aed, 0xae0: 0x20a8, 0xae1: 0x20c0, 0xae2: 0x20b4, 0xae3: 0x2108,
+ 0xae4: 0x20c6, 0xae5: 0x20e4, 0xae6: 0x20ae, 0xae7: 0x20de, 0xae8: 0x20ba, 0xae9: 0x20f0,
+ 0xaea: 0x2120, 0xaeb: 0x213e, 0xaec: 0x2138, 0xaed: 0x212c, 0xaee: 0x217a, 0xaef: 0x210e,
+ 0xaf0: 0x211a, 0xaf1: 0x2132, 0xaf2: 0x2126, 0xaf3: 0x2150, 0xaf4: 0x20fc, 0xaf5: 0x2144,
+ 0xaf6: 0x216e, 0xaf7: 0x2156, 0xaf8: 0x20ea, 0xaf9: 0x20cc, 0xafa: 0x2102, 0xafb: 0x2114,
+ 0xafc: 0x214a, 0xafd: 0x20d2, 0xafe: 0x2174, 0xaff: 0x20f6,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x215c, 0xb01: 0x20d8, 0xb02: 0x2162, 0xb03: 0x2168, 0xb04: 0x092f, 0xb05: 0x0b03,
+ 0xb06: 0x0ca7, 0xb07: 0x10c7,
+ 0xb10: 0x1bc4, 0xb11: 0x18a9,
+ 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb,
+ 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3,
+ 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327,
+ 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b,
+ 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e18, 0xb2f: 0x2e20,
+ 0xb30: 0x2e28, 0xb31: 0x2e30, 0xb32: 0x2e38, 0xb33: 0x2e40, 0xb34: 0x2e48, 0xb35: 0x2e50,
+ 0xb36: 0x2e60, 0xb37: 0x2e68, 0xb38: 0x2e70, 0xb39: 0x2e78, 0xb3a: 0x2e80, 0xb3b: 0x2e88,
+ 0xb3c: 0x2ed3, 0xb3d: 0x2e9b, 0xb3e: 0x2e58,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af,
+ 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f,
+ 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b,
+ 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f,
+ 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f,
+ 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b,
+ 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f,
+ 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b,
+ 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee,
+ 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900,
+ 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x1cc6, 0xb81: 0x1cd5, 0xb82: 0x1ce4, 0xb83: 0x1cf3, 0xb84: 0x1d02, 0xb85: 0x1d11,
+ 0xb86: 0x1d20, 0xb87: 0x1d2f, 0xb88: 0x1d3e, 0xb89: 0x218c, 0xb8a: 0x219e, 0xb8b: 0x21b0,
+ 0xb8c: 0x1954, 0xb8d: 0x1c04, 0xb8e: 0x19d2, 0xb8f: 0x1ba8, 0xb90: 0x04cb, 0xb91: 0x04d3,
+ 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7,
+ 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f,
+ 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b,
+ 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543,
+ 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b,
+ 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f,
+ 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597,
+ 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x2b0c, 0xbc1: 0x29a8, 0xbc2: 0x2b1c, 0xbc3: 0x2880, 0xbc4: 0x2ee4, 0xbc5: 0x288a,
+ 0xbc6: 0x2894, 0xbc7: 0x2f28, 0xbc8: 0x29b5, 0xbc9: 0x289e, 0xbca: 0x28a8, 0xbcb: 0x28b2,
+ 0xbcc: 0x29dc, 0xbcd: 0x29e9, 0xbce: 0x29c2, 0xbcf: 0x29cf, 0xbd0: 0x2ea9, 0xbd1: 0x29f6,
+ 0xbd2: 0x2a03, 0xbd3: 0x2bbe, 0xbd4: 0x26bb, 0xbd5: 0x2bd1, 0xbd6: 0x2be4, 0xbd7: 0x2b2c,
+ 0xbd8: 0x2a10, 0xbd9: 0x2bf7, 0xbda: 0x2c0a, 0xbdb: 0x2a1d, 0xbdc: 0x28bc, 0xbdd: 0x28c6,
+ 0xbde: 0x2eb7, 0xbdf: 0x2a2a, 0xbe0: 0x2b3c, 0xbe1: 0x2ef5, 0xbe2: 0x28d0, 0xbe3: 0x28da,
+ 0xbe4: 0x2a37, 0xbe5: 0x28e4, 0xbe6: 0x28ee, 0xbe7: 0x26d0, 0xbe8: 0x26d7, 0xbe9: 0x28f8,
+ 0xbea: 0x2902, 0xbeb: 0x2c1d, 0xbec: 0x2a44, 0xbed: 0x2b4c, 0xbee: 0x2c30, 0xbef: 0x2a51,
+ 0xbf0: 0x2916, 0xbf1: 0x290c, 0xbf2: 0x2f3c, 0xbf3: 0x2a5e, 0xbf4: 0x2c43, 0xbf5: 0x2920,
+ 0xbf6: 0x2b5c, 0xbf7: 0x292a, 0xbf8: 0x2a78, 0xbf9: 0x2934, 0xbfa: 0x2a85, 0xbfb: 0x2f06,
+ 0xbfc: 0x2a6b, 0xbfd: 0x2b6c, 0xbfe: 0x2a92, 0xbff: 0x26de,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x2f17, 0xc01: 0x293e, 0xc02: 0x2948, 0xc03: 0x2a9f, 0xc04: 0x2952, 0xc05: 0x295c,
+ 0xc06: 0x2966, 0xc07: 0x2b7c, 0xc08: 0x2aac, 0xc09: 0x26e5, 0xc0a: 0x2c56, 0xc0b: 0x2e90,
+ 0xc0c: 0x2b8c, 0xc0d: 0x2ab9, 0xc0e: 0x2ec5, 0xc0f: 0x2970, 0xc10: 0x297a, 0xc11: 0x2ac6,
+ 0xc12: 0x26ec, 0xc13: 0x2ad3, 0xc14: 0x2b9c, 0xc15: 0x26f3, 0xc16: 0x2c69, 0xc17: 0x2984,
+ 0xc18: 0x1cb7, 0xc19: 0x1ccb, 0xc1a: 0x1cda, 0xc1b: 0x1ce9, 0xc1c: 0x1cf8, 0xc1d: 0x1d07,
+ 0xc1e: 0x1d16, 0xc1f: 0x1d25, 0xc20: 0x1d34, 0xc21: 0x1d43, 0xc22: 0x2192, 0xc23: 0x21a4,
+ 0xc24: 0x21b6, 0xc25: 0x21c2, 0xc26: 0x21ce, 0xc27: 0x21da, 0xc28: 0x21e6, 0xc29: 0x21f2,
+ 0xc2a: 0x21fe, 0xc2b: 0x220a, 0xc2c: 0x2246, 0xc2d: 0x2252, 0xc2e: 0x225e, 0xc2f: 0x226a,
+ 0xc30: 0x2276, 0xc31: 0x1c14, 0xc32: 0x19c6, 0xc33: 0x1936, 0xc34: 0x1be4, 0xc35: 0x1a47,
+ 0xc36: 0x1a56, 0xc37: 0x19cc, 0xc38: 0x1bfc, 0xc39: 0x1c00, 0xc3a: 0x1960, 0xc3b: 0x2701,
+ 0xc3c: 0x270f, 0xc3d: 0x26fa, 0xc3e: 0x2708, 0xc3f: 0x2ae0,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1a4a, 0xc41: 0x1a32, 0xc42: 0x1c60, 0xc43: 0x1a1a, 0xc44: 0x19f3, 0xc45: 0x1969,
+ 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf0, 0xc49: 0x1d52, 0xc4a: 0x1a4d, 0xc4b: 0x1a35,
+ 0xc4c: 0x1c64, 0xc4d: 0x1c70, 0xc4e: 0x1a26, 0xc4f: 0x19fc, 0xc50: 0x1957, 0xc51: 0x1c1c,
+ 0xc52: 0x1bb0, 0xc53: 0x1b9c, 0xc54: 0x1bcc, 0xc55: 0x1c74, 0xc56: 0x1a29, 0xc57: 0x19c9,
+ 0xc58: 0x19ff, 0xc59: 0x19de, 0xc5a: 0x1a41, 0xc5b: 0x1c78, 0xc5c: 0x1a2c, 0xc5d: 0x19c0,
+ 0xc5e: 0x1a02, 0xc5f: 0x1c3c, 0xc60: 0x1bf4, 0xc61: 0x1a14, 0xc62: 0x1c24, 0xc63: 0x1c40,
+ 0xc64: 0x1bf8, 0xc65: 0x1a17, 0xc66: 0x1c28, 0xc67: 0x22e8, 0xc68: 0x22fc, 0xc69: 0x1996,
+ 0xc6a: 0x1c20, 0xc6b: 0x1bb4, 0xc6c: 0x1ba0, 0xc6d: 0x1c48, 0xc6e: 0x2716, 0xc6f: 0x27ad,
+ 0xc70: 0x1a59, 0xc71: 0x1a44, 0xc72: 0x1c7c, 0xc73: 0x1a2f, 0xc74: 0x1a50, 0xc75: 0x1a38,
+ 0xc76: 0x1c68, 0xc77: 0x1a1d, 0xc78: 0x19f6, 0xc79: 0x1981, 0xc7a: 0x1a53, 0xc7b: 0x1a3b,
+ 0xc7c: 0x1c6c, 0xc7d: 0x1a20, 0xc7e: 0x19f9, 0xc7f: 0x1984,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x1c2c, 0xc81: 0x1bb8, 0xc82: 0x1d4d, 0xc83: 0x1939, 0xc84: 0x19ba, 0xc85: 0x19bd,
+ 0xc86: 0x22f5, 0xc87: 0x1b94, 0xc88: 0x19c3, 0xc89: 0x194b, 0xc8a: 0x19e1, 0xc8b: 0x194e,
+ 0xc8c: 0x19ea, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a05, 0xc90: 0x1a0b, 0xc91: 0x1a0e,
+ 0xc92: 0x1c30, 0xc93: 0x1a11, 0xc94: 0x1a23, 0xc95: 0x1c38, 0xc96: 0x1c44, 0xc97: 0x1990,
+ 0xc98: 0x1d57, 0xc99: 0x1bbc, 0xc9a: 0x1993, 0xc9b: 0x1a5c, 0xc9c: 0x19a5, 0xc9d: 0x19b4,
+ 0xc9e: 0x22e2, 0xc9f: 0x22dc, 0xca0: 0x1cc1, 0xca1: 0x1cd0, 0xca2: 0x1cdf, 0xca3: 0x1cee,
+ 0xca4: 0x1cfd, 0xca5: 0x1d0c, 0xca6: 0x1d1b, 0xca7: 0x1d2a, 0xca8: 0x1d39, 0xca9: 0x2186,
+ 0xcaa: 0x2198, 0xcab: 0x21aa, 0xcac: 0x21bc, 0xcad: 0x21c8, 0xcae: 0x21d4, 0xcaf: 0x21e0,
+ 0xcb0: 0x21ec, 0xcb1: 0x21f8, 0xcb2: 0x2204, 0xcb3: 0x2240, 0xcb4: 0x224c, 0xcb5: 0x2258,
+ 0xcb6: 0x2264, 0xcb7: 0x2270, 0xcb8: 0x227c, 0xcb9: 0x2282, 0xcba: 0x2288, 0xcbb: 0x228e,
+ 0xcbc: 0x2294, 0xcbd: 0x22a6, 0xcbe: 0x22ac, 0xcbf: 0x1c10,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb,
+ 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943,
+ 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3,
+ 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43,
+ 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87,
+ 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283,
+ 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f,
+ 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853,
+ 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b,
+ 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b,
+ 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b,
+ 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f,
+ 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7,
+ 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127,
+ 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357,
+ 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873,
+ 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3,
+ 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b,
+ 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57,
+ 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb,
+ 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f,
+ 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3,
+ 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83,
+ 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193,
+ 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b,
+ 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b,
+ 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f,
+ 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b,
+ 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753,
+ 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777,
+ 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 0x0ce3,
+ 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47,
+ 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af,
+ 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df,
+ 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817,
+ 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3,
+ 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457,
+ 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b,
+ 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27,
+ 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f,
+ 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03,
+ 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27,
+ 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af,
+ 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3,
+ 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb,
+ 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353,
+ 0xde5: 0x1407, 0xde6: 0x1433,
+ 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7,
+ 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897,
+ 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93,
+ 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b,
+ 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f,
+ 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f,
+ 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f,
+ 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff,
+ 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f,
+ 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f,
+ 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3,
+ 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7,
+ 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963,
+ 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b,
+ 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb,
+ 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf,
+ 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f,
+ 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013,
+ 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f,
+ 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b,
+ 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b,
+ 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 0xe75: 0x12cb,
+ 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343,
+ 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b,
+ 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b,
+ 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2,
+ 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809,
+ 0xe98: 0x1617, 0xe99: 0x1627,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x19d5, 0xec1: 0x19d8, 0xec2: 0x19db, 0xec3: 0x1c08, 0xec4: 0x1c0c, 0xec5: 0x1a5f,
+ 0xec6: 0x1a5f,
+ 0xed3: 0x1d75, 0xed4: 0x1d66, 0xed5: 0x1d6b, 0xed6: 0x1d7a, 0xed7: 0x1d70,
+ 0xedd: 0x4390,
+ 0xede: 0x8115, 0xedf: 0x4402, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221,
+ 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017,
+ 0xeea: 0x43f0, 0xeeb: 0x43f6, 0xeec: 0x44f4, 0xeed: 0x44fc, 0xeee: 0x4348, 0xeef: 0x434e,
+ 0xef0: 0x4354, 0xef1: 0x435a, 0xef2: 0x4366, 0xef3: 0x436c, 0xef4: 0x4372, 0xef5: 0x437e,
+ 0xef6: 0x4384, 0xef8: 0x438a, 0xef9: 0x4396, 0xefa: 0x439c, 0xefb: 0x43a2,
+ 0xefc: 0x43ae, 0xefe: 0x43b4,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x43ba, 0xf01: 0x43c0, 0xf03: 0x43c6, 0xf04: 0x43cc,
+ 0xf06: 0x43d8, 0xf07: 0x43de, 0xf08: 0x43e4, 0xf09: 0x43ea, 0xf0a: 0x43fc, 0xf0b: 0x4378,
+ 0xf0c: 0x4360, 0xf0d: 0x43a8, 0xf0e: 0x43d2, 0xf0f: 0x1d7f, 0xf10: 0x0299, 0xf11: 0x0299,
+ 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5,
+ 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab,
+ 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8,
+ 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c,
+ 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2,
+ 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1,
+ 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4,
+ 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd,
+ 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9,
+ 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5,
+ 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1,
+ 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de,
+ 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7,
+ 0xf64: 0x446e, 0xf65: 0x446e, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed,
+ 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308,
+ 0xf70: 0x4468, 0xf71: 0x4468,
+ // Block 0x3e, offset 0xf80
+ 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6,
+ 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x204f,
+ 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc,
+ 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d,
+ 0xfaa: 0x25aa, 0xfab: 0x25aa, 0xfac: 0x261a, 0xfad: 0x261a, 0xfae: 0x25e9, 0xfaf: 0x25e9,
+ 0xfb0: 0x2605, 0xfb1: 0x2605, 0xfb2: 0x25fe, 0xfb3: 0x25fe, 0xfb4: 0x260c, 0xfb5: 0x260c,
+ 0xfb6: 0x2613, 0xfb7: 0x2613, 0xfb8: 0x2613, 0xfb9: 0x25f0, 0xfba: 0x25f0, 0xfbb: 0x25f0,
+ 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x25b1, 0xfc1: 0x25b8, 0xfc2: 0x25d4, 0xfc3: 0x25f0, 0xfc4: 0x25f7, 0xfc5: 0x1d89,
+ 0xfc6: 0x1d8e, 0xfc7: 0x1d93, 0xfc8: 0x1da2, 0xfc9: 0x1db1, 0xfca: 0x1db6, 0xfcb: 0x1dbb,
+ 0xfcc: 0x1dc0, 0xfcd: 0x1dc5, 0xfce: 0x1dd4, 0xfcf: 0x1de3, 0xfd0: 0x1de8, 0xfd1: 0x1ded,
+ 0xfd2: 0x1dfc, 0xfd3: 0x1e0b, 0xfd4: 0x1e10, 0xfd5: 0x1e15, 0xfd6: 0x1e1a, 0xfd7: 0x1e29,
+ 0xfd8: 0x1e2e, 0xfd9: 0x1e3d, 0xfda: 0x1e42, 0xfdb: 0x1e47, 0xfdc: 0x1e56, 0xfdd: 0x1e5b,
+ 0xfde: 0x1e60, 0xfdf: 0x1e6a, 0xfe0: 0x1ea6, 0xfe1: 0x1eb5, 0xfe2: 0x1ec4, 0xfe3: 0x1ec9,
+ 0xfe4: 0x1ece, 0xfe5: 0x1ed8, 0xfe6: 0x1ee7, 0xfe7: 0x1eec, 0xfe8: 0x1efb, 0xfe9: 0x1f00,
+ 0xfea: 0x1f05, 0xfeb: 0x1f14, 0xfec: 0x1f19, 0xfed: 0x1f28, 0xfee: 0x1f2d, 0xfef: 0x1f32,
+ 0xff0: 0x1f37, 0xff1: 0x1f3c, 0xff2: 0x1f41, 0xff3: 0x1f46, 0xff4: 0x1f4b, 0xff5: 0x1f50,
+ 0xff6: 0x1f55, 0xff7: 0x1f5a, 0xff8: 0x1f5f, 0xff9: 0x1f64, 0xffa: 0x1f69, 0xffb: 0x1f6e,
+ 0xffc: 0x1f73, 0xffd: 0x1f78, 0xffe: 0x1f7d, 0xfff: 0x1f87,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x1f8c, 0x1001: 0x1f91, 0x1002: 0x1f96, 0x1003: 0x1fa0, 0x1004: 0x1fa5, 0x1005: 0x1faf,
+ 0x1006: 0x1fb4, 0x1007: 0x1fb9, 0x1008: 0x1fbe, 0x1009: 0x1fc3, 0x100a: 0x1fc8, 0x100b: 0x1fcd,
+ 0x100c: 0x1fd2, 0x100d: 0x1fd7, 0x100e: 0x1fe6, 0x100f: 0x1ff5, 0x1010: 0x1ffa, 0x1011: 0x1fff,
+ 0x1012: 0x2004, 0x1013: 0x2009, 0x1014: 0x200e, 0x1015: 0x2018, 0x1016: 0x201d, 0x1017: 0x2022,
+ 0x1018: 0x2031, 0x1019: 0x2040, 0x101a: 0x2045, 0x101b: 0x4420, 0x101c: 0x4426, 0x101d: 0x445c,
+ 0x101e: 0x44b3, 0x101f: 0x44ba, 0x1020: 0x44c1, 0x1021: 0x44c8, 0x1022: 0x44cf, 0x1023: 0x44d6,
+ 0x1024: 0x25c6, 0x1025: 0x25cd, 0x1026: 0x25d4, 0x1027: 0x25db, 0x1028: 0x25f0, 0x1029: 0x25f7,
+ 0x102a: 0x1d98, 0x102b: 0x1d9d, 0x102c: 0x1da2, 0x102d: 0x1da7, 0x102e: 0x1db1, 0x102f: 0x1db6,
+ 0x1030: 0x1dca, 0x1031: 0x1dcf, 0x1032: 0x1dd4, 0x1033: 0x1dd9, 0x1034: 0x1de3, 0x1035: 0x1de8,
+ 0x1036: 0x1df2, 0x1037: 0x1df7, 0x1038: 0x1dfc, 0x1039: 0x1e01, 0x103a: 0x1e0b, 0x103b: 0x1e10,
+ 0x103c: 0x1f3c, 0x103d: 0x1f41, 0x103e: 0x1f50, 0x103f: 0x1f55,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x1f5a, 0x1041: 0x1f6e, 0x1042: 0x1f73, 0x1043: 0x1f78, 0x1044: 0x1f7d, 0x1045: 0x1f96,
+ 0x1046: 0x1fa0, 0x1047: 0x1fa5, 0x1048: 0x1faa, 0x1049: 0x1fbe, 0x104a: 0x1fdc, 0x104b: 0x1fe1,
+ 0x104c: 0x1fe6, 0x104d: 0x1feb, 0x104e: 0x1ff5, 0x104f: 0x1ffa, 0x1050: 0x445c, 0x1051: 0x2027,
+ 0x1052: 0x202c, 0x1053: 0x2031, 0x1054: 0x2036, 0x1055: 0x2040, 0x1056: 0x2045, 0x1057: 0x25b1,
+ 0x1058: 0x25b8, 0x1059: 0x25bf, 0x105a: 0x25d4, 0x105b: 0x25e2, 0x105c: 0x1d89, 0x105d: 0x1d8e,
+ 0x105e: 0x1d93, 0x105f: 0x1da2, 0x1060: 0x1dac, 0x1061: 0x1dbb, 0x1062: 0x1dc0, 0x1063: 0x1dc5,
+ 0x1064: 0x1dd4, 0x1065: 0x1dde, 0x1066: 0x1dfc, 0x1067: 0x1e15, 0x1068: 0x1e1a, 0x1069: 0x1e29,
+ 0x106a: 0x1e2e, 0x106b: 0x1e3d, 0x106c: 0x1e47, 0x106d: 0x1e56, 0x106e: 0x1e5b, 0x106f: 0x1e60,
+ 0x1070: 0x1e6a, 0x1071: 0x1ea6, 0x1072: 0x1eab, 0x1073: 0x1eb5, 0x1074: 0x1ec4, 0x1075: 0x1ec9,
+ 0x1076: 0x1ece, 0x1077: 0x1ed8, 0x1078: 0x1ee7, 0x1079: 0x1efb, 0x107a: 0x1f00, 0x107b: 0x1f05,
+ 0x107c: 0x1f14, 0x107d: 0x1f19, 0x107e: 0x1f28, 0x107f: 0x1f2d,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x1f32, 0x1081: 0x1f37, 0x1082: 0x1f46, 0x1083: 0x1f4b, 0x1084: 0x1f5f, 0x1085: 0x1f64,
+ 0x1086: 0x1f69, 0x1087: 0x1f6e, 0x1088: 0x1f73, 0x1089: 0x1f87, 0x108a: 0x1f8c, 0x108b: 0x1f91,
+ 0x108c: 0x1f96, 0x108d: 0x1f9b, 0x108e: 0x1faf, 0x108f: 0x1fb4, 0x1090: 0x1fb9, 0x1091: 0x1fbe,
+ 0x1092: 0x1fcd, 0x1093: 0x1fd2, 0x1094: 0x1fd7, 0x1095: 0x1fe6, 0x1096: 0x1ff0, 0x1097: 0x1fff,
+ 0x1098: 0x2004, 0x1099: 0x4450, 0x109a: 0x2018, 0x109b: 0x201d, 0x109c: 0x2022, 0x109d: 0x2031,
+ 0x109e: 0x203b, 0x109f: 0x25d4, 0x10a0: 0x25e2, 0x10a1: 0x1da2, 0x10a2: 0x1dac, 0x10a3: 0x1dd4,
+ 0x10a4: 0x1dde, 0x10a5: 0x1dfc, 0x10a6: 0x1e06, 0x10a7: 0x1e6a, 0x10a8: 0x1e6f, 0x10a9: 0x1e92,
+ 0x10aa: 0x1e97, 0x10ab: 0x1f6e, 0x10ac: 0x1f73, 0x10ad: 0x1f96, 0x10ae: 0x1fe6, 0x10af: 0x1ff0,
+ 0x10b0: 0x2031, 0x10b1: 0x203b, 0x10b2: 0x4504, 0x10b3: 0x450c, 0x10b4: 0x4514, 0x10b5: 0x1ef1,
+ 0x10b6: 0x1ef6, 0x10b7: 0x1f0a, 0x10b8: 0x1f0f, 0x10b9: 0x1f1e, 0x10ba: 0x1f23, 0x10bb: 0x1e74,
+ 0x10bc: 0x1e79, 0x10bd: 0x1e9c, 0x10be: 0x1ea1, 0x10bf: 0x1e33,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x1e38, 0x10c1: 0x1e1f, 0x10c2: 0x1e24, 0x10c3: 0x1e4c, 0x10c4: 0x1e51, 0x10c5: 0x1eba,
+ 0x10c6: 0x1ebf, 0x10c7: 0x1edd, 0x10c8: 0x1ee2, 0x10c9: 0x1e7e, 0x10ca: 0x1e83, 0x10cb: 0x1e88,
+ 0x10cc: 0x1e92, 0x10cd: 0x1e8d, 0x10ce: 0x1e65, 0x10cf: 0x1eb0, 0x10d0: 0x1ed3, 0x10d1: 0x1ef1,
+ 0x10d2: 0x1ef6, 0x10d3: 0x1f0a, 0x10d4: 0x1f0f, 0x10d5: 0x1f1e, 0x10d6: 0x1f23, 0x10d7: 0x1e74,
+ 0x10d8: 0x1e79, 0x10d9: 0x1e9c, 0x10da: 0x1ea1, 0x10db: 0x1e33, 0x10dc: 0x1e38, 0x10dd: 0x1e1f,
+ 0x10de: 0x1e24, 0x10df: 0x1e4c, 0x10e0: 0x1e51, 0x10e1: 0x1eba, 0x10e2: 0x1ebf, 0x10e3: 0x1edd,
+ 0x10e4: 0x1ee2, 0x10e5: 0x1e7e, 0x10e6: 0x1e83, 0x10e7: 0x1e88, 0x10e8: 0x1e92, 0x10e9: 0x1e8d,
+ 0x10ea: 0x1e65, 0x10eb: 0x1eb0, 0x10ec: 0x1ed3, 0x10ed: 0x1e7e, 0x10ee: 0x1e83, 0x10ef: 0x1e88,
+ 0x10f0: 0x1e92, 0x10f1: 0x1e6f, 0x10f2: 0x1e97, 0x10f3: 0x1eec, 0x10f4: 0x1e56, 0x10f5: 0x1e5b,
+ 0x10f6: 0x1e60, 0x10f7: 0x1e7e, 0x10f8: 0x1e83, 0x10f9: 0x1e88, 0x10fa: 0x1eec, 0x10fb: 0x1efb,
+ 0x10fc: 0x4408, 0x10fd: 0x4408,
+ // Block 0x44, offset 0x1100
+ 0x1110: 0x2311, 0x1111: 0x2326,
+ 0x1112: 0x2326, 0x1113: 0x232d, 0x1114: 0x2334, 0x1115: 0x2349, 0x1116: 0x2350, 0x1117: 0x2357,
+ 0x1118: 0x237a, 0x1119: 0x237a, 0x111a: 0x239d, 0x111b: 0x2396, 0x111c: 0x23b2, 0x111d: 0x23a4,
+ 0x111e: 0x23ab, 0x111f: 0x23ce, 0x1120: 0x23ce, 0x1121: 0x23c7, 0x1122: 0x23d5, 0x1123: 0x23d5,
+ 0x1124: 0x23ff, 0x1125: 0x23ff, 0x1126: 0x241b, 0x1127: 0x23e3, 0x1128: 0x23e3, 0x1129: 0x23dc,
+ 0x112a: 0x23f1, 0x112b: 0x23f1, 0x112c: 0x23f8, 0x112d: 0x23f8, 0x112e: 0x2422, 0x112f: 0x2430,
+ 0x1130: 0x2430, 0x1131: 0x2437, 0x1132: 0x2437, 0x1133: 0x243e, 0x1134: 0x2445, 0x1135: 0x244c,
+ 0x1136: 0x2453, 0x1137: 0x2453, 0x1138: 0x245a, 0x1139: 0x2468, 0x113a: 0x2476, 0x113b: 0x246f,
+ 0x113c: 0x247d, 0x113d: 0x247d, 0x113e: 0x2492, 0x113f: 0x2499,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x24ca, 0x1141: 0x24d8, 0x1142: 0x24d1, 0x1143: 0x24b5, 0x1144: 0x24b5, 0x1145: 0x24df,
+ 0x1146: 0x24df, 0x1147: 0x24e6, 0x1148: 0x24e6, 0x1149: 0x2510, 0x114a: 0x2517, 0x114b: 0x251e,
+ 0x114c: 0x24f4, 0x114d: 0x2502, 0x114e: 0x2525, 0x114f: 0x252c,
+ 0x1152: 0x24fb, 0x1153: 0x2580, 0x1154: 0x2587, 0x1155: 0x255d, 0x1156: 0x2564, 0x1157: 0x2548,
+ 0x1158: 0x2548, 0x1159: 0x254f, 0x115a: 0x2579, 0x115b: 0x2572, 0x115c: 0x259c, 0x115d: 0x259c,
+ 0x115e: 0x230a, 0x115f: 0x231f, 0x1160: 0x2318, 0x1161: 0x2342, 0x1162: 0x233b, 0x1163: 0x2365,
+ 0x1164: 0x235e, 0x1165: 0x2388, 0x1166: 0x236c, 0x1167: 0x2381, 0x1168: 0x23b9, 0x1169: 0x2406,
+ 0x116a: 0x23ea, 0x116b: 0x2429, 0x116c: 0x24c3, 0x116d: 0x24ed, 0x116e: 0x2595, 0x116f: 0x258e,
+ 0x1170: 0x25a3, 0x1171: 0x253a, 0x1172: 0x24a0, 0x1173: 0x256b, 0x1174: 0x2492, 0x1175: 0x24ca,
+ 0x1176: 0x2461, 0x1177: 0x24ae, 0x1178: 0x2541, 0x1179: 0x2533, 0x117a: 0x24bc, 0x117b: 0x24a7,
+ 0x117c: 0x24bc, 0x117d: 0x2541, 0x117e: 0x2373, 0x117f: 0x238f,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x2509, 0x1181: 0x2484, 0x1182: 0x2303, 0x1183: 0x24a7, 0x1184: 0x244c, 0x1185: 0x241b,
+ 0x1186: 0x23c0, 0x1187: 0x2556,
+ 0x11b0: 0x2414, 0x11b1: 0x248b, 0x11b2: 0x27bf, 0x11b3: 0x27b6, 0x11b4: 0x27ec, 0x11b5: 0x27da,
+ 0x11b6: 0x27c8, 0x11b7: 0x27e3, 0x11b8: 0x27f5, 0x11b9: 0x240d, 0x11ba: 0x2c7c, 0x11bb: 0x2afc,
+ 0x11bc: 0x27d1,
+ // Block 0x47, offset 0x11c0
+ 0x11d0: 0x0019, 0x11d1: 0x0483,
+ 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf,
+ 0x11d8: 0x04c3, 0x11d9: 0x1b5c,
+ 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132,
+ 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d,
+ 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132,
+ 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011,
+ 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab,
+ 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7,
+ 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4269, 0x120a: 0x4269, 0x120b: 0x4269,
+ 0x120c: 0x4269, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483,
+ 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003,
+ 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7,
+ 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b,
+ 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009,
+ 0x122a: 0x000b, 0x122b: 0x0041,
+ 0x1230: 0x42aa, 0x1231: 0x442c, 0x1232: 0x42af, 0x1234: 0x42b4,
+ 0x1236: 0x42b9, 0x1237: 0x4432, 0x1238: 0x42be, 0x1239: 0x4438, 0x123a: 0x42c3, 0x123b: 0x443e,
+ 0x123c: 0x42c8, 0x123d: 0x4444, 0x123e: 0x42cd, 0x123f: 0x444a,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x0236, 0x1241: 0x440e, 0x1242: 0x440e, 0x1243: 0x4414, 0x1244: 0x4414, 0x1245: 0x4456,
+ 0x1246: 0x4456, 0x1247: 0x441a, 0x1248: 0x441a, 0x1249: 0x4462, 0x124a: 0x4462, 0x124b: 0x4462,
+ 0x124c: 0x4462, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c,
+ 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242,
+ 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248,
+ 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b,
+ 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251,
+ 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a,
+ 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260,
+ 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263,
+ 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c,
+ 0x1286: 0x026c, 0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f,
+ 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275,
+ 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278,
+ 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e,
+ 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281,
+ 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287,
+ 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d,
+ 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e00,
+ 0x12b6: 0x2e00, 0x12b7: 0x2e08, 0x12b8: 0x2e08, 0x12b9: 0x2e10, 0x12ba: 0x2e10, 0x12bb: 0x1f82,
+ 0x12bc: 0x1f82,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b,
+ 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097,
+ 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3,
+ 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af,
+ 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb,
+ 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f,
+ 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7,
+ 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f,
+ 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb,
+ 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503,
+ 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f,
+ 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547,
+ 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f,
+ 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583,
+ 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7,
+ 0x131e: 0x4a78, 0x131f: 0x4a7e, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3b,
+ 0x1324: 0x031b, 0x1325: 0x4a41, 0x1326: 0x4a47, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327,
+ 0x132a: 0x4a4d, 0x132b: 0x4a53, 0x132c: 0x4a59, 0x132d: 0x4a5f, 0x132e: 0x4a65, 0x132f: 0x4a6b,
+ 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337,
+ 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f,
+ 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b,
+ // Block 0x4d, offset 0x1340
+ 0x1342: 0x49bd, 0x1343: 0x49c3, 0x1344: 0x49c9, 0x1345: 0x49cf,
+ 0x1346: 0x49d5, 0x1347: 0x49db, 0x134a: 0x49e1, 0x134b: 0x49e7,
+ 0x134c: 0x49ed, 0x134d: 0x49f3, 0x134e: 0x49f9, 0x134f: 0x49ff,
+ 0x1352: 0x4a05, 0x1353: 0x4a0b, 0x1354: 0x4a11, 0x1355: 0x4a17, 0x1356: 0x4a1d, 0x1357: 0x4a23,
+ 0x135a: 0x4a29, 0x135b: 0x4a2f, 0x135c: 0x4a35,
+ 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4264,
+ 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b,
+ 0x136a: 0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d,
+ 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085,
+ 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091,
+ 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d,
+ 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9,
+ 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5,
+ 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176,
+ 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188,
+ 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a,
+ 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9,
+ 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0,
+ 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209,
+ 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027,
+ 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033,
+ 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b,
+ 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023,
+ 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f,
+ 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027,
+ 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033,
+ 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b,
+ 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a,
+ 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e,
+ 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263,
+ 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e,
+ 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4,
+ 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248,
+ 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290,
+ 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f,
+ 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242,
+ 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272,
+ // Block 0x51, offset 0x1440
+ 0x1442: 0x0248,
+ 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e,
+ 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263,
+ 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e,
+ 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4,
+ 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248,
+ 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290,
+ 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f,
+ 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 0x0260, 0x1475: 0x0242,
+ 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272,
+ 0x147c: 0x0293, 0x147e: 0x02cc,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a,
+ 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e,
+ 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263,
+ 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e,
+ 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272,
+ 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251,
+ 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290,
+ 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f,
+ 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242,
+ 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8,
+ 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927,
+ 0x14d0: 0x1a8c, 0x14d1: 0x1a90,
+ 0x14d2: 0x1a94, 0x14d3: 0x1a98, 0x14d4: 0x1a9c, 0x14d5: 0x1aa0, 0x14d6: 0x1aa4, 0x14d7: 0x1aa8,
+ 0x14d8: 0x1aac, 0x14d9: 0x1ab0, 0x14da: 0x1ab4, 0x14db: 0x1ab8, 0x14dc: 0x1abc, 0x14dd: 0x1ac0,
+ 0x14de: 0x1ac4, 0x14df: 0x1ac8, 0x14e0: 0x1acc, 0x14e1: 0x1ad0, 0x14e2: 0x1ad4, 0x14e3: 0x1ad8,
+ 0x14e4: 0x1adc, 0x14e5: 0x1ae0, 0x14e6: 0x1ae4, 0x14e7: 0x1ae8, 0x14e8: 0x1aec, 0x14e9: 0x1af0,
+ 0x14ea: 0x271e, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b1,
+ 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d,
+ 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059,
+ 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x26ad, 0x1501: 0x26c2, 0x1502: 0x0503,
+ 0x1510: 0x0c0f, 0x1511: 0x0a47,
+ 0x1512: 0x08d3, 0x1513: 0x45c4, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff,
+ 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b,
+ 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b,
+ 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf,
+ 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b,
+ 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43,
+ 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757,
+ 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773,
+ 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3,
+ 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf,
+ 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff,
+ 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f,
+ 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867,
+ 0x156a: 0x086b, 0x156b: 0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af,
+ 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93,
+ 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3,
+ 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f,
+ 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983,
+ 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf,
+ 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3,
+ 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef,
+ 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23,
+ 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37,
+ 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63,
+ 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f,
+ 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692,
+ 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb,
+ 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f,
+ 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6,
+ 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9,
+ 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83,
+ 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3,
+ 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf,
+ 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7,
+ 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f,
+ 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b,
+ 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87,
+ 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb,
+ 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7,
+ 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663,
+ 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd,
+ 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7,
+ 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b,
+ 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f,
+ 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7,
+ 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700,
+ 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 0x163f: 0x0dfb,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23,
+ 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53,
+ 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714,
+ 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b,
+ 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719,
+ 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728,
+ 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37,
+ 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57,
+ 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737,
+ 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741,
+ 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff,
+ 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637,
+ 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f,
+ 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093,
+ 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782,
+ 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3,
+ 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7,
+ 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133,
+ 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa,
+ 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4,
+ 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7,
+ 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7,
+ 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b,
+ 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd,
+ 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f,
+ 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f,
+ 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273,
+ 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677,
+ 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7,
+ 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb,
+ 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5,
+ 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa,
+ 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 0x134f, 0x1711: 0x136b,
+ 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7,
+ 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665,
+ 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f,
+ 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477,
+ 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693,
+ 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb,
+ 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b,
+ 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567,
+ 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7,
+ 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7,
+ 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef,
+ 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868,
+}
+
+// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var nfkcIndex = [1408]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04,
+ 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09,
+ 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62,
+ 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05,
+ 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a,
+ 0xf0: 0x13,
+ // Block 0x4, offset 0x100
+ 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d,
+ 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74,
+ 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a,
+ 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82,
+ // Block 0x5, offset 0x140
+ 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89,
+ 0x14d: 0x8a,
+ 0x15c: 0x8b, 0x15f: 0x8c,
+ 0x162: 0x8d, 0x164: 0x8e,
+ 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94,
+ 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12,
+ 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a,
+ // Block 0x6, offset 0x180
+ 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d,
+ 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0,
+ 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1,
+ 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4,
+ 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8,
+ 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28,
+ 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30,
+ // Block 0x8, offset 0x200
+ 0x219: 0xae, 0x21a: 0xaf, 0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2,
+ 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8,
+ 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc,
+ 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd,
+ 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe,
+ // Block 0x9, offset 0x240
+ 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf,
+ 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0,
+ 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1,
+ 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2,
+ 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3,
+ 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd,
+ 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe,
+ 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf,
+ // Block 0xa, offset 0x280
+ 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0,
+ 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1,
+ 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2,
+ 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3,
+ 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd,
+ 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe,
+ 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf,
+ 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1,
+ 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2,
+ 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3,
+ 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4,
+ // Block 0xc, offset 0x300
+ 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34,
+ 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c,
+ 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44,
+ 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b,
+ // Block 0xd, offset 0x340
+ 0x347: 0xc6,
+ 0x34b: 0xc7, 0x34d: 0xc8,
+ 0x368: 0xc9, 0x36b: 0xca,
+ 0x374: 0xcb,
+ 0x37d: 0xcc,
+ // Block 0xe, offset 0x380
+ 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0,
+ 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4,
+ 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9,
+ 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc,
+ 0x3a0: 0xdd,
+ 0x3a8: 0xde, 0x3a9: 0xdf, 0x3aa: 0xe0,
+ 0x3b0: 0xda, 0x3b5: 0xe1, 0x3b6: 0xe2,
+ // Block 0xf, offset 0x3c0
+ 0x3eb: 0xe3, 0x3ec: 0xe4,
+ // Block 0x10, offset 0x400
+ 0x432: 0xe5,
+ // Block 0x11, offset 0x440
+ 0x445: 0xe6, 0x446: 0xe7, 0x447: 0xe8,
+ 0x449: 0xe9,
+ 0x450: 0xea, 0x451: 0xeb, 0x452: 0xec, 0x453: 0xed, 0x454: 0xee, 0x455: 0xef, 0x456: 0xf0, 0x457: 0xf1,
+ 0x458: 0xf2, 0x459: 0xf3, 0x45a: 0x4c, 0x45b: 0xf4, 0x45c: 0xf5, 0x45d: 0xf6, 0x45e: 0xf7, 0x45f: 0x4d,
+ // Block 0x12, offset 0x480
+ 0x480: 0xf8,
+ 0x4a3: 0xf9, 0x4a5: 0xfa,
+ 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50,
+ // Block 0x13, offset 0x4c0
+ 0x4c4: 0x51, 0x4c5: 0xfb, 0x4c6: 0xfc,
+ 0x4c8: 0x52, 0x4c9: 0xfd,
+ // Block 0x14, offset 0x500
+ 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a,
+ 0x528: 0x5b,
+ // Block 0x15, offset 0x540
+ 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d,
+ 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11,
+ 0x56f: 0x12,
+}
+
+// nfkcSparseOffset: 162 entries, 324 bytes
+var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xde, 0xe2, 0xe8, 0xf9, 0x105, 0x107, 0x10d, 0x10f, 0x111, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11e, 0x121, 0x123, 0x126, 0x129, 0x12d, 0x132, 0x13b, 0x13d, 0x140, 0x142, 0x14d, 0x158, 0x166, 0x174, 0x184, 0x192, 0x199, 0x19f, 0x1ae, 0x1b2, 0x1b4, 0x1b8, 0x1ba, 0x1bd, 0x1bf, 0x1c2, 0x1c4, 0x1c7, 0x1c9, 0x1cb, 0x1cd, 0x1d9, 0x1e3, 0x1ed, 0x1f0, 0x1f4, 0x1f6, 0x1f8, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x203, 0x205, 0x207, 0x20d, 0x210, 0x214, 0x216, 0x21d, 0x223, 0x229, 0x231, 0x237, 0x23d, 0x243, 0x247, 0x249, 0x24b, 0x24d, 0x24f, 0x255, 0x258, 0x25a, 0x260, 0x263, 0x26b, 0x272, 0x275, 0x278, 0x27a, 0x27d, 0x285, 0x289, 0x290, 0x293, 0x299, 0x29b, 0x29d, 0x2a0, 0x2a2, 0x2a5, 0x2a7, 0x2a9, 0x2ab, 0x2ae, 0x2b0, 0x2b2, 0x2b4, 0x2b6, 0x2c3, 0x2cd, 0x2cf, 0x2d1, 0x2d5, 0x2da, 0x2e6, 0x2eb, 0x2f4, 0x2fa, 0x2ff, 0x303, 0x308, 0x30c, 0x31c, 0x32a, 0x338, 0x346, 0x34c, 0x34e, 0x351, 0x35b, 0x35d}
+
+// nfkcSparseValues: 871 entries, 3484 bytes
+var nfkcSparseValues = [871]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0002, lo: 0x0d},
+ {value: 0x0001, lo: 0xa0, hi: 0xa0},
+ {value: 0x4278, lo: 0xa8, hi: 0xa8},
+ {value: 0x0083, lo: 0xaa, hi: 0xaa},
+ {value: 0x4264, lo: 0xaf, hi: 0xaf},
+ {value: 0x0025, lo: 0xb2, hi: 0xb3},
+ {value: 0x425a, lo: 0xb4, hi: 0xb4},
+ {value: 0x01dc, lo: 0xb5, hi: 0xb5},
+ {value: 0x4291, lo: 0xb8, hi: 0xb8},
+ {value: 0x0023, lo: 0xb9, hi: 0xb9},
+ {value: 0x009f, lo: 0xba, hi: 0xba},
+ {value: 0x221c, lo: 0xbc, hi: 0xbc},
+ {value: 0x2210, lo: 0xbd, hi: 0xbd},
+ {value: 0x22b2, lo: 0xbe, hi: 0xbe},
+ // Block 0x1, offset 0xe
+ {value: 0x0091, lo: 0x03},
+ {value: 0x46e2, lo: 0xa0, hi: 0xa1},
+ {value: 0x4714, lo: 0xaf, hi: 0xb0},
+ {value: 0xa000, lo: 0xb7, hi: 0xb7},
+ // Block 0x2, offset 0x12
+ {value: 0x0003, lo: 0x08},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x0091, lo: 0xb0, hi: 0xb0},
+ {value: 0x0119, lo: 0xb1, hi: 0xb1},
+ {value: 0x0095, lo: 0xb2, hi: 0xb2},
+ {value: 0x00a5, lo: 0xb3, hi: 0xb3},
+ {value: 0x0143, lo: 0xb4, hi: 0xb6},
+ {value: 0x00af, lo: 0xb7, hi: 0xb7},
+ {value: 0x00b3, lo: 0xb8, hi: 0xb8},
+ // Block 0x3, offset 0x1b
+ {value: 0x000a, lo: 0x09},
+ {value: 0x426e, lo: 0x98, hi: 0x98},
+ {value: 0x4273, lo: 0x99, hi: 0x9a},
+ {value: 0x4296, lo: 0x9b, hi: 0x9b},
+ {value: 0x425f, lo: 0x9c, hi: 0x9c},
+ {value: 0x4282, lo: 0x9d, hi: 0x9d},
+ {value: 0x0113, lo: 0xa0, hi: 0xa0},
+ {value: 0x0099, lo: 0xa1, hi: 0xa1},
+ {value: 0x00a7, lo: 0xa2, hi: 0xa3},
+ {value: 0x0167, lo: 0xa4, hi: 0xa4},
+ // Block 0x4, offset 0x25
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0xa000, lo: 0x8d, hi: 0x8d},
+ {value: 0x37a5, lo: 0x90, hi: 0x90},
+ {value: 0x37b1, lo: 0x91, hi: 0x91},
+ {value: 0x379f, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x96, hi: 0x96},
+ {value: 0x3817, lo: 0x97, hi: 0x97},
+ {value: 0x37e1, lo: 0x9c, hi: 0x9c},
+ {value: 0x37c9, lo: 0x9d, hi: 0x9d},
+ {value: 0x37f3, lo: 0x9e, hi: 0x9e},
+ {value: 0xa000, lo: 0xb4, hi: 0xb5},
+ {value: 0x381d, lo: 0xb6, hi: 0xb6},
+ {value: 0x3823, lo: 0xb7, hi: 0xb7},
+ // Block 0x5, offset 0x35
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x83, hi: 0x87},
+ // Block 0x6, offset 0x37
+ {value: 0x0001, lo: 0x04},
+ {value: 0x8113, lo: 0x81, hi: 0x82},
+ {value: 0x8132, lo: 0x84, hi: 0x84},
+ {value: 0x812d, lo: 0x85, hi: 0x85},
+ {value: 0x810d, lo: 0x87, hi: 0x87},
+ // Block 0x7, offset 0x3c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x97},
+ {value: 0x8119, lo: 0x98, hi: 0x98},
+ {value: 0x811a, lo: 0x99, hi: 0x99},
+ {value: 0x811b, lo: 0x9a, hi: 0x9a},
+ {value: 0x3841, lo: 0xa2, hi: 0xa2},
+ {value: 0x3847, lo: 0xa3, hi: 0xa3},
+ {value: 0x3853, lo: 0xa4, hi: 0xa4},
+ {value: 0x384d, lo: 0xa5, hi: 0xa5},
+ {value: 0x3859, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xa7, hi: 0xa7},
+ // Block 0x8, offset 0x47
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x386b, lo: 0x80, hi: 0x80},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0x385f, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x3865, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x95, hi: 0x95},
+ {value: 0x8132, lo: 0x96, hi: 0x9c},
+ {value: 0x8132, lo: 0x9f, hi: 0xa2},
+ {value: 0x812d, lo: 0xa3, hi: 0xa3},
+ {value: 0x8132, lo: 0xa4, hi: 0xa4},
+ {value: 0x8132, lo: 0xa7, hi: 0xa8},
+ {value: 0x812d, lo: 0xaa, hi: 0xaa},
+ {value: 0x8132, lo: 0xab, hi: 0xac},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ // Block 0x9, offset 0x56
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x811f, lo: 0x91, hi: 0x91},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x812d, lo: 0xb1, hi: 0xb1},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb5, hi: 0xb6},
+ {value: 0x812d, lo: 0xb7, hi: 0xb9},
+ {value: 0x8132, lo: 0xba, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbc},
+ {value: 0x8132, lo: 0xbd, hi: 0xbd},
+ {value: 0x812d, lo: 0xbe, hi: 0xbe},
+ {value: 0x8132, lo: 0xbf, hi: 0xbf},
+ // Block 0xa, offset 0x63
+ {value: 0x0005, lo: 0x07},
+ {value: 0x8132, lo: 0x80, hi: 0x80},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x812d, lo: 0x82, hi: 0x83},
+ {value: 0x812d, lo: 0x84, hi: 0x85},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x812d, lo: 0x88, hi: 0x89},
+ {value: 0x8132, lo: 0x8a, hi: 0x8a},
+ // Block 0xb, offset 0x6b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xab, hi: 0xb1},
+ {value: 0x812d, lo: 0xb2, hi: 0xb2},
+ {value: 0x8132, lo: 0xb3, hi: 0xb3},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0xc, offset 0x70
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0x96, hi: 0x99},
+ {value: 0x8132, lo: 0x9b, hi: 0xa3},
+ {value: 0x8132, lo: 0xa5, hi: 0xa7},
+ {value: 0x8132, lo: 0xa9, hi: 0xad},
+ // Block 0xd, offset 0x75
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x99, hi: 0x9b},
+ // Block 0xe, offset 0x77
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0xa8, hi: 0xa8},
+ {value: 0x3ed8, lo: 0xa9, hi: 0xa9},
+ {value: 0xa000, lo: 0xb0, hi: 0xb0},
+ {value: 0x3ee0, lo: 0xb1, hi: 0xb1},
+ {value: 0xa000, lo: 0xb3, hi: 0xb3},
+ {value: 0x3ee8, lo: 0xb4, hi: 0xb4},
+ {value: 0x9902, lo: 0xbc, hi: 0xbc},
+ // Block 0xf, offset 0x7f
+ {value: 0x0008, lo: 0x06},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x91, hi: 0x91},
+ {value: 0x812d, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x93, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x94},
+ {value: 0x451c, lo: 0x98, hi: 0x9f},
+ // Block 0x10, offset 0x86
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x11, offset 0x89
+ {value: 0x0008, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2c9e, lo: 0x8b, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x455c, lo: 0x9c, hi: 0x9d},
+ {value: 0x456c, lo: 0x9f, hi: 0x9f},
+ {value: 0x8132, lo: 0xbe, hi: 0xbe},
+ // Block 0x12, offset 0x91
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4594, lo: 0xb3, hi: 0xb3},
+ {value: 0x459c, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x13, offset 0x95
+ {value: 0x0008, lo: 0x03},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x4574, lo: 0x99, hi: 0x9b},
+ {value: 0x458c, lo: 0x9e, hi: 0x9e},
+ // Block 0x14, offset 0x99
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x15, offset 0x9b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ // Block 0x16, offset 0x9d
+ {value: 0x0000, lo: 0x08},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2cb6, lo: 0x88, hi: 0x88},
+ {value: 0x2cae, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cbe, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x96, hi: 0x97},
+ {value: 0x45a4, lo: 0x9c, hi: 0x9c},
+ {value: 0x45ac, lo: 0x9d, hi: 0x9d},
+ // Block 0x17, offset 0xa6
+ {value: 0x0000, lo: 0x03},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x2cc6, lo: 0x94, hi: 0x94},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x18, offset 0xaa
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cce, lo: 0x8a, hi: 0x8a},
+ {value: 0x2cde, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cd6, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x19, offset 0xb1
+ {value: 0x1801, lo: 0x04},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x3ef0, lo: 0x88, hi: 0x88},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8120, lo: 0x95, hi: 0x96},
+ // Block 0x1a, offset 0xb6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0xa000, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0xb9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x2ce6, lo: 0x80, hi: 0x80},
+ {value: 0x9900, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x2cee, lo: 0x87, hi: 0x87},
+ {value: 0x2cf6, lo: 0x88, hi: 0x88},
+ {value: 0x2f50, lo: 0x8a, hi: 0x8a},
+ {value: 0x2dd8, lo: 0x8b, hi: 0x8b},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x95, hi: 0x96},
+ // Block 0x1c, offset 0xc3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x1d, offset 0xc6
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cfe, lo: 0x8a, hi: 0x8a},
+ {value: 0x2d0e, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d06, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1e, offset 0xcd
+ {value: 0x6bea, lo: 0x07},
+ {value: 0x9904, lo: 0x8a, hi: 0x8a},
+ {value: 0x9900, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x3ef8, lo: 0x9a, hi: 0x9a},
+ {value: 0x2f58, lo: 0x9c, hi: 0x9c},
+ {value: 0x2de3, lo: 0x9d, hi: 0x9d},
+ {value: 0x2d16, lo: 0x9e, hi: 0x9f},
+ // Block 0x1f, offset 0xd5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x2621, lo: 0xb3, hi: 0xb3},
+ {value: 0x8122, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x20, offset 0xd9
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8123, lo: 0x88, hi: 0x8b},
+ // Block 0x21, offset 0xdb
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2636, lo: 0xb3, hi: 0xb3},
+ {value: 0x8124, lo: 0xb8, hi: 0xb9},
+ // Block 0x22, offset 0xde
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8125, lo: 0x88, hi: 0x8b},
+ {value: 0x2628, lo: 0x9c, hi: 0x9c},
+ {value: 0x262f, lo: 0x9d, hi: 0x9d},
+ // Block 0x23, offset 0xe2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x030b, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x98, hi: 0x99},
+ {value: 0x812d, lo: 0xb5, hi: 0xb5},
+ {value: 0x812d, lo: 0xb7, hi: 0xb7},
+ {value: 0x812b, lo: 0xb9, hi: 0xb9},
+ // Block 0x24, offset 0xe8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x2644, lo: 0x83, hi: 0x83},
+ {value: 0x264b, lo: 0x8d, hi: 0x8d},
+ {value: 0x2652, lo: 0x92, hi: 0x92},
+ {value: 0x2659, lo: 0x97, hi: 0x97},
+ {value: 0x2660, lo: 0x9c, hi: 0x9c},
+ {value: 0x263d, lo: 0xa9, hi: 0xa9},
+ {value: 0x8126, lo: 0xb1, hi: 0xb1},
+ {value: 0x8127, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a84, lo: 0xb3, hi: 0xb3},
+ {value: 0x8128, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a8d, lo: 0xb5, hi: 0xb5},
+ {value: 0x45b4, lo: 0xb6, hi: 0xb6},
+ {value: 0x45f4, lo: 0xb7, hi: 0xb7},
+ {value: 0x45bc, lo: 0xb8, hi: 0xb8},
+ {value: 0x45ff, lo: 0xb9, hi: 0xb9},
+ {value: 0x8127, lo: 0xba, hi: 0xbd},
+ // Block 0x25, offset 0xf9
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x8127, lo: 0x80, hi: 0x80},
+ {value: 0x4a96, lo: 0x81, hi: 0x81},
+ {value: 0x8132, lo: 0x82, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0x86, hi: 0x87},
+ {value: 0x266e, lo: 0x93, hi: 0x93},
+ {value: 0x2675, lo: 0x9d, hi: 0x9d},
+ {value: 0x267c, lo: 0xa2, hi: 0xa2},
+ {value: 0x2683, lo: 0xa7, hi: 0xa7},
+ {value: 0x268a, lo: 0xac, hi: 0xac},
+ {value: 0x2667, lo: 0xb9, hi: 0xb9},
+ // Block 0x26, offset 0x105
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x86, hi: 0x86},
+ // Block 0x27, offset 0x107
+ {value: 0x0000, lo: 0x05},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x2d1e, lo: 0xa6, hi: 0xa6},
+ {value: 0x9900, lo: 0xae, hi: 0xae},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x28, offset 0x10d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ // Block 0x29, offset 0x10f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x030f, lo: 0xbc, hi: 0xbc},
+ // Block 0x2a, offset 0x111
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x80, hi: 0x92},
+ // Block 0x2b, offset 0x113
+ {value: 0x0000, lo: 0x01},
+ {value: 0xb900, lo: 0xa1, hi: 0xb5},
+ // Block 0x2c, offset 0x115
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0xa8, hi: 0xbf},
+ // Block 0x2d, offset 0x117
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0x80, hi: 0x82},
+ // Block 0x2e, offset 0x119
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9d, hi: 0x9f},
+ // Block 0x2f, offset 0x11b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x94, hi: 0x94},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x30, offset 0x11e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x9d, hi: 0x9d},
+ // Block 0x31, offset 0x121
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8131, lo: 0xa9, hi: 0xa9},
+ // Block 0x32, offset 0x123
+ {value: 0x0004, lo: 0x02},
+ {value: 0x812e, lo: 0xb9, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbb},
+ // Block 0x33, offset 0x126
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x97, hi: 0x97},
+ {value: 0x812d, lo: 0x98, hi: 0x98},
+ // Block 0x34, offset 0x129
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ {value: 0x8132, lo: 0xb5, hi: 0xbc},
+ {value: 0x812d, lo: 0xbf, hi: 0xbf},
+ // Block 0x35, offset 0x12d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ {value: 0x812d, lo: 0xb5, hi: 0xba},
+ {value: 0x8132, lo: 0xbb, hi: 0xbc},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x36, offset 0x132
+ {value: 0x0000, lo: 0x08},
+ {value: 0x2d66, lo: 0x80, hi: 0x80},
+ {value: 0x2d6e, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x82, hi: 0x82},
+ {value: 0x2d76, lo: 0x83, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xab, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xac},
+ {value: 0x8132, lo: 0xad, hi: 0xb3},
+ // Block 0x37, offset 0x13b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xaa, hi: 0xab},
+ // Block 0x38, offset 0x13d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xa6, hi: 0xa6},
+ {value: 0x8104, lo: 0xb2, hi: 0xb3},
+ // Block 0x39, offset 0x140
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x3a, offset 0x142
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x92},
+ {value: 0x8101, lo: 0x94, hi: 0x94},
+ {value: 0x812d, lo: 0x95, hi: 0x99},
+ {value: 0x8132, lo: 0x9a, hi: 0x9b},
+ {value: 0x812d, lo: 0x9c, hi: 0x9f},
+ {value: 0x8132, lo: 0xa0, hi: 0xa0},
+ {value: 0x8101, lo: 0xa2, hi: 0xa8},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ {value: 0x8132, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb8, hi: 0xb9},
+ // Block 0x3b, offset 0x14d
+ {value: 0x0002, lo: 0x0a},
+ {value: 0x0043, lo: 0xac, hi: 0xac},
+ {value: 0x00d1, lo: 0xad, hi: 0xad},
+ {value: 0x0045, lo: 0xae, hi: 0xae},
+ {value: 0x0049, lo: 0xb0, hi: 0xb1},
+ {value: 0x00e6, lo: 0xb2, hi: 0xb2},
+ {value: 0x004f, lo: 0xb3, hi: 0xba},
+ {value: 0x005f, lo: 0xbc, hi: 0xbc},
+ {value: 0x00ef, lo: 0xbd, hi: 0xbd},
+ {value: 0x0061, lo: 0xbe, hi: 0xbe},
+ {value: 0x0065, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x158
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0001, lo: 0x80, hi: 0x8a},
+ {value: 0x043b, lo: 0x91, hi: 0x91},
+ {value: 0x429b, lo: 0x97, hi: 0x97},
+ {value: 0x001d, lo: 0xa4, hi: 0xa4},
+ {value: 0x1873, lo: 0xa5, hi: 0xa5},
+ {value: 0x1b5c, lo: 0xa6, hi: 0xa6},
+ {value: 0x0001, lo: 0xaf, hi: 0xaf},
+ {value: 0x2691, lo: 0xb3, hi: 0xb3},
+ {value: 0x27fe, lo: 0xb4, hi: 0xb4},
+ {value: 0x2698, lo: 0xb6, hi: 0xb6},
+ {value: 0x2808, lo: 0xb7, hi: 0xb7},
+ {value: 0x186d, lo: 0xbc, hi: 0xbc},
+ {value: 0x4269, lo: 0xbe, hi: 0xbe},
+ // Block 0x3d, offset 0x166
+ {value: 0x0002, lo: 0x0d},
+ {value: 0x1933, lo: 0x87, hi: 0x87},
+ {value: 0x1930, lo: 0x88, hi: 0x88},
+ {value: 0x1870, lo: 0x89, hi: 0x89},
+ {value: 0x298e, lo: 0x97, hi: 0x97},
+ {value: 0x0001, lo: 0x9f, hi: 0x9f},
+ {value: 0x0021, lo: 0xb0, hi: 0xb0},
+ {value: 0x0093, lo: 0xb1, hi: 0xb1},
+ {value: 0x0029, lo: 0xb4, hi: 0xb9},
+ {value: 0x0017, lo: 0xba, hi: 0xba},
+ {value: 0x0467, lo: 0xbb, hi: 0xbb},
+ {value: 0x003b, lo: 0xbc, hi: 0xbc},
+ {value: 0x0011, lo: 0xbd, hi: 0xbe},
+ {value: 0x009d, lo: 0xbf, hi: 0xbf},
+ // Block 0x3e, offset 0x174
+ {value: 0x0002, lo: 0x0f},
+ {value: 0x0021, lo: 0x80, hi: 0x89},
+ {value: 0x0017, lo: 0x8a, hi: 0x8a},
+ {value: 0x0467, lo: 0x8b, hi: 0x8b},
+ {value: 0x003b, lo: 0x8c, hi: 0x8c},
+ {value: 0x0011, lo: 0x8d, hi: 0x8e},
+ {value: 0x0083, lo: 0x90, hi: 0x90},
+ {value: 0x008b, lo: 0x91, hi: 0x91},
+ {value: 0x009f, lo: 0x92, hi: 0x92},
+ {value: 0x00b1, lo: 0x93, hi: 0x93},
+ {value: 0x0104, lo: 0x94, hi: 0x94},
+ {value: 0x0091, lo: 0x95, hi: 0x95},
+ {value: 0x0097, lo: 0x96, hi: 0x99},
+ {value: 0x00a1, lo: 0x9a, hi: 0x9a},
+ {value: 0x00a7, lo: 0x9b, hi: 0x9c},
+ {value: 0x1999, lo: 0xa8, hi: 0xa8},
+ // Block 0x3f, offset 0x184
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x8132, lo: 0x90, hi: 0x91},
+ {value: 0x8101, lo: 0x92, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x97},
+ {value: 0x8101, lo: 0x98, hi: 0x9a},
+ {value: 0x8132, lo: 0x9b, hi: 0x9c},
+ {value: 0x8132, lo: 0xa1, hi: 0xa1},
+ {value: 0x8101, lo: 0xa5, hi: 0xa6},
+ {value: 0x8132, lo: 0xa7, hi: 0xa7},
+ {value: 0x812d, lo: 0xa8, hi: 0xa8},
+ {value: 0x8132, lo: 0xa9, hi: 0xa9},
+ {value: 0x8101, lo: 0xaa, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xaf},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ // Block 0x40, offset 0x192
+ {value: 0x0007, lo: 0x06},
+ {value: 0x2180, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ {value: 0x3bb9, lo: 0x9a, hi: 0x9b},
+ {value: 0x3bc7, lo: 0xae, hi: 0xae},
+ // Block 0x41, offset 0x199
+ {value: 0x000e, lo: 0x05},
+ {value: 0x3bce, lo: 0x8d, hi: 0x8e},
+ {value: 0x3bd5, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ // Block 0x42, offset 0x19f
+ {value: 0x0173, lo: 0x0e},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0x3be3, lo: 0x84, hi: 0x84},
+ {value: 0xa000, lo: 0x88, hi: 0x88},
+ {value: 0x3bea, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0x3bf1, lo: 0x8c, hi: 0x8c},
+ {value: 0xa000, lo: 0xa3, hi: 0xa3},
+ {value: 0x3bf8, lo: 0xa4, hi: 0xa4},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x3bff, lo: 0xa6, hi: 0xa6},
+ {value: 0x269f, lo: 0xac, hi: 0xad},
+ {value: 0x26a6, lo: 0xaf, hi: 0xaf},
+ {value: 0x281c, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xbc, hi: 0xbc},
+ // Block 0x43, offset 0x1ae
+ {value: 0x0007, lo: 0x03},
+ {value: 0x3c68, lo: 0xa0, hi: 0xa1},
+ {value: 0x3c92, lo: 0xa2, hi: 0xa3},
+ {value: 0x3cbc, lo: 0xaa, hi: 0xad},
+ // Block 0x44, offset 0x1b2
+ {value: 0x0004, lo: 0x01},
+ {value: 0x048b, lo: 0xa9, hi: 0xaa},
+ // Block 0x45, offset 0x1b4
+ {value: 0x0002, lo: 0x03},
+ {value: 0x0057, lo: 0x80, hi: 0x8f},
+ {value: 0x0083, lo: 0x90, hi: 0xa9},
+ {value: 0x0021, lo: 0xaa, hi: 0xaa},
+ // Block 0x46, offset 0x1b8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x299b, lo: 0x8c, hi: 0x8c},
+ // Block 0x47, offset 0x1ba
+ {value: 0x0263, lo: 0x02},
+ {value: 0x1b8c, lo: 0xb4, hi: 0xb4},
+ {value: 0x192d, lo: 0xb5, hi: 0xb6},
+ // Block 0x48, offset 0x1bd
+ {value: 0x0000, lo: 0x01},
+ {value: 0x44dd, lo: 0x9c, hi: 0x9c},
+ // Block 0x49, offset 0x1bf
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0095, lo: 0xbc, hi: 0xbc},
+ {value: 0x006d, lo: 0xbd, hi: 0xbd},
+ // Block 0x4a, offset 0x1c2
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xaf, hi: 0xb1},
+ // Block 0x4b, offset 0x1c4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x047f, lo: 0xaf, hi: 0xaf},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x4c, offset 0x1c7
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa0, hi: 0xbf},
+ // Block 0x4d, offset 0x1c9
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0dc3, lo: 0x9f, hi: 0x9f},
+ // Block 0x4e, offset 0x1cb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x162f, lo: 0xb3, hi: 0xb3},
+ // Block 0x4f, offset 0x1cd
+ {value: 0x0004, lo: 0x0b},
+ {value: 0x1597, lo: 0x80, hi: 0x82},
+ {value: 0x15af, lo: 0x83, hi: 0x83},
+ {value: 0x15c7, lo: 0x84, hi: 0x85},
+ {value: 0x15d7, lo: 0x86, hi: 0x89},
+ {value: 0x15eb, lo: 0x8a, hi: 0x8c},
+ {value: 0x15ff, lo: 0x8d, hi: 0x8d},
+ {value: 0x1607, lo: 0x8e, hi: 0x8e},
+ {value: 0x160f, lo: 0x8f, hi: 0x90},
+ {value: 0x161b, lo: 0x91, hi: 0x93},
+ {value: 0x162b, lo: 0x94, hi: 0x94},
+ {value: 0x1633, lo: 0x95, hi: 0x95},
+ // Block 0x50, offset 0x1d9
+ {value: 0x0004, lo: 0x09},
+ {value: 0x0001, lo: 0x80, hi: 0x80},
+ {value: 0x812c, lo: 0xaa, hi: 0xaa},
+ {value: 0x8131, lo: 0xab, hi: 0xab},
+ {value: 0x8133, lo: 0xac, hi: 0xac},
+ {value: 0x812e, lo: 0xad, hi: 0xad},
+ {value: 0x812f, lo: 0xae, hi: 0xae},
+ {value: 0x812f, lo: 0xaf, hi: 0xaf},
+ {value: 0x04b3, lo: 0xb6, hi: 0xb6},
+ {value: 0x0887, lo: 0xb8, hi: 0xba},
+ // Block 0x51, offset 0x1e3
+ {value: 0x0006, lo: 0x09},
+ {value: 0x0313, lo: 0xb1, hi: 0xb1},
+ {value: 0x0317, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a3b, lo: 0xb3, hi: 0xb3},
+ {value: 0x031b, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a41, lo: 0xb5, hi: 0xb6},
+ {value: 0x031f, lo: 0xb7, hi: 0xb7},
+ {value: 0x0323, lo: 0xb8, hi: 0xb8},
+ {value: 0x0327, lo: 0xb9, hi: 0xb9},
+ {value: 0x4a4d, lo: 0xba, hi: 0xbf},
+ // Block 0x52, offset 0x1ed
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xaf, hi: 0xaf},
+ {value: 0x8132, lo: 0xb4, hi: 0xbd},
+ // Block 0x53, offset 0x1f0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x020f, lo: 0x9c, hi: 0x9c},
+ {value: 0x0212, lo: 0x9d, hi: 0x9d},
+ {value: 0x8132, lo: 0x9e, hi: 0x9f},
+ // Block 0x54, offset 0x1f4
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb1},
+ // Block 0x55, offset 0x1f6
+ {value: 0x0000, lo: 0x01},
+ {value: 0x163b, lo: 0xb0, hi: 0xb0},
+ // Block 0x56, offset 0x1f8
+ {value: 0x000c, lo: 0x01},
+ {value: 0x00d7, lo: 0xb8, hi: 0xb9},
+ // Block 0x57, offset 0x1fa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ // Block 0x58, offset 0x1fc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xa0, hi: 0xb1},
+ // Block 0x59, offset 0x1ff
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xab, hi: 0xad},
+ // Block 0x5a, offset 0x201
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x93, hi: 0x93},
+ // Block 0x5b, offset 0x203
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb3, hi: 0xb3},
+ // Block 0x5c, offset 0x205
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ // Block 0x5d, offset 0x207
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb7, hi: 0xb8},
+ {value: 0x8132, lo: 0xbe, hi: 0xbf},
+ // Block 0x5e, offset 0x20d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ // Block 0x5f, offset 0x210
+ {value: 0x0008, lo: 0x03},
+ {value: 0x1637, lo: 0x9c, hi: 0x9d},
+ {value: 0x0125, lo: 0x9e, hi: 0x9e},
+ {value: 0x1643, lo: 0x9f, hi: 0x9f},
+ // Block 0x60, offset 0x214
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xad, hi: 0xad},
+ // Block 0x61, offset 0x216
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe500, lo: 0x80, hi: 0x80},
+ {value: 0xc600, lo: 0x81, hi: 0x9b},
+ {value: 0xe500, lo: 0x9c, hi: 0x9c},
+ {value: 0xc600, lo: 0x9d, hi: 0xb7},
+ {value: 0xe500, lo: 0xb8, hi: 0xb8},
+ {value: 0xc600, lo: 0xb9, hi: 0xbf},
+ // Block 0x62, offset 0x21d
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x93},
+ {value: 0xe500, lo: 0x94, hi: 0x94},
+ {value: 0xc600, lo: 0x95, hi: 0xaf},
+ {value: 0xe500, lo: 0xb0, hi: 0xb0},
+ {value: 0xc600, lo: 0xb1, hi: 0xbf},
+ // Block 0x63, offset 0x223
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8b},
+ {value: 0xe500, lo: 0x8c, hi: 0x8c},
+ {value: 0xc600, lo: 0x8d, hi: 0xa7},
+ {value: 0xe500, lo: 0xa8, hi: 0xa8},
+ {value: 0xc600, lo: 0xa9, hi: 0xbf},
+ // Block 0x64, offset 0x229
+ {value: 0x0000, lo: 0x07},
+ {value: 0xc600, lo: 0x80, hi: 0x83},
+ {value: 0xe500, lo: 0x84, hi: 0x84},
+ {value: 0xc600, lo: 0x85, hi: 0x9f},
+ {value: 0xe500, lo: 0xa0, hi: 0xa0},
+ {value: 0xc600, lo: 0xa1, hi: 0xbb},
+ {value: 0xe500, lo: 0xbc, hi: 0xbc},
+ {value: 0xc600, lo: 0xbd, hi: 0xbf},
+ // Block 0x65, offset 0x231
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x97},
+ {value: 0xe500, lo: 0x98, hi: 0x98},
+ {value: 0xc600, lo: 0x99, hi: 0xb3},
+ {value: 0xe500, lo: 0xb4, hi: 0xb4},
+ {value: 0xc600, lo: 0xb5, hi: 0xbf},
+ // Block 0x66, offset 0x237
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8f},
+ {value: 0xe500, lo: 0x90, hi: 0x90},
+ {value: 0xc600, lo: 0x91, hi: 0xab},
+ {value: 0xe500, lo: 0xac, hi: 0xac},
+ {value: 0xc600, lo: 0xad, hi: 0xbf},
+ // Block 0x67, offset 0x23d
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ {value: 0xe500, lo: 0xa4, hi: 0xa4},
+ {value: 0xc600, lo: 0xa5, hi: 0xbf},
+ // Block 0x68, offset 0x243
+ {value: 0x0000, lo: 0x03},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ // Block 0x69, offset 0x247
+ {value: 0x0002, lo: 0x01},
+ {value: 0x0003, lo: 0x81, hi: 0xbf},
+ // Block 0x6a, offset 0x249
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x6b, offset 0x24b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xa0, hi: 0xa0},
+ // Block 0x6c, offset 0x24d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb6, hi: 0xba},
+ // Block 0x6d, offset 0x24f
+ {value: 0x002c, lo: 0x05},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x8f, hi: 0x8f},
+ {value: 0x8132, lo: 0xb8, hi: 0xb8},
+ {value: 0x8101, lo: 0xb9, hi: 0xba},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x6e, offset 0x255
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xa5, hi: 0xa5},
+ {value: 0x812d, lo: 0xa6, hi: 0xa6},
+ // Block 0x6f, offset 0x258
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa4, hi: 0xa7},
+ // Block 0x70, offset 0x25a
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x8132, lo: 0x88, hi: 0x8a},
+ {value: 0x812d, lo: 0x8b, hi: 0x8b},
+ {value: 0x8132, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x8d, hi: 0x90},
+ // Block 0x71, offset 0x260
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x72, offset 0x263
+ {value: 0x17fe, lo: 0x07},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x4238, lo: 0x9a, hi: 0x9a},
+ {value: 0xa000, lo: 0x9b, hi: 0x9b},
+ {value: 0x4242, lo: 0x9c, hi: 0x9c},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x424c, lo: 0xab, hi: 0xab},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x73, offset 0x26b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x8132, lo: 0x80, hi: 0x82},
+ {value: 0x9900, lo: 0xa7, hi: 0xa7},
+ {value: 0x2d7e, lo: 0xae, hi: 0xae},
+ {value: 0x2d88, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb1, hi: 0xb2},
+ {value: 0x8104, lo: 0xb3, hi: 0xb4},
+ // Block 0x74, offset 0x272
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x75, offset 0x275
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb5, hi: 0xb5},
+ {value: 0x8102, lo: 0xb6, hi: 0xb6},
+ // Block 0x76, offset 0x278
+ {value: 0x0002, lo: 0x01},
+ {value: 0x8102, lo: 0xa9, hi: 0xaa},
+ // Block 0x77, offset 0x27a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x78, offset 0x27d
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2d92, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d9c, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x8132, lo: 0xa6, hi: 0xac},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ // Block 0x79, offset 0x285
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x86, hi: 0x86},
+ {value: 0x8132, lo: 0x9e, hi: 0x9e},
+ // Block 0x7a, offset 0x289
+ {value: 0x6b5a, lo: 0x06},
+ {value: 0x9900, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xb9, hi: 0xb9},
+ {value: 0x9900, lo: 0xba, hi: 0xba},
+ {value: 0x2db0, lo: 0xbb, hi: 0xbb},
+ {value: 0x2da6, lo: 0xbc, hi: 0xbd},
+ {value: 0x2dba, lo: 0xbe, hi: 0xbe},
+ // Block 0x7b, offset 0x290
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x83, hi: 0x83},
+ // Block 0x7c, offset 0x293
+ {value: 0x0000, lo: 0x05},
+ {value: 0x9900, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb8, hi: 0xb9},
+ {value: 0x2dc4, lo: 0xba, hi: 0xba},
+ {value: 0x2dce, lo: 0xbb, hi: 0xbb},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x7d, offset 0x299
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0x80, hi: 0x80},
+ // Block 0x7e, offset 0x29b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x7f, offset 0x29d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x80, offset 0x2a0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xab, hi: 0xab},
+ // Block 0x81, offset 0x2a2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb9, hi: 0xb9},
+ {value: 0x8102, lo: 0xba, hi: 0xba},
+ // Block 0x82, offset 0x2a5
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x83, offset 0x2a7
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x87, hi: 0x87},
+ // Block 0x84, offset 0x2a9
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x99, hi: 0x99},
+ // Block 0x85, offset 0x2ab
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0x82, hi: 0x82},
+ {value: 0x8104, lo: 0x84, hi: 0x85},
+ // Block 0x86, offset 0x2ae
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x97, hi: 0x97},
+ // Block 0x87, offset 0x2b0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0xb0, hi: 0xb4},
+ // Block 0x88, offset 0x2b2
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb6},
+ // Block 0x89, offset 0x2b4
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0x9e, hi: 0x9e},
+ // Block 0x8a, offset 0x2b6
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x45cc, lo: 0x9e, hi: 0x9e},
+ {value: 0x45d6, lo: 0x9f, hi: 0x9f},
+ {value: 0x460a, lo: 0xa0, hi: 0xa0},
+ {value: 0x4618, lo: 0xa1, hi: 0xa1},
+ {value: 0x4626, lo: 0xa2, hi: 0xa2},
+ {value: 0x4634, lo: 0xa3, hi: 0xa3},
+ {value: 0x4642, lo: 0xa4, hi: 0xa4},
+ {value: 0x812b, lo: 0xa5, hi: 0xa6},
+ {value: 0x8101, lo: 0xa7, hi: 0xa9},
+ {value: 0x8130, lo: 0xad, hi: 0xad},
+ {value: 0x812b, lo: 0xae, hi: 0xb2},
+ {value: 0x812d, lo: 0xbb, hi: 0xbf},
+ // Block 0x8b, offset 0x2c3
+ {value: 0x0000, lo: 0x09},
+ {value: 0x812d, lo: 0x80, hi: 0x82},
+ {value: 0x8132, lo: 0x85, hi: 0x89},
+ {value: 0x812d, lo: 0x8a, hi: 0x8b},
+ {value: 0x8132, lo: 0xaa, hi: 0xad},
+ {value: 0x45e0, lo: 0xbb, hi: 0xbb},
+ {value: 0x45ea, lo: 0xbc, hi: 0xbc},
+ {value: 0x4650, lo: 0xbd, hi: 0xbd},
+ {value: 0x466c, lo: 0xbe, hi: 0xbe},
+ {value: 0x465e, lo: 0xbf, hi: 0xbf},
+ // Block 0x8c, offset 0x2cd
+ {value: 0x0000, lo: 0x01},
+ {value: 0x467a, lo: 0x80, hi: 0x80},
+ // Block 0x8d, offset 0x2cf
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x82, hi: 0x84},
+ // Block 0x8e, offset 0x2d1
+ {value: 0x0002, lo: 0x03},
+ {value: 0x0043, lo: 0x80, hi: 0x99},
+ {value: 0x0083, lo: 0x9a, hi: 0xb3},
+ {value: 0x0043, lo: 0xb4, hi: 0xbf},
+ // Block 0x8f, offset 0x2d5
+ {value: 0x0002, lo: 0x04},
+ {value: 0x005b, lo: 0x80, hi: 0x8d},
+ {value: 0x0083, lo: 0x8e, hi: 0x94},
+ {value: 0x0093, lo: 0x96, hi: 0xa7},
+ {value: 0x0043, lo: 0xa8, hi: 0xbf},
+ // Block 0x90, offset 0x2da
+ {value: 0x0002, lo: 0x0b},
+ {value: 0x0073, lo: 0x80, hi: 0x81},
+ {value: 0x0083, lo: 0x82, hi: 0x9b},
+ {value: 0x0043, lo: 0x9c, hi: 0x9c},
+ {value: 0x0047, lo: 0x9e, hi: 0x9f},
+ {value: 0x004f, lo: 0xa2, hi: 0xa2},
+ {value: 0x0055, lo: 0xa5, hi: 0xa6},
+ {value: 0x005d, lo: 0xa9, hi: 0xac},
+ {value: 0x0067, lo: 0xae, hi: 0xb5},
+ {value: 0x0083, lo: 0xb6, hi: 0xb9},
+ {value: 0x008d, lo: 0xbb, hi: 0xbb},
+ {value: 0x0091, lo: 0xbd, hi: 0xbf},
+ // Block 0x91, offset 0x2e6
+ {value: 0x0002, lo: 0x04},
+ {value: 0x0097, lo: 0x80, hi: 0x83},
+ {value: 0x00a1, lo: 0x85, hi: 0x8f},
+ {value: 0x0043, lo: 0x90, hi: 0xa9},
+ {value: 0x0083, lo: 0xaa, hi: 0xbf},
+ // Block 0x92, offset 0x2eb
+ {value: 0x0002, lo: 0x08},
+ {value: 0x00af, lo: 0x80, hi: 0x83},
+ {value: 0x0043, lo: 0x84, hi: 0x85},
+ {value: 0x0049, lo: 0x87, hi: 0x8a},
+ {value: 0x0055, lo: 0x8d, hi: 0x94},
+ {value: 0x0067, lo: 0x96, hi: 0x9c},
+ {value: 0x0083, lo: 0x9e, hi: 0xb7},
+ {value: 0x0043, lo: 0xb8, hi: 0xb9},
+ {value: 0x0049, lo: 0xbb, hi: 0xbe},
+ // Block 0x93, offset 0x2f4
+ {value: 0x0002, lo: 0x05},
+ {value: 0x0053, lo: 0x80, hi: 0x84},
+ {value: 0x005f, lo: 0x86, hi: 0x86},
+ {value: 0x0067, lo: 0x8a, hi: 0x90},
+ {value: 0x0083, lo: 0x92, hi: 0xab},
+ {value: 0x0043, lo: 0xac, hi: 0xbf},
+ // Block 0x94, offset 0x2fa
+ {value: 0x0002, lo: 0x04},
+ {value: 0x006b, lo: 0x80, hi: 0x85},
+ {value: 0x0083, lo: 0x86, hi: 0x9f},
+ {value: 0x0043, lo: 0xa0, hi: 0xb9},
+ {value: 0x0083, lo: 0xba, hi: 0xbf},
+ // Block 0x95, offset 0x2ff
+ {value: 0x0002, lo: 0x03},
+ {value: 0x008f, lo: 0x80, hi: 0x93},
+ {value: 0x0043, lo: 0x94, hi: 0xad},
+ {value: 0x0083, lo: 0xae, hi: 0xbf},
+ // Block 0x96, offset 0x303
+ {value: 0x0002, lo: 0x04},
+ {value: 0x00a7, lo: 0x80, hi: 0x87},
+ {value: 0x0043, lo: 0x88, hi: 0xa1},
+ {value: 0x0083, lo: 0xa2, hi: 0xbb},
+ {value: 0x0043, lo: 0xbc, hi: 0xbf},
+ // Block 0x97, offset 0x308
+ {value: 0x0002, lo: 0x03},
+ {value: 0x004b, lo: 0x80, hi: 0x95},
+ {value: 0x0083, lo: 0x96, hi: 0xaf},
+ {value: 0x0043, lo: 0xb0, hi: 0xbf},
+ // Block 0x98, offset 0x30c
+ {value: 0x0003, lo: 0x0f},
+ {value: 0x01b8, lo: 0x80, hi: 0x80},
+ {value: 0x045f, lo: 0x81, hi: 0x81},
+ {value: 0x01bb, lo: 0x82, hi: 0x9a},
+ {value: 0x045b, lo: 0x9b, hi: 0x9b},
+ {value: 0x01c7, lo: 0x9c, hi: 0x9c},
+ {value: 0x01d0, lo: 0x9d, hi: 0x9d},
+ {value: 0x01d6, lo: 0x9e, hi: 0x9e},
+ {value: 0x01fa, lo: 0x9f, hi: 0x9f},
+ {value: 0x01eb, lo: 0xa0, hi: 0xa0},
+ {value: 0x01e8, lo: 0xa1, hi: 0xa1},
+ {value: 0x0173, lo: 0xa2, hi: 0xb2},
+ {value: 0x0188, lo: 0xb3, hi: 0xb3},
+ {value: 0x01a6, lo: 0xb4, hi: 0xba},
+ {value: 0x045f, lo: 0xbb, hi: 0xbb},
+ {value: 0x01bb, lo: 0xbc, hi: 0xbf},
+ // Block 0x99, offset 0x31c
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01c7, lo: 0x80, hi: 0x94},
+ {value: 0x045b, lo: 0x95, hi: 0x95},
+ {value: 0x01c7, lo: 0x96, hi: 0x96},
+ {value: 0x01d0, lo: 0x97, hi: 0x97},
+ {value: 0x01d6, lo: 0x98, hi: 0x98},
+ {value: 0x01fa, lo: 0x99, hi: 0x99},
+ {value: 0x01eb, lo: 0x9a, hi: 0x9a},
+ {value: 0x01e8, lo: 0x9b, hi: 0x9b},
+ {value: 0x0173, lo: 0x9c, hi: 0xac},
+ {value: 0x0188, lo: 0xad, hi: 0xad},
+ {value: 0x01a6, lo: 0xae, hi: 0xb4},
+ {value: 0x045f, lo: 0xb5, hi: 0xb5},
+ {value: 0x01bb, lo: 0xb6, hi: 0xbf},
+ // Block 0x9a, offset 0x32a
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01d9, lo: 0x80, hi: 0x8e},
+ {value: 0x045b, lo: 0x8f, hi: 0x8f},
+ {value: 0x01c7, lo: 0x90, hi: 0x90},
+ {value: 0x01d0, lo: 0x91, hi: 0x91},
+ {value: 0x01d6, lo: 0x92, hi: 0x92},
+ {value: 0x01fa, lo: 0x93, hi: 0x93},
+ {value: 0x01eb, lo: 0x94, hi: 0x94},
+ {value: 0x01e8, lo: 0x95, hi: 0x95},
+ {value: 0x0173, lo: 0x96, hi: 0xa6},
+ {value: 0x0188, lo: 0xa7, hi: 0xa7},
+ {value: 0x01a6, lo: 0xa8, hi: 0xae},
+ {value: 0x045f, lo: 0xaf, hi: 0xaf},
+ {value: 0x01bb, lo: 0xb0, hi: 0xbf},
+ // Block 0x9b, offset 0x338
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01eb, lo: 0x80, hi: 0x88},
+ {value: 0x045b, lo: 0x89, hi: 0x89},
+ {value: 0x01c7, lo: 0x8a, hi: 0x8a},
+ {value: 0x01d0, lo: 0x8b, hi: 0x8b},
+ {value: 0x01d6, lo: 0x8c, hi: 0x8c},
+ {value: 0x01fa, lo: 0x8d, hi: 0x8d},
+ {value: 0x01eb, lo: 0x8e, hi: 0x8e},
+ {value: 0x01e8, lo: 0x8f, hi: 0x8f},
+ {value: 0x0173, lo: 0x90, hi: 0xa0},
+ {value: 0x0188, lo: 0xa1, hi: 0xa1},
+ {value: 0x01a6, lo: 0xa2, hi: 0xa8},
+ {value: 0x045f, lo: 0xa9, hi: 0xa9},
+ {value: 0x01bb, lo: 0xaa, hi: 0xbf},
+ // Block 0x9c, offset 0x346
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0x80, hi: 0x86},
+ {value: 0x8132, lo: 0x88, hi: 0x98},
+ {value: 0x8132, lo: 0x9b, hi: 0xa1},
+ {value: 0x8132, lo: 0xa3, hi: 0xa4},
+ {value: 0x8132, lo: 0xa6, hi: 0xaa},
+ // Block 0x9d, offset 0x34c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x90, hi: 0x96},
+ // Block 0x9e, offset 0x34e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x84, hi: 0x89},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x9f, offset 0x351
+ {value: 0x0002, lo: 0x09},
+ {value: 0x0063, lo: 0x80, hi: 0x89},
+ {value: 0x1951, lo: 0x8a, hi: 0x8a},
+ {value: 0x1981, lo: 0x8b, hi: 0x8b},
+ {value: 0x199c, lo: 0x8c, hi: 0x8c},
+ {value: 0x19a2, lo: 0x8d, hi: 0x8d},
+ {value: 0x1bc0, lo: 0x8e, hi: 0x8e},
+ {value: 0x19ae, lo: 0x8f, hi: 0x8f},
+ {value: 0x197b, lo: 0xaa, hi: 0xaa},
+ {value: 0x197e, lo: 0xab, hi: 0xab},
+ // Block 0xa0, offset 0x35b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x193f, lo: 0x90, hi: 0x90},
+ // Block 0xa1, offset 0x35d
+ {value: 0x0028, lo: 0x09},
+ {value: 0x2862, lo: 0x80, hi: 0x80},
+ {value: 0x2826, lo: 0x81, hi: 0x81},
+ {value: 0x2830, lo: 0x82, hi: 0x82},
+ {value: 0x2844, lo: 0x83, hi: 0x84},
+ {value: 0x284e, lo: 0x85, hi: 0x86},
+ {value: 0x283a, lo: 0x87, hi: 0x87},
+ {value: 0x2858, lo: 0x88, hi: 0x88},
+ {value: 0x0b6f, lo: 0x90, hi: 0x90},
+ {value: 0x08e7, lo: 0x91, hi: 0x91},
+}
+
+// recompMap: 7520 bytes (entries only)
+var recompMap map[uint32]rune
+var recompMapOnce sync.Once
+
+const recompMapPacked = "" +
+ "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0
+ "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1
+ "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2
+ "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3
+ "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4
+ "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5
+ "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7
+ "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8
+ "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9
+ "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA
+ "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB
+ "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC
+ "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD
+ "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE
+ "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF
+ "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1
+ "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2
+ "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3
+ "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4
+ "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5
+ "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6
+ "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9
+ "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA
+ "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB
+ "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC
+ "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD
+ "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0
+ "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1
+ "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2
+ "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3
+ "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4
+ "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5
+ "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7
+ "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8
+ "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9
+ "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA
+ "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB
+ "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC
+ "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED
+ "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE
+ "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF
+ "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1
+ "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2
+ "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3
+ "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4
+ "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5
+ "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6
+ "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9
+ "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA
+ "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB
+ "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC
+ "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD
+ "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF
+ "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100
+ "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101
+ "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102
+ "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103
+ "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104
+ "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105
+ "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106
+ "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107
+ "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108
+ "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109
+ "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A
+ "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B
+ "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C
+ "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D
+ "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E
+ "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F
+ "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112
+ "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113
+ "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114
+ "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115
+ "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116
+ "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117
+ "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118
+ "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119
+ "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A
+ "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B
+ "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C
+ "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D
+ "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E
+ "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F
+ "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120
+ "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121
+ "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122
+ "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123
+ "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124
+ "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125
+ "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128
+ "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129
+ "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A
+ "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B
+ "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C
+ "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D
+ "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E
+ "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F
+ "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130
+ "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134
+ "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135
+ "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136
+ "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137
+ "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139
+ "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A
+ "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B
+ "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C
+ "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D
+ "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E
+ "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143
+ "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144
+ "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145
+ "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146
+ "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147
+ "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148
+ "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C
+ "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D
+ "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E
+ "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F
+ "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150
+ "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151
+ "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154
+ "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155
+ "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156
+ "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157
+ "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158
+ "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159
+ "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A
+ "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B
+ "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C
+ "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D
+ "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E
+ "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F
+ "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160
+ "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161
+ "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162
+ "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163
+ "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164
+ "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165
+ "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168
+ "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169
+ "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A
+ "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B
+ "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C
+ "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D
+ "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E
+ "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F
+ "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170
+ "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171
+ "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172
+ "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173
+ "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174
+ "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175
+ "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176
+ "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177
+ "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178
+ "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179
+ "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A
+ "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B
+ "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C
+ "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D
+ "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E
+ "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0
+ "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1
+ "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF
+ "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0
+ "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD
+ "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE
+ "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF
+ "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0
+ "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1
+ "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2
+ "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3
+ "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4
+ "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5
+ "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6
+ "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7
+ "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8
+ "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9
+ "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA
+ "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB
+ "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC
+ "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE
+ "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF
+ "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0
+ "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1
+ "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2
+ "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3
+ "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6
+ "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7
+ "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8
+ "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9
+ "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA
+ "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB
+ "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC
+ "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED
+ "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE
+ "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF
+ "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0
+ "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4
+ "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5
+ "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8
+ "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9
+ "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA
+ "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB
+ "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC
+ "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD
+ "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE
+ "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF
+ "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200
+ "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201
+ "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202
+ "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203
+ "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204
+ "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205
+ "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206
+ "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207
+ "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208
+ "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209
+ "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A
+ "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B
+ "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C
+ "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D
+ "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E
+ "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F
+ "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210
+ "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211
+ "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212
+ "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213
+ "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214
+ "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215
+ "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216
+ "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217
+ "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218
+ "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219
+ "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A
+ "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B
+ "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E
+ "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F
+ "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226
+ "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227
+ "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228
+ "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229
+ "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A
+ "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B
+ "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C
+ "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D
+ "\x00O\x03\a\x00\x00\x02." + // 0x004F0307: 0x0000022E
+ "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F
+ "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230
+ "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231
+ "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232
+ "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233
+ "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385
+ "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386
+ "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388
+ "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389
+ "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A
+ "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C
+ "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E
+ "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F
+ "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390
+ "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA
+ "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB
+ "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC
+ "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD
+ "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE
+ "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF
+ "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0
+ "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA
+ "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB
+ "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC
+ "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD
+ "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE
+ "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3
+ "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4
+ "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400
+ "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401
+ "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403
+ "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407
+ "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C
+ "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D
+ "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E
+ "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419
+ "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439
+ "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450
+ "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451
+ "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453
+ "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457
+ "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C
+ "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D
+ "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E
+ "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476
+ "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477
+ "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1
+ "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2
+ "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0
+ "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1
+ "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2
+ "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3
+ "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6
+ "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7
+ "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA
+ "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB
+ "\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC
+ "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD
+ "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE
+ "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF
+ "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2
+ "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3
+ "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4
+ "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5
+ "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6
+ "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7
+ "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA
+ "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB
+ "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC
+ "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED
+ "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE
+ "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF
+ "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0
+ "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1
+ "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2
+ "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3
+ "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4
+ "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5
+ "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8
+ "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9
+ "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622
+ "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623
+ "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624
+ "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625
+ "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626
+ "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0
+ "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2
+ "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3
+ "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929
+ "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931
+ "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934
+ "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB
+ "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC
+ "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48
+ "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B
+ "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C
+ "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94
+ "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA
+ "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB
+ "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC
+ "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48
+ "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0
+ "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7
+ "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8
+ "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA
+ "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB
+ "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A
+ "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B
+ "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C
+ "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA
+ "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC
+ "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD
+ "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE
+ "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026
+ "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06
+ "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08
+ "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A
+ "\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C
+ "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E
+ "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12
+ "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B
+ "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D
+ "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40
+ "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41
+ "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43
+ "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00
+ "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01
+ "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02
+ "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03
+ "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04
+ "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05
+ "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06
+ "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07
+ "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08
+ "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09
+ "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A
+ "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B
+ "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C
+ "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D
+ "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E
+ "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F
+ "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10
+ "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11
+ "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12
+ "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13
+ "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14
+ "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15
+ "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16
+ "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17
+ "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18
+ "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19
+ "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A
+ "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B
+ "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C
+ "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D
+ "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E
+ "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F
+ "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20
+ "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21
+ "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22
+ "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23
+ "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24
+ "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25
+ "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26
+ "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27
+ "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28
+ "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29
+ "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A
+ "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B
+ "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C
+ "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D
+ "\x00\xcf\x03\x01\x00\x00\x1e." + // 0x00CF0301: 0x00001E2E
+ "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F
+ "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30
+ "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31
+ "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32
+ "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33
+ "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34
+ "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35
+ "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36
+ "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37
+ "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38
+ "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39
+ "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A
+ "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B
+ "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C
+ "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D
+ "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E
+ "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F
+ "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40
+ "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41
+ "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42
+ "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43
+ "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44
+ "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45
+ "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46
+ "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47
+ "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48
+ "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49
+ "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A
+ "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B
+ "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C
+ "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D
+ "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E
+ "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F
+ "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50
+ "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51
+ "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52
+ "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53
+ "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54
+ "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55
+ "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56
+ "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57
+ "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58
+ "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59
+ "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A
+ "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B
+ "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C
+ "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D
+ "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E
+ "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F
+ "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60
+ "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61
+ "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62
+ "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63
+ "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64
+ "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65
+ "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66
+ "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67
+ "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68
+ "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69
+ "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A
+ "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B
+ "\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C
+ "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D
+ "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E
+ "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F
+ "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70
+ "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71
+ "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72
+ "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73
+ "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74
+ "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75
+ "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76
+ "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77
+ "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78
+ "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79
+ "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A
+ "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B
+ "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C
+ "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D
+ "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E
+ "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F
+ "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80
+ "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81
+ "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82
+ "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83
+ "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84
+ "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85
+ "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86
+ "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87
+ "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88
+ "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89
+ "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A
+ "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B
+ "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C
+ "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D
+ "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E
+ "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F
+ "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90
+ "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91
+ "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92
+ "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93
+ "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94
+ "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95
+ "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96
+ "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97
+ "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98
+ "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99
+ "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B
+ "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0
+ "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1
+ "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2
+ "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3
+ "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4
+ "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5
+ "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6
+ "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7
+ "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8
+ "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9
+ "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA
+ "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB
+ "\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC
+ "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD
+ "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE
+ "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF
+ "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0
+ "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1
+ "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2
+ "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3
+ "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4
+ "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5
+ "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6
+ "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7
+ "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8
+ "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9
+ "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA
+ "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB
+ "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC
+ "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD
+ "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE
+ "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF
+ "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0
+ "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1
+ "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2
+ "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3
+ "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4
+ "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5
+ "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6
+ "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7
+ "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8
+ "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9
+ "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA
+ "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB
+ "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC
+ "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD
+ "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE
+ "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF
+ "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0
+ "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1
+ "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2
+ "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3
+ "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4
+ "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5
+ "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6
+ "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7
+ "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8
+ "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9
+ "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA
+ "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB
+ "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC
+ "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD
+ "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE
+ "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF
+ "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0
+ "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1
+ "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2
+ "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3
+ "\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4
+ "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5
+ "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6
+ "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7
+ "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8
+ "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9
+ "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA
+ "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB
+ "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC
+ "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED
+ "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE
+ "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF
+ "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0
+ "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1
+ "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2
+ "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3
+ "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4
+ "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5
+ "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6
+ "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7
+ "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8
+ "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9
+ "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00
+ "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01
+ "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02
+ "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03
+ "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04
+ "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05
+ "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06
+ "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07
+ "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08
+ "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09
+ "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A
+ "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B
+ "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C
+ "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D
+ "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E
+ "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F
+ "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10
+ "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11
+ "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12
+ "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13
+ "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14
+ "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15
+ "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18
+ "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19
+ "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A
+ "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B
+ "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C
+ "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D
+ "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20
+ "\x03\xb7\x03\x14\x00\x00\x1f!" + // 0x03B70314: 0x00001F21
+ "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22
+ "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23
+ "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24
+ "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25
+ "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26
+ "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27
+ "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28
+ "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29
+ "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A
+ "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B
+ "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C
+ "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D
+ "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E
+ "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F
+ "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30
+ "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31
+ "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32
+ "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33
+ "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34
+ "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35
+ "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36
+ "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37
+ "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38
+ "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39
+ "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A
+ "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B
+ "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C
+ "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D
+ "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E
+ "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F
+ "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40
+ "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41
+ "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42
+ "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43
+ "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44
+ "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45
+ "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48
+ "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49
+ "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A
+ "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B
+ "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C
+ "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D
+ "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50
+ "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51
+ "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52
+ "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53
+ "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54
+ "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55
+ "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56
+ "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57
+ "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59
+ "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B
+ "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D
+ "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F
+ "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60
+ "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61
+ "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62
+ "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63
+ "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64
+ "\x1fa\x03\x01\x00\x00\x1fe" + // 0x1F610301: 0x00001F65
+ "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66
+ "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67
+ "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68
+ "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69
+ "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A
+ "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B
+ "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C
+ "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D
+ "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E
+ "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F
+ "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70
+ "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72
+ "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74
+ "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76
+ "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78
+ "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A
+ "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C
+ "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80
+ "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81
+ "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82
+ "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83
+ "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84
+ "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85
+ "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86
+ "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87
+ "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88
+ "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89
+ "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A
+ "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B
+ "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C
+ "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D
+ "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E
+ "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F
+ "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90
+ "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91
+ "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92
+ "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93
+ "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94
+ "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95
+ "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96
+ "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97
+ "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98
+ "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99
+ "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A
+ "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B
+ "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C
+ "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D
+ "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E
+ "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F
+ "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0
+ "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1
+ "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2
+ "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3
+ "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4
+ "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5
+ "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6
+ "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7
+ "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8
+ "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 0x00001FA9
+ "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA
+ "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB
+ "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC
+ "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD
+ "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE
+ "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF
+ "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0
+ "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1
+ "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2
+ "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3
+ "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4
+ "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6
+ "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7
+ "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8
+ "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9
+ "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA
+ "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC
+ "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1
+ "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2
+ "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3
+ "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4
+ "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6
+ "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7
+ "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8
+ "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA
+ "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC
+ "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD
+ "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE
+ "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF
+ "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0
+ "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1
+ "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2
+ "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6
+ "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7
+ "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8
+ "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9
+ "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA
+ "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD
+ "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE
+ "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF
+ "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0
+ "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1
+ "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2
+ "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4
+ "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5
+ "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6
+ "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7
+ "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8
+ "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9
+ "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA
+ "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC
+ "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED
+ "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2
+ "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3
+ "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4
+ "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6
+ "\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7
+ "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8
+ "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA
+ "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC
+ "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A
+ "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B
+ "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE
+ "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD
+ "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE
+ "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF
+ "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204
+ "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209
+ "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C
+ "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224
+ "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226
+ "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241
+ "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244
+ "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247
+ "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249
+ "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260
+ "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262
+ "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D
+ "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E
+ "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F
+ "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270
+ "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271
+ "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274
+ "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275
+ "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278
+ "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279
+ "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280
+ "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281
+ "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284
+ "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285
+ "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288
+ "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289
+ "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC
+ "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD
+ "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE
+ "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF
+ "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0
+ "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1
+ "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2
+ "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3
+ "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA
+ "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB
+ "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC
+ "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED
+ "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C
+ "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E
+ "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050
+ "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052
+ "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054
+ "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056
+ "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058
+ "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A
+ "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C
+ "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E
+ "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060
+ "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062
+ "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065
+ "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067
+ "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069
+ "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070
+ "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071
+ "0r0\x99\x00\x000s" + // 0x30723099: 0x00003073
+ "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074
+ "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076
+ "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077
+ "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079
+ "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A
+ "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C
+ "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D
+ "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094
+ "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E
+ "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC
+ "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE
+ "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0
+ "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2
+ "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4
+ "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6
+ "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8
+ "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA
+ "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC
+ "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE
+ "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0
+ "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2
+ "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5
+ "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7
+ "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9
+ "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0
+ "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1
+ "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3
+ "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4
+ "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6
+ "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7
+ "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9
+ "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA
+ "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC
+ "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD
+ "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4
+ "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7
+ "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8
+ "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9
+ "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA
+ "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE
+ "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A
+ "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C
+ "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB
+ "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E
+ "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F
+ "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B
+ "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C
+ "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB
+ "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC
+ "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE
+ "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA
+ "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB
+ ""
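+
+// Editorial sketch, not part of the generated file: it illustrates how
+// recompMapPacked could be decoded into recompMap. Judging from the entry
+// comments above, each entry is 8 bytes: a big-endian uint32 key built from
+// the low 16 bits of each source code point, followed by a big-endian uint32
+// composed code point. The function name is hypothetical; the surrounding
+// package presumably performs an equivalent decode lazily via recompMapOnce.
+func buildRecompMapSketch() {
+	recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
+	for i := 0; i < len(recompMapPacked); i += 8 {
+		key := uint32(recompMapPacked[i])<<24 | uint32(recompMapPacked[i+1])<<16 |
+			uint32(recompMapPacked[i+2])<<8 | uint32(recompMapPacked[i+3])
+		val := uint32(recompMapPacked[i+4])<<24 | uint32(recompMapPacked[i+5])<<16 |
+			uint32(recompMapPacked[i+6])<<8 | uint32(recompMapPacked[i+7])
+		recompMap[key] = rune(val)
+	}
+}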
+ // Total size of tables: 53KB (54514 bytes)
diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go
new file mode 100644
index 000000000..45d711900
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/triegen.go
@@ -0,0 +1,117 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Trie table generator.
+// Used by make*tables tools to generate a go file with trie data structures
+// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
+// sequence are used to lookup offsets in the index table to be used for the
+// next byte. The last byte is used to index into a table with 16-bit values.
+
+package main
+
+import (
+ "fmt"
+ "io"
+)
+
+const maxSparseEntries = 16
+
+type normCompacter struct {
+ sparseBlocks [][]uint64
+ sparseOffset []uint16
+ sparseCount int
+ name string
+}
+
+func mostFrequentStride(a []uint64) int {
+ counts := make(map[int]int)
+ var v int
+ for _, x := range a {
+ if stride := int(x) - v; v != 0 && stride >= 0 {
+ counts[stride]++
+ }
+ v = int(x)
+ }
+ var maxs, maxc int
+ for stride, cnt := range counts {
+ if cnt > maxc || (cnt == maxc && stride < maxs) {
+ maxs, maxc = stride, cnt
+ }
+ }
+ return maxs
+}
+
+func countSparseEntries(a []uint64) int {
+ stride := mostFrequentStride(a)
+ var v, count int
+ for _, tv := range a {
+ if int(tv)-v != stride {
+ if tv != 0 {
+ count++
+ }
+ }
+ v = int(tv)
+ }
+ return count
+}
+
+func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
+ if n := countSparseEntries(v); n <= maxSparseEntries {
+ return (n+1)*4 + 2, true
+ }
+ return 0, false
+}
+
+func (c *normCompacter) Store(v []uint64) uint32 {
+ h := uint32(len(c.sparseOffset))
+ c.sparseBlocks = append(c.sparseBlocks, v)
+ c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
+ c.sparseCount += countSparseEntries(v) + 1
+ return h
+}
+
+func (c *normCompacter) Handler() string {
+ return c.name + "Sparse.lookup"
+}
+
+func (c *normCompacter) Print(w io.Writer) (retErr error) {
+ p := func(f string, x ...interface{}) {
+ if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
+ retErr = err
+ }
+ }
+
+ ls := len(c.sparseBlocks)
+ p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
+ p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
+
+ ns := c.sparseCount
+ p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
+ p("var %sSparseValues = [%d]valueRange {", c.name, ns)
+ for i, b := range c.sparseBlocks {
+ p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
+ var v int
+ stride := mostFrequentStride(b)
+ n := countSparseEntries(b)
+ p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
+ for i, nv := range b {
+ if int(nv)-v != stride {
+ if v != 0 {
+ p(",hi:%#02x},", 0x80+i-1)
+ }
+ if nv != 0 {
+ p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
+ }
+ }
+ v = int(nv)
+ }
+ if v != 0 {
+ p(",hi:%#02x},", 0x80+len(b)-1)
+ }
+ }
+ p("\n}\n\n")
+ return
+}
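The Print method above writes each sparse block as a header entry {value: stride, lo: count} followed by count {value, lo, hi} ranges, and Handler() points the generated trie at a *Sparse.lookup method. A minimal sketch of a lookup routine that consumes tables in that layout; the type and variable names here are chosen for illustration only and are not part of this diff.

package main

import "fmt"

// valueRange mirrors the entries emitted by (*normCompacter).Print: the first
// entry of each block is a header (value = stride, lo = number of ranges),
// the rest are byte ranges with their starting value.
type valueRange struct {
	value  uint16
	lo, hi byte
}

type sparseBlocks struct {
	values []valueRange
	offset []uint16
}

// lookup returns the value for the final byte b of sparse block n, or 0 if b
// is not covered; within a range, values advance by the block's stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
	off := t.offset[n]
	header := t.values[off]
	lo := off + 1
	hi := lo + uint16(header.lo)
	for i := lo; i < hi; i++ {
		r := t.values[i]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
	}
	return 0
}

func main() {
	// One toy block: stride 1, a single range 0x80..0x82 starting at 0x3041.
	t := &sparseBlocks{
		values: []valueRange{{value: 1, lo: 1}, {value: 0x3041, lo: 0x80, hi: 0x82}},
		offset: []uint16{0},
	}
	fmt.Printf("%#04x\n", t.lookup(0, 0x81)) // 0x3042
}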
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/time/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/time/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/README.md b/vendor/golang.org/x/time/README.md
deleted file mode 100644
index ce9becdde..000000000
--- a/vendor/golang.org/x/time/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Go Time
-
-This repository provides supplementary Go time packages.
-
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/time`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/time`.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the time repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/time:" in the
-subject line, so it is easy to find.
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 000000000..a6ec19e14
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,675 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+ devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+ backgroundContextOnce sync.Once
+ backgroundContext netcontext.Context
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ flushed := make(chan struct{})
+ go func() {
+ defer close(flushed)
+ // Force a log flush, because with very short requests we
+ // may not ever flush logs.
+ c.flushLog(true)
+ }()
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+ // Wait for the last flush to complete before returning,
+ // otherwise the security ticket will not be valid.
+ <-flushed
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// jointContext joins two contexts in a superficial way.
+// It takes values and timeouts from a base context, and only values from another context.
+type jointContext struct {
+ base netcontext.Context
+ valuesOnly netcontext.Context
+}
+
+func (c jointContext) Deadline() (time.Time, bool) {
+ return c.base.Deadline()
+}
+
+func (c jointContext) Done() <-chan struct{} {
+ return c.base.Done()
+}
+
+func (c jointContext) Err() error {
+ return c.base.Err()
+}
+
+func (c jointContext) Value(key interface{}) interface{} {
+ if val := c.base.Value(key); val != nil {
+ return val
+ }
+ return c.valuesOnly.Value(key)
+}
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return req.Context()
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ return jointContext{
+ base: parent,
+ valuesOnly: req.Context(),
+ }
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ backgroundContextOnce.Do(func() {
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+ backgroundContext = toContext(c)
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go c.logFlusher(make(chan int))
+ })
+
+ return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctx := withContext(decorate(req.Context()), c)
+ req = req.WithContext(ctx)
+ c.req = req
+ return req, func() {}
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ // Use a test ticket under test environment.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+ // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
+ ticket = dri
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ // Only duplicate log to stderr if not running on App Engine second generation
+ if !IsSecondGen() {
+ log.Print(logLevelName[level] + ": " + s)
+ }
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
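WithContext above returns a jointContext that takes its deadline from the supplied base context and consults the base for values before falling back to the request's own context. A hedged sketch of that behaviour as an example test; it assumes the file lives under google.golang.org/appengine/internal, since external packages cannot import an internal path.

package internal_test

import (
	"fmt"
	"net/http"
	"time"

	netcontext "golang.org/x/net/context"
	"google.golang.org/appengine/internal"
)

func ExampleWithContext() {
	// The deadline is inherited from the base context, not from the request.
	base, cancel := netcontext.WithTimeout(netcontext.Background(), time.Second)
	defer cancel()

	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	ctx := internal.WithContext(base, req)

	_, ok := ctx.Deadline()
	fmt.Println(ok)
	// Output: true
}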
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 000000000..f0f40b2e3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil, errNotAppEngineContext
+ }
+ return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 000000000..e0c0b214b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "errors"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the env variables for testing background ticket in Flex.
+func SetTestEnv() func() {
+ var environ = []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+
+ for _, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ v.value = old
+ }
+ return func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }
+}
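WithCallOverride pushes an override onto a per-context stack, and Call (in both api.go and api_classic.go) pops the most recent one before doing any real work. A hedged sketch of using it to stub out RPCs in a test, again assuming the file sits under the appengine tree; the canned handler below is purely illustrative.

package internal_test

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	netcontext "golang.org/x/net/context"
	"google.golang.org/appengine/internal"
)

func ExampleWithCallOverride() {
	// Record the service and method instead of crossing the service bridge.
	ctx := internal.WithCallOverride(netcontext.Background(),
		func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
			fmt.Println(service + "." + method)
			return nil
		})

	_ = internal.Call(ctx, "logservice", "Flush", nil, nil)
	// Output: logservice.Flush
}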
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 000000000..11df8c07b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
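parseFullAppID strips an optional "partition~" prefix and splits an optional "domain:" prefix, and appID rejoins everything except the partition. A small table-test sketch, derived only from the code above; it would have to live in the same package to reach the unexported functions.

package internal

import "testing"

func TestAppIDSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{"myapp", "myapp"},                           // bare display ID
		{"s~myapp", "myapp"},                         // partition prefix dropped
		{"example.com:myapp", "example.com:myapp"},   // domain prefix kept
		{"s~example.com:myapp", "example.com:myapp"}, // both prefixes present
	}
	for _, c := range cases {
		if got := appID(c.in); got != c.want {
			t.Errorf("appID(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}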
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 000000000..db4777e68
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,308 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+func (*StringProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
+}
+func (m *StringProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StringProto.Unmarshal(m, b)
+}
+func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
+}
+func (dst *StringProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StringProto.Merge(dst, src)
+}
+func (m *StringProto) XXX_Size() int {
+ return xxx_messageInfo_StringProto.Size(m)
+}
+func (m *StringProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_StringProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringProto proto.InternalMessageInfo
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+func (*Integer32Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
+}
+func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
+}
+func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer32Proto.Merge(dst, src)
+}
+func (m *Integer32Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer32Proto.Size(m)
+}
+func (m *Integer32Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+func (*Integer64Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
+}
+func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
+}
+func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer64Proto.Merge(dst, src)
+}
+func (m *Integer64Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer64Proto.Size(m)
+}
+func (m *Integer64Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+func (*BoolProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
+}
+func (m *BoolProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BoolProto.Unmarshal(m, b)
+}
+func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
+}
+func (dst *BoolProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BoolProto.Merge(dst, src)
+}
+func (m *BoolProto) XXX_Size() int {
+ return xxx_messageInfo_BoolProto.Size(m)
+}
+func (m *BoolProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BoolProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolProto proto.InternalMessageInfo
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+func (*DoubleProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
+}
+func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
+}
+func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
+}
+func (dst *DoubleProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DoubleProto.Merge(dst, src)
+}
+func (m *DoubleProto) XXX_Size() int {
+ return xxx_messageInfo_DoubleProto.Size(m)
+}
+func (m *DoubleProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_DoubleProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+func (*BytesProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
+}
+func (m *BytesProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BytesProto.Unmarshal(m, b)
+}
+func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
+}
+func (dst *BytesProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BytesProto.Merge(dst, src)
+}
+func (m *BytesProto) XXX_Size() int {
+ return xxx_messageInfo_BytesProto.Size(m)
+}
+func (m *BytesProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BytesProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesProto proto.InternalMessageInfo
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
+func (*VoidProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
+}
+func (m *VoidProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VoidProto.Unmarshal(m, b)
+}
+func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
+}
+func (dst *VoidProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VoidProto.Merge(dst, src)
+}
+func (m *VoidProto) XXX_Size() int {
+ return xxx_messageInfo_VoidProto.Size(m)
+}
+func (m *VoidProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_VoidProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VoidProto proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
+ proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
+ proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
+ proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
+ proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
+ proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
+ proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
+}
+
+var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
+ // 199 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
+ 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
+ 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
+ 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
+ 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
+ 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
+ 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
+ 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
+ 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
+ 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
+ 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
+ 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
+ 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
+}
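The generated accessors above follow the usual proto2 pattern: required scalar fields are pointers, so they are set via helpers like proto.String and read via GetValue. A short hedged example, written as an external example test for the generated base package.

package base_test

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	basepb "google.golang.org/appengine/internal/base"
)

func ExampleStringProto() {
	// Build, marshal, and unmarshal a StringProto round trip.
	in := &basepb.StringProto{Value: proto.String("hello")}

	b, err := proto.Marshal(in)
	if err != nil {
		fmt.Println(err)
		return
	}
	out := &basepb.StringProto{}
	if err := proto.Unmarshal(b, out); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out.GetValue())
	// Output: hello
}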
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 000000000..56cd7a3ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 000000000..2fb748289
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,4367 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+func (Query_Hint) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
+}
+
+type BeginTransactionRequest_TransactionMode int32
+
+const (
+ BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
+ BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
+ BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
+)
+
+var BeginTransactionRequest_TransactionMode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "READ_ONLY",
+ 2: "READ_WRITE",
+}
+var BeginTransactionRequest_TransactionMode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "READ_ONLY": 1,
+ "READ_WRITE": 2,
+}
+
+func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
+ p := new(BeginTransactionRequest_TransactionMode)
+ *p = x
+ return p
+}
+func (x BeginTransactionRequest_TransactionMode) String() string {
+ return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
+}
+func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
+ if err != nil {
+ return err
+ }
+ *x = BeginTransactionRequest_TransactionMode(value)
+ return nil
+}
+func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
+}
+
+type Action struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+func (*Action) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
+}
+func (m *Action) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Action.Unmarshal(m, b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Action.Marshal(b, m, deterministic)
+}
+func (dst *Action) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Action.Merge(dst, src)
+}
+func (m *Action) XXX_Size() int {
+ return xxx_messageInfo_Action.Size(m)
+}
+func (m *Action) XXX_DiscardUnknown() {
+ xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+func (*PropertyValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
+}
+func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
+}
+func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue.Merge(dst, src)
+}
+func (m *PropertyValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue.Size(m)
+}
+func (m *PropertyValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
+}
+func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
+}
+func (m *PropertyValue_PointValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_PointValue.Size(m)
+}
+func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
+}
+func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
+}
+func (m *PropertyValue_UserValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_UserValue.Size(m)
+}
+func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
+}
+func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
+}
+func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+func (*Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
+}
+func (m *Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Property.Unmarshal(m, b)
+}
+func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Property.Marshal(b, m, deterministic)
+}
+func (dst *Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Property.Merge(dst, src)
+}
+func (m *Property) XXX_Size() int {
+ return xxx_messageInfo_Property.Size(m)
+}
+func (m *Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Property proto.InternalMessageInfo
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+func (*Path) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
+}
+func (m *Path) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path.Unmarshal(m, b)
+}
+func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path.Marshal(b, m, deterministic)
+}
+func (dst *Path) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path.Merge(dst, src)
+}
+func (m *Path) XXX_Size() int {
+ return xxx_messageInfo_Path.Size(m)
+}
+func (m *Path) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path proto.InternalMessageInfo
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+func (*Path_Element) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
+}
+func (m *Path_Element) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path_Element.Unmarshal(m, b)
+}
+func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
+}
+func (dst *Path_Element) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path_Element.Merge(dst, src)
+}
+func (m *Path_Element) XXX_Size() int {
+ return xxx_messageInfo_Path_Element.Size(m)
+}
+func (m *Path_Element) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path_Element.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path_Element proto.InternalMessageInfo
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+func (*Reference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
+}
+func (m *Reference) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Reference.Unmarshal(m, b)
+}
+func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
+}
+func (dst *Reference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Reference.Merge(dst, src)
+}
+func (m *Reference) XXX_Size() int {
+ return xxx_messageInfo_Reference.Size(m)
+}
+func (m *Reference) XXX_DiscardUnknown() {
+ xxx_messageInfo_Reference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Reference proto.InternalMessageInfo
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_User.Unmarshal(m, b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_User.Marshal(b, m, deterministic)
+}
+func (dst *User) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_User.Merge(dst, src)
+}
+func (m *User) XXX_Size() int {
+ return xxx_messageInfo_User.Size(m)
+}
+func (m *User) XXX_DiscardUnknown() {
+ xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+func (*EntityProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
+}
+func (m *EntityProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EntityProto.Unmarshal(m, b)
+}
+func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
+}
+func (dst *EntityProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EntityProto.Merge(dst, src)
+}
+func (m *EntityProto) XXX_Size() int {
+ return xxx_messageInfo_EntityProto.Size(m)
+}
+func (m *EntityProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_EntityProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EntityProto proto.InternalMessageInfo
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+func (*CompositeProperty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
+}
+func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
+}
+func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
+}
+func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeProperty.Merge(dst, src)
+}
+func (m *CompositeProperty) XXX_Size() int {
+ return xxx_messageInfo_CompositeProperty.Size(m)
+}
+func (m *CompositeProperty) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+func (*Index) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
+}
+func (m *Index) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index.Unmarshal(m, b)
+}
+func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index.Marshal(b, m, deterministic)
+}
+func (dst *Index) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index.Merge(dst, src)
+}
+func (m *Index) XXX_Size() int {
+ return xxx_messageInfo_Index.Size(m)
+}
+func (m *Index) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index proto.InternalMessageInfo
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+func (*Index_Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
+}
+func (m *Index_Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index_Property.Unmarshal(m, b)
+}
+func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
+}
+func (dst *Index_Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index_Property.Merge(dst, src)
+}
+func (m *Index_Property) XXX_Size() int {
+ return xxx_messageInfo_Index_Property.Size(m)
+}
+func (m *Index_Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index_Property proto.InternalMessageInfo
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+func (*CompositeIndex) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
+}
+func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
+}
+func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndex.Merge(dst, src)
+}
+func (m *CompositeIndex) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndex.Size(m)
+}
+func (m *CompositeIndex) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+func (*IndexPostfix) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
+}
+func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
+}
+func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix.Merge(dst, src)
+}
+func (m *IndexPostfix) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix.Size(m)
+}
+func (m *IndexPostfix) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
+}
+func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
+}
+func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
+}
+func (m *IndexPostfix_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
+}
+func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+func (*IndexPosition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
+}
+func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
+}
+func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
+}
+func (dst *IndexPosition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPosition.Merge(dst, src)
+}
+func (m *IndexPosition) XXX_Size() int {
+ return xxx_messageInfo_IndexPosition.Size(m)
+}
+func (m *IndexPosition) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPosition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Snapshot.Unmarshal(m, b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+}
+func (dst *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(dst, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return xxx_messageInfo_Snapshot.Size(m)
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+func (*InternalHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
+}
+func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
+}
+func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
+}
+func (dst *InternalHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalHeader.Merge(dst, src)
+}
+func (m *InternalHeader) XXX_Size() int {
+ return xxx_messageInfo_InternalHeader.Size(m)
+}
+func (m *InternalHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+func (*Transaction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
+}
+func (m *Transaction) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Transaction.Unmarshal(m, b)
+}
+func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
+}
+func (dst *Transaction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Transaction.Merge(dst, src)
+}
+func (m *Transaction) XXX_Size() int {
+ return xxx_messageInfo_Transaction.Size(m)
+}
+func (m *Transaction) XXX_DiscardUnknown() {
+ xxx_messageInfo_Transaction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Transaction proto.InternalMessageInfo
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+func (*Query) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
+}
+func (m *Query) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query.Unmarshal(m, b)
+}
+func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query.Marshal(b, m, deterministic)
+}
+func (dst *Query) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query.Merge(dst, src)
+}
+func (m *Query) XXX_Size() int {
+ return xxx_messageInfo_Query.Size(m)
+}
+func (m *Query) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query proto.InternalMessageInfo
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+func (*Query_Filter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
+func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
+}
+func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
+}
+func (dst *Query_Filter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Filter.Merge(dst, src)
+}
+func (m *Query_Filter) XXX_Size() int {
+ return xxx_messageInfo_Query_Filter.Size(m)
+}
+func (m *Query_Filter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Filter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+func (*Query_Order) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
+}
+func (m *Query_Order) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Order.Unmarshal(m, b)
+}
+func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
+}
+func (dst *Query_Order) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Order.Merge(dst, src)
+}
+func (m *Query_Order) XXX_Size() int {
+ return xxx_messageInfo_Query_Order.Size(m)
+}
+func (m *Query_Order) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Order.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Order proto.InternalMessageInfo
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+func (*CompiledQuery) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
+}
+func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
+}
+func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery.Merge(dst, src)
+}
+func (m *CompiledQuery) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery.Size(m)
+}
+func (m *CompiledQuery) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
+}
+func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
+}
+func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+func (*CompiledCursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
+}
+func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
+}
+func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor.Merge(dst, src)
+}
+func (m *CompiledCursor) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor.Size(m)
+}
+func (m *CompiledCursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
+}
+func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
+}
+func (m *CompiledCursor_Position) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position.Size(m)
+}
+func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+func (*Cursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
+}
+func (m *Cursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cursor.Unmarshal(m, b)
+}
+func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
+}
+func (dst *Cursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cursor.Merge(dst, src)
+}
+func (m *Cursor) XXX_Size() int {
+ return xxx_messageInfo_Cursor.Size(m)
+}
+func (m *Cursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cursor proto.InternalMessageInfo
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+func (*Error) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
+}
+func (m *Error) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Error.Unmarshal(m, b)
+}
+func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Error.Marshal(b, m, deterministic)
+}
+func (dst *Error) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Error.Merge(dst, src)
+}
+func (m *Error) XXX_Size() int {
+ return xxx_messageInfo_Error.Size(m)
+}
+func (m *Error) XXX_DiscardUnknown() {
+ xxx_messageInfo_Error.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Error proto.InternalMessageInfo
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+func (*Cost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
+}
+func (m *Cost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost.Unmarshal(m, b)
+}
+func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
+}
+func (dst *Cost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost.Merge(dst, src)
+}
+func (m *Cost) XXX_Size() int {
+ return xxx_messageInfo_Cost.Size(m)
+}
+func (m *Cost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost proto.InternalMessageInfo
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
+}
+func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
+}
+func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
+}
+func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
+}
+func (m *Cost_CommitCost) XXX_Size() int {
+ return xxx_messageInfo_Cost_CommitCost.Size(m)
+}
+func (m *Cost_CommitCost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+func (*GetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
+}
+func (m *GetRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetRequest.Unmarshal(m, b)
+}
+func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetRequest.Merge(dst, src)
+}
+func (m *GetRequest) XXX_Size() int {
+ return xxx_messageInfo_GetRequest.Size(m)
+}
+func (m *GetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRequest proto.InternalMessageInfo
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+func (*GetResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
+}
+func (m *GetResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse.Unmarshal(m, b)
+}
+func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse.Merge(dst, src)
+}
+func (m *GetResponse) XXX_Size() int {
+ return xxx_messageInfo_GetResponse.Size(m)
+}
+func (m *GetResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse proto.InternalMessageInfo
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
+}
+func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
+}
+func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
+}
+func (m *GetResponse_Entity) XXX_Size() int {
+ return xxx_messageInfo_GetResponse_Entity.Size(m)
+}
+func (m *GetResponse_Entity) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutRequest.Unmarshal(m, b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+}
+func (dst *PutRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutRequest.Merge(dst, src)
+}
+func (m *PutRequest) XXX_Size() int {
+ return xxx_messageInfo_PutRequest.Size(m)
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutResponse.Unmarshal(m, b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+}
+func (dst *PutResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutResponse.Merge(dst, src)
+}
+func (m *PutResponse) XXX_Size() int {
+ return xxx_messageInfo_PutResponse.Size(m)
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+func (*TouchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
+}
+func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
+}
+func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
+}
+func (dst *TouchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchRequest.Merge(dst, src)
+}
+func (m *TouchRequest) XXX_Size() int {
+ return xxx_messageInfo_TouchRequest.Size(m)
+}
+func (m *TouchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+func (*TouchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
+}
+func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
+}
+func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
+}
+func (dst *TouchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchResponse.Merge(dst, src)
+}
+func (m *TouchResponse) XXX_Size() int {
+ return xxx_messageInfo_TouchResponse.Size(m)
+}
+func (m *TouchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+func (*DeleteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
+}
+func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
+}
+func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
+}
+func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteRequest.Merge(dst, src)
+}
+func (m *DeleteRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteRequest.Size(m)
+}
+func (m *DeleteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
+}
+func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
+}
+func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
+}
+func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteResponse.Merge(dst, src)
+}
+func (m *DeleteResponse) XXX_Size() int {
+ return xxx_messageInfo_DeleteResponse.Size(m)
+}
+func (m *DeleteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+func (*NextRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
+}
+func (m *NextRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NextRequest.Unmarshal(m, b)
+}
+func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
+}
+func (dst *NextRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NextRequest.Merge(dst, src)
+}
+func (m *NextRequest) XXX_Size() int {
+ return xxx_messageInfo_NextRequest.Size(m)
+}
+func (m *NextRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_NextRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextRequest proto.InternalMessageInfo
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+func (*QueryResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
+}
+func (m *QueryResult) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QueryResult.Unmarshal(m, b)
+}
+func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
+}
+func (dst *QueryResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryResult.Merge(dst, src)
+}
+func (m *QueryResult) XXX_Size() int {
+ return xxx_messageInfo_QueryResult.Size(m)
+}
+func (m *QueryResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryResult proto.InternalMessageInfo
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
+}
+func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
+}
+func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
+}
+func (m *AllocateIdsRequest) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsRequest.Size(m)
+}
+func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
+}
+func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
+}
+func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
+}
+func (m *AllocateIdsResponse) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsResponse.Size(m)
+}
+func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+func (*CompositeIndices) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
+}
+func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
+}
+func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndices.Merge(dst, src)
+}
+func (m *CompositeIndices) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndices.Size(m)
+}
+func (m *CompositeIndices) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+func (*AddActionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
+}
+func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
+}
+func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsRequest.Merge(dst, src)
+}
+func (m *AddActionsRequest) XXX_Size() int {
+ return xxx_messageInfo_AddActionsRequest.Size(m)
+}
+func (m *AddActionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+func (*AddActionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
+}
+func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
+}
+func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsResponse.Merge(dst, src)
+}
+func (m *AddActionsResponse) XXX_Size() int {
+ return xxx_messageInfo_AddActionsResponse.Size(m)
+}
+func (m *AddActionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
+ DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
+ Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
+ PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
+}
+func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
+}
+func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
+}
+func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
+}
+func (m *BeginTransactionRequest) XXX_Size() int {
+ return xxx_messageInfo_BeginTransactionRequest.Size(m)
+}
+func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+func (m *BeginTransactionRequest) GetDatabaseId() string {
+ if m != nil && m.DatabaseId != nil {
+ return *m.DatabaseId
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_BeginTransactionRequest_Mode
+}
+
+func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
+ if m != nil {
+ return m.PreviousTransaction
+ }
+ return nil
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+func (*CommitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
+}
+func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
+}
+func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse.Merge(dst, src)
+}
+func (m *CommitResponse) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse.Size(m)
+}
+func (m *CommitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
+}
+func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
+}
+func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
+}
+func (m *CommitResponse_Version) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse_Version.Size(m)
+}
+func (m *CommitResponse_Version) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Action)(nil), "appengine.Action")
+ proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
+ proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
+ proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
+ proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
+ proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
+ proto.RegisterType((*Property)(nil), "appengine.Property")
+ proto.RegisterType((*Path)(nil), "appengine.Path")
+ proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
+ proto.RegisterType((*Reference)(nil), "appengine.Reference")
+ proto.RegisterType((*User)(nil), "appengine.User")
+ proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
+ proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
+ proto.RegisterType((*Index)(nil), "appengine.Index")
+ proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
+ proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
+ proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
+ proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
+ proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
+ proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
+ proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
+ proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
+ proto.RegisterType((*Query)(nil), "appengine.Query")
+ proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
+ proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
+ proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
+ proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
+ proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
+ proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
+ proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
+ proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
+ proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
+ proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
+ proto.RegisterType((*Error)(nil), "appengine.Error")
+ proto.RegisterType((*Cost)(nil), "appengine.Cost")
+ proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
+ proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
+ proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
+ proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
+ proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
+ proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
+ proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
+ proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
+ proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
+ proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
+ proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
+ proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
+ proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
+ proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
+ proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
+ proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
+ proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
+ proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
+ proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
+ proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
+}
+
+var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
+ // 4156 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
+ 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
+ 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
+ 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
+ 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
+ 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
+ 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
+ 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
+ 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
+ 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
+ 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
+ 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
+ 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
+ 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
+ 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
+ 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
+ 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
+ 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
+ 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
+ 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
+ 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
+ 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
+ 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
+ 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
+ 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
+ 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
+ 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
+ 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
+ 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
+ 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
+ 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
+ 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
+ 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
+ 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
+ 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
+ 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
+ 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
+ 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
+ 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
+ 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
+ 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
+ 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
+ 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
+ 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
+ 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
+ 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
+ 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
+ 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
+ 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
+ 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
+ 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
+ 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
+ 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
+ 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
+ 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
+ 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
+ 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
+ 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
+ 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
+ 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
+ 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
+ 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
+ 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
+ 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
+ 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
+ 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
+ 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
+ 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
+ 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
+ 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
+ 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
+ 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
+ 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
+ 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
+ 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
+ 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
+ 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
+ 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
+ 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
+ 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
+ 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
+ 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
+ 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
+ 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
+ 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
+ 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
+ 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
+ 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
+ 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
+ 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
+ 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
+ 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
+ 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
+ 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
+ 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
+ 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
+ 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
+ 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
+ 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
+ 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
+ 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
+ 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
+ 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
+ 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
+ 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
+ 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
+ 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
+ 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
+ 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
+ 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
+ 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
+ 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
+ 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
+ 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
+ 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
+ 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
+ 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
+ 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
+ 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
+ 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
+ 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
+ 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
+ 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
+ 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
+ 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
+ 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
+ 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
+ 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
+ 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
+ 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
+ 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
+ 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
+ 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
+ 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
+ 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
+ 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
+ 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
+ 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
+ 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
+ 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
+ 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
+ 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
+ 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
+ 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
+ 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
+ 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
+ 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
+ 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
+ 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
+ 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
+ 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
+ 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
+ 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
+ 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
+ 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
+ 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
+ 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
+ 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
+ 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
+ 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
+ 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
+ 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
+ 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
+ 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
+ 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
+ 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
+ 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
+ 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
+ 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
+ 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
+ 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
+ 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
+ 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
+ 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
+ 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
+ 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
+ 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
+ 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
+ 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
+ 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
+ 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
+ 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
+ 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
+ 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
+ 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
+ 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
+ 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
+ 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
+ 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
+ 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
+ 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
+ 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
+ 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
+ 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
+ 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
+ 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
+ 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
+ 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
+ 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
+ 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
+ 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
+ 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
+ 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
+ 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
+ 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
+ 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
+ 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
+ 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
+ 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
+ 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
+ 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
+ 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
+ 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
+ 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
+ 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
+ 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
+ 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
+ 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
+ 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
+ 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
+ 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
+ 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
+ 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
+ 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
+ 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
+ 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
+ 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
+ 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
+ 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
+ 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
+ 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
+ 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
+ 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
+ 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
+ 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
+ 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
+ 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
+ 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
+ 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
+ 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
+ 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
+ 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
+ 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
+ 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
+ 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
+ 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
+ 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
+ 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
+ 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
+ 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
+ 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
+ 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
+ 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
+ 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
+ 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
+ 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
+ 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
+ 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
+ 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100644
index 000000000..497b4d9a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,551 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+ optional string database_id = 4;
+
+ enum TransactionMode {
+ UNKNOWN = 0;
+ READ_ONLY = 1;
+ READ_WRITE = 2;
+ }
+ optional TransactionMode mode = 5 [default = UNKNOWN];
+
+ optional Transaction previous_transaction = 7;
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 000000000..9b4134e42
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,55 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+var (
+ // This is set to true in identity_classic.go, which is behind the appengine build tag.
+ // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
+ // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
+ // first-gen runtime. See IsStandard below for the second-gen check.
+ appengineStandard bool
+
+ // This is set to true in identity_flex.go, which is behind the appenginevm build tag.
+ appengineFlex bool
+)
+
+// AppID is the implementation of the wrapper function of the same name in
+// ../identity.go. See that file for commentary.
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
+
+// IsStandard is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsStandard() bool {
+ // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
+ // second-gen (>= Go 1.11).
+ return appengineStandard || IsSecondGen()
+}
+
+// IsSecondGen is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsSecondGen() bool {
+ // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
+ return os.Getenv("GAE_ENV") == "standard"
+}
+
+// IsFlex is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsFlex() bool {
+ return appengineFlex
+}
+
+// IsAppEngine is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsAppEngine() bool {
+ return IsStandard() || IsFlex()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 000000000..4e979f45e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,61 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func init() {
+ appengineStandard = true
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.ModuleName(c)
+}
+func VersionID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return c.FullyQualifiedAppID()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
new file mode 100644
index 000000000..d5e2e7b5e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_flex.go
@@ -0,0 +1,11 @@
+// Copyright 2018 Google LLC. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package internal
+
+func init() {
+ appengineFlex = true
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 000000000..5d8067263
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,134 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "strings"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil
+ }
+ return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
+ return dc
+ }
+ // If the header isn't set, read zone from the metadata service.
+ // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
+ zone, err := getMetadata("instance/zone")
+ if err != nil {
+ log.Printf("Datacenter: %v", err)
+ return ""
+ }
+ parts := strings.Split(string(zone), "/")
+ if len(parts) == 0 {
+ return ""
+ }
+ return parts[len(parts)-1]
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_ENV"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_SERVICE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
+ return appID
+ }
+ if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
+ return project
+ }
+ return string(mustGetMetadata("instance/attributes/gae_project"))
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_APPLICATION"); s != "" {
+ return s
+ }
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
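
The precedence throughout this file is environment variables first, then the metadata server. A compact, hypothetical sketch of the app-ID branch, standalone and env-only; the real code above also falls back to mustGetMetadata when the variables are unset.

	package main

	import (
		"fmt"
		"os"
	)

	// appIDSketch mirrors the env-var precedence of fullyQualifiedAppID and
	// partitionlessAppID above, omitting the metadata-service fallback.
	func appIDSketch() string {
		if s := os.Getenv("GAE_APPLICATION"); s != "" {
			return s
		}
		appID := os.Getenv("GAE_LONG_APP_ID")
		if appID == "" {
			appID = os.Getenv("GOOGLE_CLOUD_PROJECT")
		}
		if part := os.Getenv("GAE_PARTITION"); part != "" {
			return part + "~" + appID // e.g. "s~my-project"
		}
		return appID
	}

	func main() {
		fmt.Println(appIDSketch())
	}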
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 000000000..051ea3980
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
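
A hedged sketch of how an API implementation is expected to use these two hooks from its init function, as the comment above asks. The service name and codes below are illustrative, not taken from a real service.

	package exampleapi // hypothetical API package

	import "google.golang.org/appengine/internal"

	func init() {
		// Register human-readable names for this (made-up) service's error codes.
		internal.RegisterErrorCodeMap("example_service", map[int32]string{
			1: "INVALID_REQUEST",
			2: "DEADLINE_EXCEEDED",
		})
		// Mark code 2 as a timeout so (*APIError).IsTimeout reports it.
		internal.RegisterTimeoutErrorCode("example_service", 2)
	}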
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
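
From a caller's side, the two methods above give a simple way to special-case timeouts. A hypothetical sketch, not part of the vendored tree; the call parameter stands in for any internal API call.

	package exampleclient // hypothetical caller of an App Engine API

	import (
		"log"

		netcontext "golang.org/x/net/context"

		"google.golang.org/appengine/internal"
	)

	// isRegisteredTimeout reports whether the error from call is an APIError
	// whose service/code pair was registered via RegisterTimeoutErrorCode.
	func isRegisteredTimeout(ctx netcontext.Context, call func(netcontext.Context) error) bool {
		err := call(ctx)
		if err == nil {
			return false
		}
		if apiErr, ok := err.(*internal.APIError); ok && apiErr.IsTimeout() {
			log.Printf("%s code %d timed out: %v", apiErr.Service, apiErr.Code, apiErr)
			return true
		}
		return false
	}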
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
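
A hedged sketch of what a NamespaceMods entry can look like. The package, service name, and request type below are invented for illustration; only the idempotence check reflects what the comment above requires.

	package exampleapi // hypothetical package registering a namespace hook

	import (
		"github.com/golang/protobuf/proto"

		"google.golang.org/appengine/internal"
	)

	// request stands in for a generated proto message with a NameSpace field
	// (hypothetical; real services use their own generated types).
	type request struct{ NameSpace *string }

	func (r *request) Reset()         { *r = request{} }
	func (r *request) String() string { return "" }
	func (*request) ProtoMessage()    {}

	func init() {
		internal.NamespaceMods["example_service"] = func(m proto.Message, namespace string) {
			req, ok := m.(*request)
			if !ok || req.NameSpace != nil {
				return // not our message, or already namespaced: stay idempotent
			}
			req.NameSpace = &namespace
		}
	}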
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 000000000..8545ac4ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,1313 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
+}
+
+type LogServiceError struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+func (*LogServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
+}
+func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
+}
+func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
+}
+func (dst *LogServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogServiceError.Merge(dst, src)
+}
+func (m *LogServiceError) XXX_Size() int {
+ return xxx_messageInfo_LogServiceError.Size(m)
+}
+func (m *LogServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+func (*UserAppLogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
+}
+func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
+}
+func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogLine.Merge(dst, src)
+}
+func (m *UserAppLogLine) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogLine.Size(m)
+}
+func (m *UserAppLogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
+}
+func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
+}
+func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
+}
+func (m *UserAppLogGroup) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogGroup.Size(m)
+}
+func (m *UserAppLogGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+func (*FlushRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
+}
+func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
+}
+func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
+}
+func (dst *FlushRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FlushRequest.Merge(dst, src)
+}
+func (m *FlushRequest) XXX_Size() int {
+ return xxx_messageInfo_FlushRequest.Size(m)
+}
+func (m *FlushRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_FlushRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+func (*SetStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
+}
+func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
+}
+func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
+}
+func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetStatusRequest.Merge(dst, src)
+}
+func (m *SetStatusRequest) XXX_Size() int {
+ return xxx_messageInfo_SetStatusRequest.Size(m)
+}
+func (m *SetStatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+func (*LogOffset) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
+}
+func (m *LogOffset) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogOffset.Unmarshal(m, b)
+}
+func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
+}
+func (dst *LogOffset) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogOffset.Merge(dst, src)
+}
+func (m *LogOffset) XXX_Size() int {
+ return xxx_messageInfo_LogOffset.Size(m)
+}
+func (m *LogOffset) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogOffset.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogOffset proto.InternalMessageInfo
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+func (*LogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
+}
+func (m *LogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogLine.Unmarshal(m, b)
+}
+func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
+}
+func (dst *LogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogLine.Merge(dst, src)
+}
+func (m *LogLine) XXX_Size() int {
+ return xxx_messageInfo_LogLine.Size(m)
+}
+func (m *LogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogLine proto.InternalMessageInfo
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+func (*RequestLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
+}
+func (m *RequestLog) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RequestLog.Unmarshal(m, b)
+}
+func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
+}
+func (dst *RequestLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestLog.Merge(dst, src)
+}
+func (m *RequestLog) XXX_Size() int {
+ return xxx_messageInfo_RequestLog.Size(m)
+}
+func (m *RequestLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestLog proto.InternalMessageInfo
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+func (*LogModuleVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
+}
+func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
+}
+func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
+}
+func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogModuleVersion.Merge(dst, src)
+}
+func (m *LogModuleVersion) XXX_Size() int {
+ return xxx_messageInfo_LogModuleVersion.Size(m)
+}
+func (m *LogModuleVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+func (*LogReadRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
+}
+func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
+}
+func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadRequest.Merge(dst, src)
+}
+func (m *LogReadRequest) XXX_Size() int {
+ return xxx_messageInfo_LogReadRequest.Size(m)
+}
+func (m *LogReadRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+func (*LogReadResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
+}
+func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
+}
+func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadResponse.Merge(dst, src)
+}
+func (m *LogReadResponse) XXX_Size() int {
+ return xxx_messageInfo_LogReadResponse.Size(m)
+}
+func (m *LogReadResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+func (*LogUsageRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
+}
+func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
+}
+func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRecord.Merge(dst, src)
+}
+func (m *LogUsageRecord) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRecord.Size(m)
+}
+func (m *LogUsageRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+func (*LogUsageRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
+}
+func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
+}
+func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRequest.Merge(dst, src)
+}
+func (m *LogUsageRequest) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRequest.Size(m)
+}
+func (m *LogUsageRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+func (*LogUsageResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
+}
+func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
+}
+func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageResponse.Merge(dst, src)
+}
+func (m *LogUsageResponse) XXX_Size() int {
+ return xxx_messageInfo_LogUsageResponse.Size(m)
+}
+func (m *LogUsageResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
+ proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
+ proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
+ proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
+ proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
+ proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
+ proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
+ proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
+ proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
+ proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
+ proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
+ proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
+ proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
+ proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
+}
+
+var fileDescriptor_log_service_f054fd4b5012319d = []byte{
+ // 1553 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
+ 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
+ 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
+ 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
+ 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
+ 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
+ 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
+ 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
+ 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
+ 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
+ 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
+ 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
+ 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
+ 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
+ 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
+ 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
+ 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
+ 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
+ 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
+ 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
+ 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
+ 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
+ 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
+ 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
+ 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
+ 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
+ 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
+ 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
+ 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
+ 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
+ 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
+ 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
+ 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
+ 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
+ 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
+ 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
+ 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
+ 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
+ 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
+ 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
+ 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
+ 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
+ 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
+ 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
+ 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
+ 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
+ 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
+ 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
+ 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
+ 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
+ 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
+ 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
+ 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
+ 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
+ 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
+ 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
+ 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
+ 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
+ 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
+ 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
+ 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
+ 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
+ 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
+ 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
+ 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
+ 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
+ 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
+ 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
+ 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
+ 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
+ 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
+ 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
+ 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
+ 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
+ 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
+ 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
+ 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
+ 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
+ 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
+ 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
+ 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
+ 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
+ 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
+ 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
+ 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
+ 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
+ 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
+ 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
+ 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
+ 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
+ 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
+ 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
+ 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
+ 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
+ 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
+ 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
+ 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
+ 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 000000000..8981dc475
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 000000000..1e765312f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,16 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine_internal"
+)
+
+func Main() {
+ MainPath = ""
+ appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
new file mode 100644
index 000000000..357dce4dd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_common.go
@@ -0,0 +1,7 @@
+package internal
+
+// MainPath stores the file path of the main package. On App Engine Standard
+// using Go version 1.9 and below, this will be unset. On App Engine Flex and
+// App Engine Standard second-gen (Go 1.11 and above), this will be the
+// filepath to package main.
+var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 000000000..ddb79a333
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,69 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+func Main() {
+ MainPath = filepath.Dir(findMainPath())
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ host := ""
+ if IsDevAppServer() {
+ host = "127.0.0.1"
+ }
+ if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+// Find the path to package main by looking at the root Caller.
+func findMainPath() string {
+ pc := make([]uintptr, 100)
+ n := runtime.Callers(2, pc)
+ frames := runtime.CallersFrames(pc[:n])
+ for {
+ frame, more := frames.Next()
+ // Tests won't have package main, instead they have testing.tRunner
+ if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
+ return frame.File
+ }
+ if !more {
+ break
+ }
+ }
+ return ""
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 000000000..c4ba63bb4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 000000000..3b94cf0c6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100644
index 000000000..2fdb546a6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 000000000..8d782a38e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,361 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (dst *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(dst, src)
+}
+func (m *Request) XXX_Size() int {
+ return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+func (*ApplicationError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
+}
+func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
+}
+func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
+}
+func (dst *ApplicationError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ApplicationError.Merge(dst, src)
+}
+func (m *ApplicationError) XXX_Size() int {
+ return xxx_messageInfo_ApplicationError.Size(m)
+}
+func (m *ApplicationError) XXX_DiscardUnknown() {
+ xxx_messageInfo_ApplicationError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+func (*RpcError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
+}
+func (m *RpcError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RpcError.Unmarshal(m, b)
+}
+func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
+}
+func (dst *RpcError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RpcError.Merge(dst, src)
+}
+func (m *RpcError) XXX_Size() int {
+ return xxx_messageInfo_RpcError.Size(m)
+}
+func (m *RpcError) XXX_DiscardUnknown() {
+ xxx_messageInfo_RpcError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcError proto.InternalMessageInfo
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+func (*Response) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
+}
+func (m *Response) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Response.Unmarshal(m, b)
+}
+func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+}
+func (dst *Response) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Response.Merge(dst, src)
+}
+func (m *Response) XXX_Size() int {
+ return xxx_messageInfo_Response.Size(m)
+}
+func (m *Response) XXX_DiscardUnknown() {
+ xxx_messageInfo_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Response proto.InternalMessageInfo
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Request)(nil), "remote_api.Request")
+ proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
+ proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
+ proto.RegisterType((*Response)(nil), "remote_api.Response")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
+}
+
+var fileDescriptor_remote_api_1978114ec33a273d = []byte{
+ // 531 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
+ 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
+ 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
+ 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
+ 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
+ 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
+ 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
+ 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
+ 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
+ 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
+ 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
+ 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
+ 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
+ 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
+ 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
+ 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
+ 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
+ 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
+ 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
+ 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
+ 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
+ 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
+ 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
+ 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
+ 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
+ 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
+ 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
+ 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
+ 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
+ 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
+ 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
+ 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
+ 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
+ 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 000000000..f21763a4e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 000000000..9006ae653
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,115 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
+ if transactionFromContext(c) != nil {
+ return nil, errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if previousTransaction != nil {
+ req.PreviousTransaction = previousTransaction
+ }
+ if readOnly {
+ req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
+ } else {
+ req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return nil, err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return &t.transaction, err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return &t.transaction, ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return &t.transaction, ErrConcurrentTransaction
+ }
+ }
+ return &t.transaction, err
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 000000000..5f727750a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,527 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
+}
+
+type URLFetchServiceError struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
+}
+func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
+}
+func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
+}
+func (m *URLFetchServiceError) XXX_Size() int {
+ return xxx_messageInfo_URLFetchServiceError.Size(m)
+}
+func (m *URLFetchServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+func (*URLFetchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
+}
+func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
+}
+func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest.Merge(dst, src)
+}
+func (m *URLFetchRequest) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest.Size(m)
+}
+func (m *URLFetchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
+}
+func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
+}
+func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
+}
+func (m *URLFetchRequest_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest_Header.Size(m)
+}
+func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+func (*URLFetchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
+}
+func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
+}
+func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse.Merge(dst, src)
+}
+func (m *URLFetchResponse) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse.Size(m)
+}
+func (m *URLFetchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
+}
+func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
+}
+func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
+}
+func (m *URLFetchResponse_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse_Header.Size(m)
+}
+func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
+ proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
+ proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
+ proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
+ proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
+}
+
+var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
+ // 770 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
+ 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
+ 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
+ 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
+ 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
+ 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
+ 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
+ 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
+ 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
+ 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
+ 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
+ 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
+ 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
+ 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
+ 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
+ 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
+ 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
+ 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
+ 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
+ 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
+ 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
+ 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
+ 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
+ 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
+ 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
+ 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
+ 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
+ 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
+ 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
+ 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
+ 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
+ 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
+ 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
+ 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
+ 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
+ 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
+ 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
+ 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
+ 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
+ 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
+ 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
+ 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
+ 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
+ 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
+ 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
+ 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
+ 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
+ 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
+ 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 000000000..f695edf6a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 000000000..6ffe1e6d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if the
+// request is unsupported or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
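
The package above is meant to be consumed through Client rather than by constructing Transport directly. A minimal, illustrative sketch of that usage follows; the handler name and target URL are hypothetical, and appengine.NewContext / appengine.Main come from the parent google.golang.org/appengine package vendored alongside this one:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"

        "google.golang.org/appengine"
        "google.golang.org/appengine/urlfetch"
    )

    // fetchHandler is a hypothetical handler: it derives a request-scoped context,
    // obtains an *http.Client whose RoundTripper is the urlfetch Transport above,
    // and issues an outbound request through it.
    func fetchHandler(w http.ResponseWriter, r *http.Request) {
        ctx := appengine.NewContext(r) // the context's deadline becomes the fetch deadline
        client := urlfetch.Client(ctx) // Transport{Context: ctx} under the hood

        resp, err := client.Get("https://example.com/") // placeholder URL
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        defer resp.Body.Close()

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            // ErrTruncatedBody is surfaced here if the proxy truncated the response.
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        fmt.Fprintf(w, "fetched %d bytes, status %s\n", len(body), resp.Status)
    }

    func main() {
        http.HandleFunc("/fetch", fetchHandler)
        appengine.Main()
    }

Note that, per the RoundTrip doc comment, a 404 or 500 comes back as a normal *http.Response with err set to nil; only unsupported methods or URL Fetch proxy failures produce an error.
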
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 4ad1aed8f..000000000
--- a/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Before reporting an issue, please ensure you are using the latest release of fsnotify.
-
-### Which operating system (GOOS) and version are you using?
-
-Linux: lsb_release -a
-macOS: sw_vers
-Windows: systeminfo | findstr /B /C:OS
-
-### Please describe the issue that occurred.
-
-### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 64ddf7cef..000000000
--- a/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,8 +0,0 @@
-#### What does this pull request do?
-
-
-#### Where should the reviewer start?
-
-
-#### How should this be manually tested?
-
diff --git a/vendor/gopkg.in/fsnotify.v1/example_test.go b/vendor/gopkg.in/fsnotify.v1/example_test.go
deleted file mode 100644
index 700502cb3..000000000
--- a/vendor/gopkg.in/fsnotify.v1/example_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-package fsnotify_test
-
-import (
- "log"
-
- "github.com/fsnotify/fsnotify"
-)
-
-func ExampleNewWatcher() {
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- log.Fatal(err)
- }
- defer watcher.Close()
-
- done := make(chan bool)
- go func() {
- for {
- select {
- case event := <-watcher.Events:
- log.Println("event:", event)
- if event.Op&fsnotify.Write == fsnotify.Write {
- log.Println("modified file:", event.Name)
- }
- case err := <-watcher.Errors:
- log.Println("error:", err)
- }
- }
- }()
-
- err = watcher.Add("/tmp/foo")
- if err != nil {
- log.Fatal(err)
- }
- <-done
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go b/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
deleted file mode 100644
index f9771d9df..000000000
--- a/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-package fsnotify
-
-import (
- "os"
- "testing"
- "time"
-)
-
-func TestEventStringWithValue(t *testing.T) {
- for opMask, expectedString := range map[Op]string{
- Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
- Rename: `"/usr/someFile": RENAME`,
- Remove: `"/usr/someFile": REMOVE`,
- Write | Chmod: `"/usr/someFile": WRITE|CHMOD`,
- } {
- event := Event{Name: "/usr/someFile", Op: opMask}
- if event.String() != expectedString {
- t.Fatalf("Expected %s, got: %v", expectedString, event.String())
- }
-
- }
-}
-
-func TestEventOpStringWithValue(t *testing.T) {
- expectedOpString := "WRITE|CHMOD"
- event := Event{Name: "someFile", Op: Write | Chmod}
- if event.Op.String() != expectedOpString {
- t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
- }
-}
-
-func TestEventOpStringWithNoValue(t *testing.T) {
- expectedOpString := ""
- event := Event{Name: "testFile", Op: 0}
- if event.Op.String() != expectedOpString {
- t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
- }
-}
-
-// TestWatcherClose tests that the goroutine started by creating the watcher can be
-// signalled to return at any time, even if there is no goroutine listening on the events
-// or errors channels.
-func TestWatcherClose(t *testing.T) {
- t.Parallel()
-
- name := tempMkFile(t, "")
- w := newWatcher(t)
- err := w.Add(name)
- if err != nil {
- t.Fatal(err)
- }
-
- err = os.Remove(name)
- if err != nil {
- t.Fatal(err)
- }
- // Allow the watcher to receive the event.
- time.Sleep(time.Millisecond * 100)
-
- err = w.Close()
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
deleted file mode 100644
index 26623efef..000000000
--- a/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-type testFd [2]int
-
-func makeTestFd(t *testing.T) testFd {
- var tfd testFd
- errno := unix.Pipe(tfd[:])
- if errno != nil {
- t.Fatalf("Failed to create pipe: %v", errno)
- }
- return tfd
-}
-
-func (tfd testFd) fd() int {
- return tfd[0]
-}
-
-func (tfd testFd) closeWrite(t *testing.T) {
- errno := unix.Close(tfd[1])
- if errno != nil {
- t.Fatalf("Failed to close write end of pipe: %v", errno)
- }
-}
-
-func (tfd testFd) put(t *testing.T) {
- buf := make([]byte, 10)
- _, errno := unix.Write(tfd[1], buf)
- if errno != nil {
- t.Fatalf("Failed to write to pipe: %v", errno)
- }
-}
-
-func (tfd testFd) get(t *testing.T) {
- buf := make([]byte, 10)
- _, errno := unix.Read(tfd[0], buf)
- if errno != nil {
- t.Fatalf("Failed to read from pipe: %v", errno)
- }
-}
-
-func (tfd testFd) close() {
- unix.Close(tfd[1])
- unix.Close(tfd[0])
-}
-
-func makePoller(t *testing.T) (testFd, *fdPoller) {
- tfd := makeTestFd(t)
- poller, err := newFdPoller(tfd.fd())
- if err != nil {
- t.Fatalf("Failed to create poller: %v", err)
- }
- return tfd, poller
-}
-
-func TestPollerWithBadFd(t *testing.T) {
- _, err := newFdPoller(-1)
- if err != unix.EBADF {
- t.Fatalf("Expected EBADF, got: %v", err)
- }
-}
-
-func TestPollerWithData(t *testing.T) {
- tfd, poller := makePoller(t)
- defer tfd.close()
- defer poller.close()
-
- tfd.put(t)
- ok, err := poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if !ok {
- t.Fatalf("expected poller to return true")
- }
- tfd.get(t)
-}
-
-func TestPollerWithWakeup(t *testing.T) {
- tfd, poller := makePoller(t)
- defer tfd.close()
- defer poller.close()
-
- err := poller.wake()
- if err != nil {
- t.Fatalf("wake failed: %v", err)
- }
- ok, err := poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if ok {
- t.Fatalf("expected poller to return false")
- }
-}
-
-func TestPollerWithClose(t *testing.T) {
- tfd, poller := makePoller(t)
- defer tfd.close()
- defer poller.close()
-
- tfd.closeWrite(t)
- ok, err := poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if !ok {
- t.Fatalf("expected poller to return true")
- }
-}
-
-func TestPollerWithWakeupAndData(t *testing.T) {
- tfd, poller := makePoller(t)
- defer tfd.close()
- defer poller.close()
-
- tfd.put(t)
- err := poller.wake()
- if err != nil {
- t.Fatalf("wake failed: %v", err)
- }
-
- // both data and wakeup
- ok, err := poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if !ok {
- t.Fatalf("expected poller to return true")
- }
-
- // data is still in the buffer, wakeup is cleared
- ok, err = poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if !ok {
- t.Fatalf("expected poller to return true")
- }
-
- tfd.get(t)
- // data is gone, only wakeup now
- err = poller.wake()
- if err != nil {
- t.Fatalf("wake failed: %v", err)
- }
- ok, err = poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- if ok {
- t.Fatalf("expected poller to return false")
- }
-}
-
-func TestPollerConcurrent(t *testing.T) {
- tfd, poller := makePoller(t)
- defer tfd.close()
- defer poller.close()
-
- oks := make(chan bool)
- live := make(chan bool)
- defer close(live)
- go func() {
- defer close(oks)
- for {
- ok, err := poller.wait()
- if err != nil {
- t.Fatalf("poller failed: %v", err)
- }
- oks <- ok
- if !<-live {
- return
- }
- }
- }()
-
- // Try a write
- select {
- case <-time.After(50 * time.Millisecond):
- case <-oks:
- t.Fatalf("poller did not wait")
- }
- tfd.put(t)
- if !<-oks {
- t.Fatalf("expected true")
- }
- tfd.get(t)
- live <- true
-
- // Try a wakeup
- select {
- case <-time.After(50 * time.Millisecond):
- case <-oks:
- t.Fatalf("poller did not wait")
- }
- err := poller.wake()
- if err != nil {
- t.Fatalf("wake failed: %v", err)
- }
- if <-oks {
- t.Fatalf("expected false")
- }
- live <- true
-
- // Try a close
- select {
- case <-time.After(50 * time.Millisecond):
- case <-oks:
- t.Fatalf("poller did not wait")
- }
- tfd.closeWrite(t)
- if !<-oks {
- t.Fatalf("expected true")
- }
- tfd.get(t)
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
deleted file mode 100644
index 54f3f00eb..000000000
--- a/vendor/gopkg.in/fsnotify.v1/inotify_test.go
+++ /dev/null
@@ -1,449 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-)
-
-func TestInotifyCloseRightAway(t *testing.T) {
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher")
- }
-
- // Close immediately; it won't even reach the first unix.Read.
- w.Close()
-
- // Wait for the close to complete.
- <-time.After(50 * time.Millisecond)
- isWatcherReallyClosed(t, w)
-}
-
-func TestInotifyCloseSlightlyLater(t *testing.T) {
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher")
- }
-
- // Wait until readEvents has reached unix.Read, and Close.
- <-time.After(50 * time.Millisecond)
- w.Close()
-
- // Wait for the close to complete.
- <-time.After(50 * time.Millisecond)
- isWatcherReallyClosed(t, w)
-}
-
-func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher")
- }
- w.Add(testDir)
-
- // Wait until readEvents has reached unix.Read, and Close.
- <-time.After(50 * time.Millisecond)
- w.Close()
-
- // Wait for the close to complete.
- <-time.After(50 * time.Millisecond)
- isWatcherReallyClosed(t, w)
-}
-
-func TestInotifyCloseAfterRead(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher")
- }
-
- err = w.Add(testDir)
- if err != nil {
- t.Fatalf("Failed to add .")
- }
-
- // Generate an event.
- os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
-
- // Wait for readEvents to read the event, then close the watcher.
- <-time.After(50 * time.Millisecond)
- w.Close()
-
- // Wait for the close to complete.
- <-time.After(50 * time.Millisecond)
- isWatcherReallyClosed(t, w)
-}
-
-func isWatcherReallyClosed(t *testing.T, w *Watcher) {
- select {
- case err, ok := <-w.Errors:
- if ok {
- t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
- }
- default:
- t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
- }
-
- select {
- case _, ok := <-w.Events:
- if ok {
- t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
- }
- default:
- t.Fatalf("w.Events would have blocked; readEvents is still alive!")
- }
-}
-
-func TestInotifyCloseCreate(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher: %v", err)
- }
- defer w.Close()
-
- err = w.Add(testDir)
- if err != nil {
- t.Fatalf("Failed to add testDir: %v", err)
- }
- h, err := os.Create(filepath.Join(testDir, "testfile"))
- if err != nil {
- t.Fatalf("Failed to create file in testdir: %v", err)
- }
- h.Close()
- select {
- case _ = <-w.Events:
- case err := <-w.Errors:
- t.Fatalf("Error from watcher: %v", err)
- case <-time.After(50 * time.Millisecond):
- t.Fatalf("Took too long to wait for event")
- }
-
- // At this point, we've received one event, so the goroutine is ready.
- // It's also blocking on unix.Read.
- // Now we try to swap the file descriptor under its nose.
- w.Close()
- w, err = NewWatcher()
- defer w.Close()
- if err != nil {
- t.Fatalf("Failed to create second watcher: %v", err)
- }
-
- <-time.After(50 * time.Millisecond)
- err = w.Add(testDir)
- if err != nil {
- t.Fatalf("Error adding testDir again: %v", err)
- }
-}
-
-// This test verifies the watcher can keep up with file creations/deletions
-// when under load.
-func TestInotifyStress(t *testing.T) {
- maxNumToCreate := 1000
-
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
- testFilePrefix := filepath.Join(testDir, "testfile")
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher: %v", err)
- }
- defer w.Close()
-
- err = w.Add(testDir)
- if err != nil {
- t.Fatalf("Failed to add testDir: %v", err)
- }
-
- doneChan := make(chan struct{})
- // The buffer ensures that the file generation goroutine is never blocked.
- errChan := make(chan error, 2*maxNumToCreate)
-
- go func() {
- for i := 0; i < maxNumToCreate; i++ {
- testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
-
- handle, err := os.Create(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Create failed: %v", err)
- continue
- }
-
- err = handle.Close()
- if err != nil {
- errChan <- fmt.Errorf("Close failed: %v", err)
- continue
- }
- }
-
- // If we delete a newly created file too quickly, inotify will skip the
- // create event and only send the delete event.
- time.Sleep(100 * time.Millisecond)
-
- for i := 0; i < maxNumToCreate; i++ {
- testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
- err = os.Remove(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Remove failed: %v", err)
- }
- }
-
- close(doneChan)
- }()
-
- creates := 0
- removes := 0
-
- finished := false
- after := time.After(10 * time.Second)
- for !finished {
- select {
- case <-after:
- t.Fatalf("Not done")
- case <-doneChan:
- finished = true
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- t.Fatalf("Got an error from watcher: %v", err)
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, testFilePrefix) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- if evt.Op == Remove {
- removes++
- }
- }
- }
-
- // Drain remaining events from channels
- count := 0
- for count < 10 {
- select {
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- t.Fatalf("Got an error from watcher: %v", err)
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, testFilePrefix) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- if evt.Op == Remove {
- removes++
- }
- count = 0
- default:
- count++
- // Give the watcher chances to fill the channels.
- time.Sleep(time.Millisecond)
- }
- }
-
- if creates-removes > 1 || creates-removes < -1 {
- t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
- }
- if creates < 50 {
- t.Fatalf("Expected at least 50 creates, got %d", creates)
- }
-}
-
-func TestInotifyRemoveTwice(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
- testFile := filepath.Join(testDir, "testfile")
-
- handle, err := os.Create(testFile)
- if err != nil {
- t.Fatalf("Create failed: %v", err)
- }
- handle.Close()
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher: %v", err)
- }
- defer w.Close()
-
- err = w.Add(testFile)
- if err != nil {
- t.Fatalf("Failed to add testFile: %v", err)
- }
-
- err = w.Remove(testFile)
- if err != nil {
- t.Fatalf("wanted successful remove but got: %v", err)
- }
-
- err = w.Remove(testFile)
- if err == nil {
- t.Fatalf("no error on removing invalid file")
- }
-
- w.mu.Lock()
- defer w.mu.Unlock()
- if len(w.watches) != 0 {
- t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
- }
- if len(w.paths) != 0 {
- t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
- }
-}
-
-func TestInotifyInnerMapLength(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
- testFile := filepath.Join(testDir, "testfile")
-
- handle, err := os.Create(testFile)
- if err != nil {
- t.Fatalf("Create failed: %v", err)
- }
- handle.Close()
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher: %v", err)
- }
- defer w.Close()
-
- err = w.Add(testFile)
- if err != nil {
- t.Fatalf("Failed to add testFile: %v", err)
- }
- go func() {
- for err := range w.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- err = os.Remove(testFile)
- if err != nil {
- t.Fatalf("Failed to remove testFile: %v", err)
- }
- _ = <-w.Events // consume Remove event
- <-time.After(50 * time.Millisecond) // wait for IN_IGNORED to propagate
-
- w.mu.Lock()
- defer w.mu.Unlock()
- if len(w.watches) != 0 {
- t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
- }
- if len(w.paths) != 0 {
- t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
- }
-}
-
-func TestInotifyOverflow(t *testing.T) {
- // We need to generate many more events than the
- // fs.inotify.max_queued_events sysctl setting.
- // We use multiple goroutines (one per directory)
- // to speed up file creation.
- numDirs := 128
- numFiles := 1024
-
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- w, err := NewWatcher()
- if err != nil {
- t.Fatalf("Failed to create watcher: %v", err)
- }
- defer w.Close()
-
- for dn := 0; dn < numDirs; dn++ {
- testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
-
- err := os.Mkdir(testSubdir, 0777)
- if err != nil {
- t.Fatalf("Cannot create subdir: %v", err)
- }
-
- err = w.Add(testSubdir)
- if err != nil {
- t.Fatalf("Failed to add subdir: %v", err)
- }
- }
-
- errChan := make(chan error, numDirs*numFiles)
-
- for dn := 0; dn < numDirs; dn++ {
- testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
-
- go func() {
- for fn := 0; fn < numFiles; fn++ {
- testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
-
- handle, err := os.Create(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Create failed: %v", err)
- continue
- }
-
- err = handle.Close()
- if err != nil {
- errChan <- fmt.Errorf("Close failed: %v", err)
- continue
- }
- }
- }()
- }
-
- creates := 0
- overflows := 0
-
- after := time.After(10 * time.Second)
- for overflows == 0 && creates < numDirs*numFiles {
- select {
- case <-after:
- t.Fatalf("Not done")
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- if err == ErrEventOverflow {
- overflows++
- } else {
- t.Fatalf("Got an error from watcher: %v", err)
- }
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, testDir) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- }
- }
-
- if creates == numDirs*numFiles {
- t.Fatalf("Could not trigger overflow")
- }
-
- if overflows == 0 {
- t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
- numDirs*numFiles, creates)
- }
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
deleted file mode 100644
index cd6adc273..000000000
--- a/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fsnotify
-
-import (
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
-//
-// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
-//
-// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
-// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
-func testExchangedataForWatcher(t *testing.T, watchDir bool) {
- // Create directory to watch
- testDir1 := tempMkdir(t)
-
- // For the intermediate file
- testDir2 := tempMkdir(t)
-
- defer os.RemoveAll(testDir1)
- defer os.RemoveAll(testDir2)
-
- resolvedFilename := "TestFsnotifyEvents.file"
-
- // TextMate does:
- //
- // 1. exchangedata (intermediate, resolved)
- // 2. unlink intermediate
- //
- // Let's try to simulate that:
- resolved := filepath.Join(testDir1, resolvedFilename)
- intermediate := filepath.Join(testDir2, resolvedFilename+"~")
-
- // Make sure we create the file before we start watching
- createAndSyncFile(t, resolved)
-
- watcher := newWatcher(t)
-
- // Test both variants in isolation
- if watchDir {
- addWatch(t, watcher, testDir1)
- } else {
- addWatch(t, watcher, resolved)
- }
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var removeReceived counter
- var createReceived counter
-
- done := make(chan bool)
-
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(resolved) {
- if event.Op&Remove == Remove {
- removeReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- }
- t.Logf("event received: %s", event)
- }
- done <- true
- }()
-
- // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
- for i := 1; i <= 3; i++ {
- // The intermediate file is created in a folder outside the watcher
- createAndSyncFile(t, intermediate)
-
- // 1. Swap
- if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
- t.Fatalf("[%d] exchangedata failed: %s", i, err)
- }
-
- time.Sleep(50 * time.Millisecond)
-
- // 2. Delete the intermediate file
- err := os.Remove(intermediate)
-
- if err != nil {
- t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
- }
-
- time.Sleep(50 * time.Millisecond)
-
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
-
- // The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
- if removeReceived.value() < 3 {
- t.Fatal("fsnotify remove events have not been received after 500 ms")
- }
-
- if createReceived.value() < 3 {
- t.Fatal("fsnotify create events have not been received after 500 ms")
- }
-
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
-func TestExchangedataInWatchedDir(t *testing.T) {
- testExchangedataForWatcher(t, true)
-}
-
-// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
-func TestExchangedataInWatchedFile(t *testing.T) {
- testExchangedataForWatcher(t, false)
-}
-
-func createAndSyncFile(t *testing.T, filepath string) {
- f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating %s failed: %s", filepath, err)
- }
- f1.Sync()
- f1.Close()
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_test.go b/vendor/gopkg.in/fsnotify.v1/integration_test.go
deleted file mode 100644
index 8b7e9d3ec..000000000
--- a/vendor/gopkg.in/fsnotify.v1/integration_test.go
+++ /dev/null
@@ -1,1237 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-package fsnotify
-
-import (
- "io/ioutil"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "runtime"
- "sync/atomic"
- "testing"
- "time"
-)
-
-// An atomic counter
-type counter struct {
- val int32
-}
-
-func (c *counter) increment() {
- atomic.AddInt32(&c.val, 1)
-}
-
-func (c *counter) value() int32 {
- return atomic.LoadInt32(&c.val)
-}
-
-func (c *counter) reset() {
- atomic.StoreInt32(&c.val, 0)
-}
-
-// tempMkdir makes a temporary directory
-func tempMkdir(t *testing.T) string {
- dir, err := ioutil.TempDir("", "fsnotify")
- if err != nil {
- t.Fatalf("failed to create test directory: %s", err)
- }
- return dir
-}
-
-// tempMkFile makes a temporary file.
-func tempMkFile(t *testing.T, dir string) string {
- f, err := ioutil.TempFile(dir, "fsnotify")
- if err != nil {
- t.Fatalf("failed to create test file: %v", err)
- }
- defer f.Close()
- return f.Name()
-}
-
-// newWatcher initializes an fsnotify Watcher instance.
-func newWatcher(t *testing.T) *Watcher {
- watcher, err := NewWatcher()
- if err != nil {
- t.Fatalf("NewWatcher() failed: %s", err)
- }
- return watcher
-}
-
-// addWatch adds a watch for a directory
-func addWatch(t *testing.T, watcher *Watcher, dir string) {
- if err := watcher.Add(dir); err != nil {
- t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
- }
-}
-
-func TestFsnotifyMultipleOperations(t *testing.T) {
- watcher := newWatcher(t)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory that's not watched
- testDirToMoveFiles := tempMkdir(t)
- defer os.RemoveAll(testDirToMoveFiles)
-
- testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
- testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
-
- addWatch(t, watcher, testDir)
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived, renameReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Rename == Rename {
- renameReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // Modify the file outside of the watched dir
- f, err = os.Open(testFileRenamed)
- if err != nil {
- t.Fatalf("open test renamed file failed: %s", err)
- }
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Recreate the file that was moved
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Close()
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- mReceived := modifyReceived.value()
- if mReceived != 1 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
- }
- dReceived := deleteReceived.value()
- rReceived := renameReceived.value()
- if dReceived+rReceived != 1 {
- t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyMultipleCreates(t *testing.T) {
- watcher := newWatcher(t)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
-
- addWatch(t, watcher, testDir)
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- os.Remove(testFile)
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Recreate the file
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Close()
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Modify
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Modify
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- mReceived := modifyReceived.value()
- if mReceived < 3 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
- }
- dReceived := deleteReceived.value()
- if dReceived != 1 {
- t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyDirOnly(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- // This should NOT add any events to the fsnotify event queue
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- os.Remove(testFile)
- os.Remove(testFileAlreadyExists)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 1 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
- }
- mReceived := modifyReceived.value()
- if mReceived != 1 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
- }
- dReceived := deleteReceived.value()
- if dReceived != 2 {
- t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyDeleteWatchedDir(t *testing.T) {
- watcher := newWatcher(t)
- defer watcher.Close()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- addWatch(t, watcher, testDir)
-
- // Add a watch for testFile
- addWatch(t, watcher, testFileAlreadyExists)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var deleteReceived counter
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- }()
-
- os.RemoveAll(testDir)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- dReceived := deleteReceived.value()
- if dReceived < 2 {
- t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
- }
-}
-
-func TestFsnotifySubDir(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
- testSubDir := filepath.Join(testDir, "sub")
- testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
- t.Logf("event received: %s", event)
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- addWatch(t, watcher, testDir)
-
- // Create sub-directory
- if err := os.Mkdir(testSubDir, 0777); err != nil {
- t.Fatalf("failed to create test sub-directory: %s", err)
- }
-
- // Create a file
- var f *os.File
- f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- // Create a file (Should not see this! we are not watching subdir)
- var fs *os.File
- fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- fs.Sync()
- fs.Close()
-
- time.Sleep(200 * time.Millisecond)
-
- // Make sure receive deletes for both file and sub-directory
- os.RemoveAll(testSubDir)
- os.Remove(testFile1)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- dReceived := deleteReceived.value()
- if dReceived != 2 {
- t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyRename(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var renameReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
- if event.Op&Rename == Rename {
- renameReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- // Add a watch for testFile
- addWatch(t, watcher, testFile)
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if renameReceived.value() == 0 {
- t.Fatal("fsnotify rename events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToCreate(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory to get file
- testDirFrom := tempMkdir(t)
- defer os.RemoveAll(testDirFrom)
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
- if event.Op&Create == Create {
- createReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if createReceived.value() == 0 {
- t.Fatal("fsnotify create events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToOverwrite(t *testing.T) {
- switch runtime.GOOS {
- case "plan9", "windows":
- t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory to get file
- testDirFrom := tempMkdir(t)
- defer os.RemoveAll(testDirFrom)
-
- testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Create a file
- var fr *os.File
- fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- fr.Sync()
- fr.Close()
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var eventReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testFileRenamed) {
- eventReceived.increment()
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if eventReceived.value() == 0 {
- t.Fatal("fsnotify events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestRemovalOfWatch(t *testing.T) {
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- watcher := newWatcher(t)
- defer watcher.Close()
-
- addWatch(t, watcher, testDir)
- if err := watcher.Remove(testDir); err != nil {
- t.Fatalf("Could not remove the watch: %v\n", err)
- }
-
- go func() {
- select {
- case ev := <-watcher.Events:
- t.Fatalf("We received event: %v\n", ev)
- case <-time.After(500 * time.Millisecond):
- t.Log("No event received, as expected.")
- }
- }()
-
- time.Sleep(200 * time.Millisecond)
- // Modify the file outside of the watched dir
- f, err := os.Open(testFileAlreadyExists)
- if err != nil {
- t.Fatalf("Open test file failed: %s", err)
- }
- f.WriteString("data")
- f.Sync()
- f.Close()
- if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
- time.Sleep(400 * time.Millisecond)
-}
-
-func TestFsnotifyAttrib(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("attributes don't work on Windows.")
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- // The modifyReceived counter counts IsModify events that are not IsAttrib,
- // and the attribReceived counts IsAttrib events (which are also IsModify as
- // a consequence).
- var modifyReceived counter
- var attribReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Chmod == Chmod {
- attribReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- // Add a watch for testFile
- addWatch(t, watcher, testFile)
-
- if err := os.Chmod(testFile, 0700); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
- time.Sleep(500 * time.Millisecond)
- if modifyReceived.value() != 0 {
- t.Fatal("received an unexpected modify event when creating a test file")
- }
- if attribReceived.value() == 0 {
- t.Fatal("fsnotify attribute events have not received after 500 ms")
- }
-
- // Modifying the contents of the file does not set the attrib flag (although eg. the mtime
- // might have been modified).
- modifyReceived.reset()
- attribReceived.reset()
-
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
- if err != nil {
- t.Fatalf("reopening test file failed: %s", err)
- }
-
- f.WriteString("more data")
- f.Sync()
- f.Close()
-
- time.Sleep(500 * time.Millisecond)
-
- if modifyReceived.value() != 1 {
- t.Fatal("didn't receive a modify event after changing test file contents")
- }
-
- if attribReceived.value() != 0 {
- t.Fatal("did receive an unexpected attrib event after changing test file contents")
- }
-
- modifyReceived.reset()
- attribReceived.reset()
-
- // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
- // of the file are not changed though)
- if err := os.Chmod(testFile, 0600); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
-
- time.Sleep(500 * time.Millisecond)
-
- if attribReceived.value() != 1 {
- t.Fatal("didn't receive an attribute change after 500ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(1e9):
- t.Fatal("event stream was not closed after 1 second")
- }
-
- os.Remove(testFile)
-}
-
-func TestFsnotifyClose(t *testing.T) {
- watcher := newWatcher(t)
- watcher.Close()
-
- var done int32
- go func() {
- watcher.Close()
- atomic.StoreInt32(&done, 1)
- }()
-
- time.Sleep(50e6) // 50 ms
- if atomic.LoadInt32(&done) == 0 {
- t.Fatal("double Close() test failed: second Close() call didn't return")
- }
-
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- if err := watcher.Add(testDir); err == nil {
- t.Fatal("expected error on Watch() after Close(), got nil")
- }
-}
-
-func TestFsnotifyFakeSymlink(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("symlinks don't work on Windows.")
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- var errorsReceived counter
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for errors := range watcher.Errors {
- t.Logf("Received error: %s", errors)
- errorsReceived.increment()
- }
- }()
-
- // Count the CREATE events received
- var createEventsReceived, otherEventsReceived counter
- go func() {
- for ev := range watcher.Events {
- t.Logf("event received: %s", ev)
- if ev.Op&Create == Create {
- createEventsReceived.increment()
- } else {
- otherEventsReceived.increment()
- }
- }
- }()
-
- addWatch(t, watcher, testDir)
-
- if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
- t.Fatalf("Failed to create bogus symlink: %s", err)
- }
- t.Logf("Created bogus symlink")
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
-
-	// Should not be an error; there are just no events for broken links (nothing is being watched)
- if errorsReceived.value() > 0 {
- t.Fatal("fsnotify errors have been received.")
- }
- if otherEventsReceived.value() > 0 {
- t.Fatal("fsnotify other events received on the broken link")
- }
-
- // Except for 1 create event (for the link itself)
- if createEventsReceived.value() == 0 {
- t.Fatal("fsnotify create events were not received after 500 ms")
- }
- if createEventsReceived.value() > 1 {
-		t.Fatal("more fsnotify create events received than expected")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
-}
-
-func TestCyclicSymlink(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("symlinks don't work on Windows.")
- }
-
- watcher := newWatcher(t)
-
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- link := path.Join(testDir, "link")
- if err := os.Symlink(".", link); err != nil {
- t.Fatalf("could not make symlink: %v", err)
- }
- addWatch(t, watcher, testDir)
-
- var createEventsReceived counter
- go func() {
- for ev := range watcher.Events {
- if ev.Op&Create == Create {
- createEventsReceived.increment()
- }
- }
- }()
-
- if err := os.Remove(link); err != nil {
- t.Fatalf("Error removing link: %v", err)
- }
-
- // It would be nice to be able to expect a delete event here, but kqueue has
- // no way for us to get events on symlinks themselves, because opening them
- // opens an fd to the file to which they point.
-
- if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
-		t.Fatalf("could not write to file: %v", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
-
- if got := createEventsReceived.value(); got == 0 {
-		t.Errorf("want at least 1 create event, got %v", got)
- }
-
- watcher.Close()
-}
-
-// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
-// See https://codereview.appspot.com/103300045/
-// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
-func TestConcurrentRemovalOfWatch(t *testing.T) {
- if runtime.GOOS != "darwin" {
- t.Skip("regression test for race only present on darwin")
- }
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- watcher := newWatcher(t)
- defer watcher.Close()
-
- addWatch(t, watcher, testDir)
-
- // Test that RemoveWatch can be invoked concurrently, with no data races.
- removed1 := make(chan struct{})
- go func() {
- defer close(removed1)
- watcher.Remove(testDir)
- }()
- removed2 := make(chan struct{})
- go func() {
-		defer close(removed2)
- watcher.Remove(testDir)
- }()
- <-removed1
- <-removed2
-}
-
-func TestClose(t *testing.T) {
- // Regression test for #59 bad file descriptor from Close
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- watcher := newWatcher(t)
- if err := watcher.Add(testDir); err != nil {
- t.Fatalf("Expected no error on Add, got %v", err)
- }
- err := watcher.Close()
- if err != nil {
- t.Fatalf("Expected no error on Close, got %v.", err)
- }
-}
-
-// TestRemoveWithClose tests if one can handle Remove events and, at the same
-// time, close Watcher object without any data races.
-func TestRemoveWithClose(t *testing.T) {
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- const fileN = 200
- tempFiles := make([]string, 0, fileN)
- for i := 0; i < fileN; i++ {
- tempFiles = append(tempFiles, tempMkFile(t, testDir))
- }
- watcher := newWatcher(t)
- if err := watcher.Add(testDir); err != nil {
- t.Fatalf("Expected no error on Add, got %v", err)
- }
- startC, stopC := make(chan struct{}), make(chan struct{})
- errC := make(chan error)
- go func() {
- for {
- select {
- case <-watcher.Errors:
- case <-watcher.Events:
- case <-stopC:
- return
- }
- }
- }()
- go func() {
- <-startC
- for _, fileName := range tempFiles {
- os.Remove(fileName)
- }
- }()
- go func() {
- <-startC
- errC <- watcher.Close()
- }()
- close(startC)
- defer close(stopC)
- if err := <-errC; err != nil {
- t.Fatalf("Expected no error on Close, got %v.", err)
- }
-}
-
-func testRename(file1, file2 string) error {
- switch runtime.GOOS {
- case "windows", "plan9":
- return os.Rename(file1, file2)
- default:
- cmd := exec.Command("mv", file1, file2)
- return cmd.Run()
- }
-}
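
The deleted tests above all follow the same pattern: drain watcher.Events in a goroutine and inspect the Op bit flags (Write, Chmod, Create). A minimal sketch of that pattern using the public fsnotify v1 API is shown below; the import path github.com/fsnotify/fsnotify and the watched directory "/tmp" are assumptions for illustration, not part of this diff.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil { // directory to watch (illustrative)
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // channel closed by Close()
			}
			// Op is a bit mask; a single event can carry several flags.
			if ev.Op&fsnotify.Write == fsnotify.Write {
				log.Printf("modified: %s", ev.Name)
			}
			if ev.Op&fsnotify.Chmod == fsnotify.Chmod {
				log.Printf("attributes changed: %s", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}
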
diff --git a/vendor/gopkg.in/mgo.v2/LICENSE b/vendor/gopkg.in/mgo.v2/LICENSE
deleted file mode 100644
index 770c7672b..000000000
--- a/vendor/gopkg.in/mgo.v2/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-mgo - MongoDB driver for Go
-
-Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/mgo.v2/README.md b/vendor/gopkg.in/mgo.v2/README.md
deleted file mode 100644
index f4e452c04..000000000
--- a/vendor/gopkg.in/mgo.v2/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-The MongoDB driver for Go
--------------------------
-
-Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c b/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c
deleted file mode 100644
index 8be0bc459..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// +build !windows
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sasl/sasl.h>
-
-static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
-{
- if (!result) {
- return SASL_BADPARAM;
- }
- switch (id) {
- case SASL_CB_USER:
- *result = (char *)context;
- break;
- case SASL_CB_AUTHNAME:
- *result = (char *)context;
- break;
- case SASL_CB_LANGUAGE:
- *result = NULL;
- break;
- default:
- return SASL_BADPARAM;
- }
- if (len) {
- *len = *result ? strlen(*result) : 0;
- }
- return SASL_OK;
-}
-
-typedef int (*callback)(void);
-
-static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
-{
- if (!conn || !result || id != SASL_CB_PASS) {
- return SASL_BADPARAM;
- }
- *result = (sasl_secret_t *)context;
- return SASL_OK;
-}
-
-sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
-{
- sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
- int n = 0;
-
- size_t len = strlen(password);
- sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
- if (!secret) {
- free(cb);
- return NULL;
- }
- strcpy((char *)secret->data, password);
- secret->len = len;
-
- cb[n].id = SASL_CB_PASS;
- cb[n].proc = (callback)&mgo_sasl_secret;
- cb[n].context = secret;
- n++;
-
- cb[n].id = SASL_CB_USER;
- cb[n].proc = (callback)&mgo_sasl_simple;
- cb[n].context = (char*)username;
- n++;
-
- cb[n].id = SASL_CB_AUTHNAME;
- cb[n].proc = (callback)&mgo_sasl_simple;
- cb[n].context = (char*)username;
- n++;
-
- cb[n].id = SASL_CB_LIST_END;
- cb[n].proc = NULL;
- cb[n].context = NULL;
-
- return cb;
-}
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go b/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go
deleted file mode 100644
index 8375dddf8..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Package sasl is an implementation detail of the mgo package.
-//
-// This package is not meant to be used by itself.
-//
-
-// +build !windows
-
-package sasl
-
-// #cgo LDFLAGS: -lsasl2
-//
-// struct sasl_conn {};
-//
-// #include <stdlib.h>
-// #include <sasl/sasl.h>
-//
-// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
-//
-import "C"
-
-import (
- "fmt"
- "strings"
- "sync"
- "unsafe"
-)
-
-type saslStepper interface {
- Step(serverData []byte) (clientData []byte, done bool, err error)
- Close()
-}
-
-type saslSession struct {
- conn *C.sasl_conn_t
- step int
- mech string
-
- cstrings []*C.char
- callbacks *C.sasl_callback_t
-}
-
-var initError error
-var initOnce sync.Once
-
-func initSASL() {
- rc := C.sasl_client_init(nil)
- if rc != C.SASL_OK {
- initError = saslError(rc, nil, "cannot initialize SASL library")
- }
-}
-
-func New(username, password, mechanism, service, host string) (saslStepper, error) {
- initOnce.Do(initSASL)
- if initError != nil {
- return nil, initError
- }
-
- ss := &saslSession{mech: mechanism}
- if service == "" {
- service = "mongodb"
- }
- if i := strings.Index(host, ":"); i >= 0 {
- host = host[:i]
- }
- ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
- rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
- if rc != C.SASL_OK {
- ss.Close()
- return nil, saslError(rc, nil, "cannot create new SASL client")
- }
- return ss, nil
-}
-
-func (ss *saslSession) cstr(s string) *C.char {
- cstr := C.CString(s)
- ss.cstrings = append(ss.cstrings, cstr)
- return cstr
-}
-
-func (ss *saslSession) Close() {
- for _, cstr := range ss.cstrings {
- C.free(unsafe.Pointer(cstr))
- }
- ss.cstrings = nil
-
- if ss.callbacks != nil {
- C.free(unsafe.Pointer(ss.callbacks))
- }
-
- // The documentation of SASL dispose makes it clear that this should only
- // be done when the connection is done, not when the authentication phase
- // is done, because an encryption layer may have been negotiated.
- // Even then, we'll do this for now, because it's simpler and prevents
- // keeping track of this state for every socket. If it breaks, we'll fix it.
- C.sasl_dispose(&ss.conn)
-}
-
-func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
- ss.step++
- if ss.step > 10 {
- return nil, false, fmt.Errorf("too many SASL steps without authentication")
- }
- var cclientData *C.char
- var cclientDataLen C.uint
- var rc C.int
- if ss.step == 1 {
- var mechanism *C.char // ignored - must match cred
- rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
- } else {
- var cserverData *C.char
- var cserverDataLen C.uint
- if len(serverData) > 0 {
- cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
- cserverDataLen = C.uint(len(serverData))
- }
- rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
- }
- if cclientData != nil && cclientDataLen > 0 {
- clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
- }
- if rc == C.SASL_OK {
- return clientData, true, nil
- }
- if rc == C.SASL_CONTINUE {
- return clientData, false, nil
- }
- return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
-}
-
-func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
- var detail string
- if conn == nil {
- detail = C.GoString(C.sasl_errstring(rc, nil, nil))
- } else {
- detail = C.GoString(C.sasl_errdetail(conn))
- }
-	return fmt.Errorf("%s: %s", msg, detail)
-}
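
The saslStepper interface removed above is driven as a loop: feed each server challenge to Step until done is reported. The sketch below only illustrates that contract; mockStepper, authenticate, and sendToServer are hypothetical stand-ins, not mgo APIs.

package main

import "fmt"

type saslStepper interface {
	Step(serverData []byte) (clientData []byte, done bool, err error)
	Close()
}

// mockStepper finishes after two round trips; it exists only to show the contract.
type mockStepper struct{ step int }

func (m *mockStepper) Step(serverData []byte) ([]byte, bool, error) {
	m.step++
	return []byte(fmt.Sprintf("client-token-%d", m.step)), m.step >= 2, nil
}

func (m *mockStepper) Close() {}

// authenticate drives a stepper until it reports completion, exchanging each
// client token for the next server challenge via sendToServer.
func authenticate(ss saslStepper, sendToServer func([]byte) []byte) error {
	defer ss.Close()
	var serverData []byte
	for {
		clientData, done, err := ss.Step(serverData)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		serverData = sendToServer(clientData)
	}
}

func main() {
	err := authenticate(&mockStepper{}, func(b []byte) []byte {
		return append([]byte("challenge-for-"), b...)
	})
	fmt.Println("authenticated:", err == nil)
}
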
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c b/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
deleted file mode 100644
index c359fd6ed..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
+++ /dev/null
@@ -1,122 +0,0 @@
-#include "sasl_windows.h"
-
-static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
-
-SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
-{
- SEC_WINNT_AUTH_IDENTITY auth_identity;
- SECURITY_INTEGER ignored;
-
- auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
- auth_identity.User = (LPSTR) username;
- auth_identity.UserLength = strlen(username);
- auth_identity.Password = NULL;
- auth_identity.PasswordLength = 0;
- if(password){
- auth_identity.Password = (LPSTR) password;
- auth_identity.PasswordLength = strlen(password);
- }
- auth_identity.Domain = (LPSTR) domain;
- auth_identity.DomainLength = strlen(domain);
- return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
-}
-
-int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID buffer, ULONG buffer_length, PVOID *out_buffer, ULONG *out_buffer_length, char *target)
-{
- SecBufferDesc inbuf;
- SecBuffer in_bufs[1];
- SecBufferDesc outbuf;
- SecBuffer out_bufs[1];
-
- if (has_context > 0) {
- // If we already have a context, we now have data to send.
- // Put this data in an inbuf.
- inbuf.ulVersion = SECBUFFER_VERSION;
- inbuf.cBuffers = 1;
- inbuf.pBuffers = in_bufs;
- in_bufs[0].pvBuffer = buffer;
- in_bufs[0].cbBuffer = buffer_length;
- in_bufs[0].BufferType = SECBUFFER_TOKEN;
- }
-
- outbuf.ulVersion = SECBUFFER_VERSION;
- outbuf.cBuffers = 1;
- outbuf.pBuffers = out_bufs;
- out_bufs[0].pvBuffer = NULL;
- out_bufs[0].cbBuffer = 0;
- out_bufs[0].BufferType = SECBUFFER_TOKEN;
-
- ULONG context_attr = 0;
-
- int ret = call_sspi_initialize_security_context(cred_handle,
- has_context > 0 ? context : NULL,
- (LPSTR) target,
- ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
- 0,
- SECURITY_NETWORK_DREP,
- has_context > 0 ? &inbuf : NULL,
- 0,
- context,
- &outbuf,
- &context_attr,
- NULL);
-
- *out_buffer = malloc(out_bufs[0].cbBuffer);
- *out_buffer_length = out_bufs[0].cbBuffer;
- memcpy(*out_buffer, out_bufs[0].pvBuffer, *out_buffer_length);
-
- return ret;
-}
-
-int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
-{
- SecPkgContext_Sizes sizes;
- SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);
-
- if (status != SEC_E_OK) {
- return status;
- }
-
- size_t user_plus_realm_length = strlen(user_plus_realm);
- int msgSize = 4 + user_plus_realm_length;
- char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
- msg[sizes.cbSecurityTrailer + 0] = 1;
- msg[sizes.cbSecurityTrailer + 1] = 0;
- msg[sizes.cbSecurityTrailer + 2] = 0;
- msg[sizes.cbSecurityTrailer + 3] = 0;
- memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);
-
- SecBuffer wrapBufs[3];
- SecBufferDesc wrapBufDesc;
- wrapBufDesc.cBuffers = 3;
- wrapBufDesc.pBuffers = wrapBufs;
- wrapBufDesc.ulVersion = SECBUFFER_VERSION;
-
- wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
- wrapBufs[0].BufferType = SECBUFFER_TOKEN;
- wrapBufs[0].pvBuffer = msg;
-
- wrapBufs[1].cbBuffer = msgSize;
- wrapBufs[1].BufferType = SECBUFFER_DATA;
- wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
-
- wrapBufs[2].cbBuffer = sizes.cbBlockSize;
- wrapBufs[2].BufferType = SECBUFFER_PADDING;
- wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;
-
- status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
- if (status != SEC_E_OK) {
- free(msg);
- return status;
- }
-
- *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
- *buffer = malloc(*buffer_length);
-
- memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
- memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
- memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);
-
- free(msg);
- return SEC_E_OK;
-}
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go b/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
deleted file mode 100644
index d8ec00137..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package sasl
-
-// #include "sasl_windows.h"
-import "C"
-
-import (
- "fmt"
- "strings"
- "sync"
- "unsafe"
-)
-
-type saslStepper interface {
- Step(serverData []byte) (clientData []byte, done bool, err error)
- Close()
-}
-
-type saslSession struct {
- // Credentials
- mech string
- service string
- host string
- userPlusRealm string
- target string
- domain string
-
- // Internal state
- authComplete bool
- errored bool
- step int
-
- // C internal state
- credHandle C.CredHandle
- context C.CtxtHandle
- hasContext C.int
-
- // Keep track of pointers we need to explicitly free
- stringsToFree []*C.char
-}
-
-var initError error
-var initOnce sync.Once
-
-func initSSPI() {
- rc := C.load_secur32_dll()
- if rc != 0 {
- initError = fmt.Errorf("Error loading libraries: %v", rc)
- }
-}
-
-func New(username, password, mechanism, service, host string) (saslStepper, error) {
- initOnce.Do(initSSPI)
- ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
- if service == "" {
- service = "mongodb"
- }
- if i := strings.Index(host, ":"); i >= 0 {
- host = host[:i]
- }
- ss.service = service
- ss.host = host
-
- usernameComponents := strings.Split(username, "@")
- if len(usernameComponents) < 2 {
- return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
- }
- user := usernameComponents[0]
- ss.domain = usernameComponents[1]
- ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)
-
- var status C.SECURITY_STATUS
- // Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
- if len(password) > 0 {
- status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
- } else {
- status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
- }
- if status != C.SEC_E_OK {
- ss.errored = true
- return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
- }
- return ss, nil
-}
-
-func (ss *saslSession) cstr(s string) *C.char {
- cstr := C.CString(s)
- ss.stringsToFree = append(ss.stringsToFree, cstr)
- return cstr
-}
-
-func (ss *saslSession) Close() {
- for _, cstr := range ss.stringsToFree {
- C.free(unsafe.Pointer(cstr))
- }
-}
-
-func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
- ss.step++
- if ss.step > 10 {
- return nil, false, fmt.Errorf("too many SSPI steps without authentication")
- }
- var buffer C.PVOID
- var bufferLength C.ULONG
- var outBuffer C.PVOID
- var outBufferLength C.ULONG
- if len(serverData) > 0 {
- buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
- bufferLength = C.ULONG(len(serverData))
- }
- var status C.int
- if ss.authComplete {
- // Step 3: last bit of magic to use the correct server credentials
- status = C.sspi_send_client_authz_id(&ss.context, &outBuffer, &outBufferLength, ss.cstr(ss.userPlusRealm))
- } else {
- // Step 1 + Step 2: set up security context with the server and TGT
- status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, buffer, bufferLength, &outBuffer, &outBufferLength, ss.cstr(ss.target))
- }
- if outBuffer != C.PVOID(nil) {
- defer C.free(unsafe.Pointer(outBuffer))
- }
- if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
- ss.errored = true
- return nil, false, ss.handleSSPIErrorCode(status)
- }
-
- clientData = C.GoBytes(unsafe.Pointer(outBuffer), C.int(outBufferLength))
- if status == C.SEC_E_OK {
- ss.authComplete = true
- return clientData, true, nil
- } else {
- ss.hasContext = 1
- return clientData, false, nil
- }
-}
-
-func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
- switch {
- case code == C.SEC_E_TARGET_UNKNOWN:
- return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
- }
- return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
-}
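
Both removed SASL back ends guard their native-library setup with the same one-time initialization pattern: a sync.Once that runs the init function exactly once and caches any error in a package-level variable. A small standard-library-only sketch of that pattern follows; loadNativeLibrary is a hypothetical placeholder for e.g. loading secur32.dll or calling sasl_client_init.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	initOnce  sync.Once
	initError error
)

// loadNativeLibrary stands in for the real platform-specific setup.
func loadNativeLibrary() error {
	return errors.New("library not available")
}

func initLib() {
	initError = loadNativeLibrary()
}

// New mirrors the guard in the removed code: every caller funnels through the
// same Once, and all of them observe the same cached error.
func New() error {
	initOnce.Do(initLib)
	if initError != nil {
		return fmt.Errorf("cannot initialize: %w", initError)
	}
	return nil
}

func main() {
	fmt.Println(New())
	fmt.Println(New()) // initLib does not run again; the cached error is reused
}
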
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h b/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
deleted file mode 100644
index a6b039567..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <windows.h>
-
-#include "sspi_windows.h"
-
-SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
-int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID buffer, ULONG buffer_length, PVOID* out_buffer, ULONG* out_buffer_length, char* target);
-int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c b/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
deleted file mode 100644
index 63f9a6f86..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// Code adapted from the NodeJS kerberos library:
-//
-// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
-//
-// Under the terms of the Apache License, Version 2.0:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-#include <stdlib.h>
-
-#include "sspi_windows.h"
-
-static HINSTANCE sspi_secur32_dll = NULL;
-
-int load_secur32_dll()
-{
- sspi_secur32_dll = LoadLibrary("secur32.dll");
- if (sspi_secur32_dll == NULL) {
- return GetLastError();
- }
- return 0;
-}
-
-SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
-{
- if (sspi_secur32_dll == NULL) {
- return -1;
- }
- encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
- if (!pfn_encryptMessage) {
- return -2;
- }
- return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
-}
-
-SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
- LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
- void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
- PCredHandle phCredential, PTimeStamp ptsExpiry)
-{
- if (sspi_secur32_dll == NULL) {
- return -1;
- }
- acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
-#ifdef _UNICODE
- pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
-#else
- pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
-#endif
- if (!pfn_acquireCredentialsHandle) {
- return -2;
- }
- return (*pfn_acquireCredentialsHandle)(
- pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
- pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
-}
-
-SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
- PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
- unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
- PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
- PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
-{
- if (sspi_secur32_dll == NULL) {
- return -1;
- }
- initializeSecurityContext_fn pfn_initializeSecurityContext;
-#ifdef _UNICODE
- pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
-#else
- pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
-#endif
- if (!pfn_initializeSecurityContext) {
- return -2;
- }
- return (*pfn_initializeSecurityContext)(
- phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
- pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
-}
-
-SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
-{
- if (sspi_secur32_dll == NULL) {
- return -1;
- }
- queryContextAttributes_fn pfn_queryContextAttributes;
-#ifdef _UNICODE
- pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
-#else
- pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
-#endif
- if (!pfn_queryContextAttributes) {
- return -2;
- }
- return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
-}
diff --git a/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h b/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
deleted file mode 100644
index d28327031..000000000
--- a/vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Code adapted from the NodeJS kerberos library:
-//
-// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
-//
-// Under the terms of the Apache License, Version 2.0:
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-#ifndef SSPI_WINDOWS_H
-#define SSPI_WINDOWS_H
-
-#define SECURITY_WIN32 1
-
-#include <windows.h>
-#include <sspi.h>
-
-int load_secur32_dll();
-
-SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);
-
-typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);
-
-SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
- LPSTR pszPrincipal, // Name of principal
- LPSTR pszPackage, // Name of package
- unsigned long fCredentialUse, // Flags indicating use
- void *pvLogonId, // Pointer to logon ID
- void *pAuthData, // Package specific data
- SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func
- void *pvGetKeyArgument, // Value to pass to GetKey()
- PCredHandle phCredential, // (out) Cred Handle
- PTimeStamp ptsExpiry // (out) Lifetime (optional)
-);
-
-typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
- LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
- void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
- PCredHandle phCredential, PTimeStamp ptsExpiry
-);
-
-SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
- PCredHandle phCredential, // Cred to base context
- PCtxtHandle phContext, // Existing context (OPT)
- LPSTR pszTargetName, // Name of target
- unsigned long fContextReq, // Context Requirements
- unsigned long Reserved1, // Reserved, MBZ
- unsigned long TargetDataRep, // Data rep of target
- PSecBufferDesc pInput, // Input Buffers
- unsigned long Reserved2, // Reserved, MBZ
- PCtxtHandle phNewContext, // (out) New Context handle
- PSecBufferDesc pOutput, // (inout) Output Buffers
- unsigned long *pfContextAttr, // (out) Context attrs
- PTimeStamp ptsExpiry // (out) Life span (OPT)
-);
-
-typedef DWORD (WINAPI *initializeSecurityContext_fn)(
- PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
- unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
- PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);
-
-SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
- PCtxtHandle phContext, // Context to query
- unsigned long ulAttribute, // Attribute to query
- void *pBuffer // Buffer for attributes
-);
-
-typedef DWORD (WINAPI *queryContextAttributes_fn)(
- PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);
-
-#endif // SSPI_WINDOWS_H
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 000000000..9f556934d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/k8s.io/api/README.md b/vendor/k8s.io/api/README.md
deleted file mode 100644
index 967543a45..000000000
--- a/vendor/k8s.io/api/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This repo is still in the experimental stage. Shortly it will contain the schema of the API that are served by the Kubernetes apiserver.
diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
index de4e3cee4..edc9b4d60 100644
--- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go
+++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -45,24 +45,17 @@ const (
// to one container of a pod.
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+ // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
+ SeccompProfileRuntimeDefault string = "runtime/default"
+
+ // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
+ // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
+ DeprecatedSeccompProfileDockerDefault string = "docker/default"
+
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
// in the Annotations of a Node.
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
- // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
- // container of a pod. The annotation value is a comma separated list of sysctl_name=value
- // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
- // the kubelet. Pods with other sysctls will fail to launch.
- SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
-
- // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
- // container of a pod. The annotation value is a comma separated list of sysctl_name=value
- // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
- // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
- // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
- // will fail to launch.
- UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
-
// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
// an object (e.g. secret, config map) before fetching it again from apiserver.
// This annotation can be attached to node.
@@ -85,4 +78,29 @@ const (
//
// Not all cloud providers support this annotation, though AWS & GCE do.
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+
+ // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
+ // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
+ // of the last change, of some Pod or Service object, that triggered the endpoints object change.
+ // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
+ // controller at T1, and the Endpoints object was changed at T2, the
+ // EndpointsLastChangeTriggerTime would be set to T0.
+ //
+ // The "endpoints change trigger" here means any Pod or Service change that resulted in the
+ // Endpoints object change.
+ //
+ // Given the definition of the "endpoints change trigger", please note that this annotation will
+ // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
+ // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
+ // already set).
+ //
+ // This annotation will be used to compute the in-cluster network programming latency SLI, see
+ // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
+ EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
+
+ // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated
+ // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode.
+ // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or
+ // CSI Backend for a volume plugin on a specific node.
+ MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins"
)
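
The EndpointsLastChangeTriggerTime annotation added above stores an RFC 3339 date-time string, so a consumer would parse it with time.RFC3339. The sketch below uses a plain map[string]string as a stand-in for an Endpoints object's metadata annotations; it is illustrative, not part of the Kubernetes client code.

package main

import (
	"fmt"
	"time"
)

const endpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"

func main() {
	annotations := map[string]string{
		endpointsLastChangeTriggerTime: "2018-10-22T19:32:52.1Z",
	}

	raw, ok := annotations[endpointsLastChangeTriggerTime]
	if !ok {
		fmt.Println("annotation not set (only set for Pod/Service-triggered changes)")
		return
	}
	t, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		fmt.Println("malformed trigger time:", err)
		return
	}
	fmt.Println("endpoints change was triggered at", t.UTC())
}
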
diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go
index 96994c624..1bdf0b25b 100644
--- a/vendor/k8s.io/api/core/v1/doc.go
+++ b/vendor/k8s.io/api/core/v1/doc.go
@@ -16,6 +16,7 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
// Package v1 is the v1 version of the core API.
package v1 // import "k8s.io/api/core/v1"
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 85c7b634b..977c1d75a 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto
-// DO NOT EDIT!
/*
Package v1 is a generated protocol buffer package.
@@ -34,9 +33,11 @@ limitations under the License.
AzureFileVolumeSource
Binding
CSIPersistentVolumeSource
+ CSIVolumeSource
Capabilities
CephFSPersistentVolumeSource
CephFSVolumeSource
+ CinderPersistentVolumeSource
CinderVolumeSource
ClientIPConfig
ComponentCondition
@@ -46,6 +47,7 @@ limitations under the License.
ConfigMapEnvSource
ConfigMapKeySelector
ConfigMapList
+ ConfigMapNodeConfigSource
ConfigMapProjection
ConfigMapVolumeSource
Container
@@ -57,7 +59,6 @@ limitations under the License.
ContainerStateWaiting
ContainerStatus
DaemonEndpoint
- DeleteOptions
DownwardAPIProjection
DownwardAPIVolumeFile
DownwardAPIVolumeSource
@@ -81,6 +82,7 @@ limitations under the License.
FlockerVolumeSource
GCEPersistentDiskVolumeSource
GitRepoVolumeSource
+ GlusterfsPersistentVolumeSource
GlusterfsVolumeSource
HTTPGetAction
HTTPHeader
@@ -96,7 +98,6 @@ limitations under the License.
LimitRangeList
LimitRangeSpec
List
- ListOptions
LoadBalancerIngress
LoadBalancerStatus
LocalObjectReference
@@ -111,6 +112,7 @@ limitations under the License.
NodeAffinity
NodeCondition
NodeConfigSource
+ NodeConfigStatus
NodeDaemonEndpoints
NodeList
NodeProxyOptions
@@ -122,7 +124,6 @@ limitations under the License.
NodeStatus
NodeSystemInfo
ObjectFieldSelector
- ObjectMeta
ObjectReference
PersistentVolume
PersistentVolumeClaim
@@ -149,6 +150,7 @@ limitations under the License.
PodLogOptions
PodPortForwardOptions
PodProxyOptions
+ PodReadinessGate
PodSecurityContext
PodSignature
PodSpec
@@ -181,6 +183,8 @@ limitations under the License.
SELinuxOptions
ScaleIOPersistentVolumeSource
ScaleIOVolumeSource
+ ScopeSelector
+ ScopedResourceSelectorRequirement
Secret
SecretEnvSource
SecretKeySelector
@@ -193,6 +197,7 @@ limitations under the License.
Service
ServiceAccount
ServiceAccountList
+ ServiceAccountTokenProjection
ServiceList
ServicePort
ServiceProxyOptions
@@ -205,6 +210,9 @@ limitations under the License.
TCPSocketAction
Taint
Toleration
+ TopologySelectorLabelRequirement
+ TopologySelectorTerm
+ TypedLocalObjectReference
Volume
VolumeDevice
VolumeMount
@@ -213,6 +221,7 @@ limitations under the License.
VolumeSource
VsphereVirtualDiskVolumeSource
WeightedPodAffinityTerm
+ WindowsSecurityContextOptions
*/
package v1
@@ -286,770 +295,826 @@ func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
return fileDescriptorGenerated, []int{8}
}
+func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} }
+func (*CSIVolumeSource) ProtoMessage() {}
+func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
+
func (m *Capabilities) Reset() { *m = Capabilities{} }
func (*Capabilities) ProtoMessage() {}
-func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
+func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} }
func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} }
func (*CephFSPersistentVolumeSource) ProtoMessage() {}
func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{10}
+ return fileDescriptorGenerated, []int{11}
}
func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} }
func (*CephFSVolumeSource) ProtoMessage() {}
-func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} }
+func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }
+
+func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} }
+func (*CinderPersistentVolumeSource) ProtoMessage() {}
+func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{13}
+}
func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} }
func (*CinderVolumeSource) ProtoMessage() {}
-func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }
+func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} }
func (*ClientIPConfig) ProtoMessage() {}
-func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }
+func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
func (m *ComponentCondition) Reset() { *m = ComponentCondition{} }
func (*ComponentCondition) ProtoMessage() {}
-func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
+func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
func (m *ComponentStatus) Reset() { *m = ComponentStatus{} }
func (*ComponentStatus) ProtoMessage() {}
-func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
+func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }
func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} }
func (*ComponentStatusList) ProtoMessage() {}
-func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
+func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} }
func (m *ConfigMap) Reset() { *m = ConfigMap{} }
func (*ConfigMap) ProtoMessage() {}
-func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }
+func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} }
func (*ConfigMapEnvSource) ProtoMessage() {}
-func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} }
+func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} }
func (*ConfigMapKeySelector) ProtoMessage() {}
-func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
+func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
func (m *ConfigMapList) Reset() { *m = ConfigMapList{} }
func (*ConfigMapList) ProtoMessage() {}
-func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
+func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
+
+func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} }
+func (*ConfigMapNodeConfigSource) ProtoMessage() {}
+func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{23}
+}
func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} }
func (*ConfigMapProjection) ProtoMessage() {}
-func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
+func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} }
func (*ConfigMapVolumeSource) ProtoMessage() {}
-func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
+func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
func (m *Container) Reset() { *m = Container{} }
func (*Container) ProtoMessage() {}
-func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
+func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
func (m *ContainerImage) Reset() { *m = ContainerImage{} }
func (*ContainerImage) ProtoMessage() {}
-func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
+func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
func (m *ContainerPort) Reset() { *m = ContainerPort{} }
func (*ContainerPort) ProtoMessage() {}
-func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
+func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }
func (m *ContainerState) Reset() { *m = ContainerState{} }
func (*ContainerState) ProtoMessage() {}
-func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
+func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} }
func (*ContainerStateRunning) ProtoMessage() {}
-func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
+func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} }
func (*ContainerStateTerminated) ProtoMessage() {}
func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{28}
+ return fileDescriptorGenerated, []int{31}
}
func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} }
func (*ContainerStateWaiting) ProtoMessage() {}
-func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
+func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
func (*ContainerStatus) ProtoMessage() {}
-func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
+func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} }
func (*DaemonEndpoint) ProtoMessage() {}
-func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }
-
-func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
-func (*DeleteOptions) ProtoMessage() {}
-func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
+func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} }
func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} }
func (*DownwardAPIProjection) ProtoMessage() {}
-func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
+func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} }
func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} }
func (*DownwardAPIVolumeFile) ProtoMessage() {}
-func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} }
+func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} }
func (*DownwardAPIVolumeSource) ProtoMessage() {}
func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{35}
+ return fileDescriptorGenerated, []int{37}
}
func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} }
func (*EmptyDirVolumeSource) ProtoMessage() {}
-func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
+func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
func (m *EndpointAddress) Reset() { *m = EndpointAddress{} }
func (*EndpointAddress) ProtoMessage() {}
-func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }
+func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
func (m *EndpointPort) Reset() { *m = EndpointPort{} }
func (*EndpointPort) ProtoMessage() {}
-func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
+func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
func (m *EndpointSubset) Reset() { *m = EndpointSubset{} }
func (*EndpointSubset) ProtoMessage() {}
-func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
+func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
func (m *Endpoints) Reset() { *m = Endpoints{} }
func (*Endpoints) ProtoMessage() {}
-func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
+func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
func (m *EndpointsList) Reset() { *m = EndpointsList{} }
func (*EndpointsList) ProtoMessage() {}
-func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
+func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
func (m *EnvFromSource) Reset() { *m = EnvFromSource{} }
func (*EnvFromSource) ProtoMessage() {}
-func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
+func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }
func (m *EnvVar) Reset() { *m = EnvVar{} }
func (*EnvVar) ProtoMessage() {}
-func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
+func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }
func (m *EnvVarSource) Reset() { *m = EnvVarSource{} }
func (*EnvVarSource) ProtoMessage() {}
-func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }
+func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }
func (m *Event) Reset() { *m = Event{} }
func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }
+func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} }
func (m *EventList) Reset() { *m = EventList{} }
func (*EventList) ProtoMessage() {}
-func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }
+func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} }
func (m *EventSeries) Reset() { *m = EventSeries{} }
func (*EventSeries) ProtoMessage() {}
-func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} }
+func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} }
func (m *EventSource) Reset() { *m = EventSource{} }
func (*EventSource) ProtoMessage() {}
-func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} }
+func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} }
func (m *ExecAction) Reset() { *m = ExecAction{} }
func (*ExecAction) ProtoMessage() {}
-func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} }
+func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} }
func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} }
func (*FCVolumeSource) ProtoMessage() {}
-func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} }
+func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} }
func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} }
func (*FlexPersistentVolumeSource) ProtoMessage() {}
func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{51}
+ return fileDescriptorGenerated, []int{53}
}
func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
func (*FlexVolumeSource) ProtoMessage() {}
-func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} }
+func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} }
func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
func (*FlockerVolumeSource) ProtoMessage() {}
-func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} }
+func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} }
func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{54}
+ return fileDescriptorGenerated, []int{56}
}
func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
func (*GitRepoVolumeSource) ProtoMessage() {}
-func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} }
+func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} }
+
+func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
+func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
+func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{58}
+}
func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
func (*GlusterfsVolumeSource) ProtoMessage() {}
-func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} }
+func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} }
func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
func (*HTTPGetAction) ProtoMessage() {}
-func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} }
+func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} }
func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
func (*HTTPHeader) ProtoMessage() {}
-func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} }
+func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} }
func (m *Handler) Reset() { *m = Handler{} }
func (*Handler) ProtoMessage() {}
-func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} }
+func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} }
func (m *HostAlias) Reset() { *m = HostAlias{} }
func (*HostAlias) ProtoMessage() {}
-func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} }
+func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} }
func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
func (*HostPathVolumeSource) ProtoMessage() {}
-func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} }
+func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} }
func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{62}
+ return fileDescriptorGenerated, []int{65}
}
func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
func (*ISCSIVolumeSource) ProtoMessage() {}
-func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} }
+func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} }
func (m *KeyToPath) Reset() { *m = KeyToPath{} }
func (*KeyToPath) ProtoMessage() {}
-func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} }
+func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} }
func (m *Lifecycle) Reset() { *m = Lifecycle{} }
func (*Lifecycle) ProtoMessage() {}
-func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} }
+func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} }
func (m *LimitRange) Reset() { *m = LimitRange{} }
func (*LimitRange) ProtoMessage() {}
-func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} }
+func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} }
func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
func (*LimitRangeItem) ProtoMessage() {}
-func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} }
+func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} }
func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
func (*LimitRangeList) ProtoMessage() {}
-func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} }
+func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} }
func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
func (*LimitRangeSpec) ProtoMessage() {}
-func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} }
+func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} }
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
-func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} }
-
-func (m *ListOptions) Reset() { *m = ListOptions{} }
-func (*ListOptions) ProtoMessage() {}
-func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} }
+func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} }
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
-func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} }
+func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} }
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
-func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} }
+func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} }
func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
func (*LocalObjectReference) ProtoMessage() {}
-func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} }
+func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} }
func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
func (*LocalVolumeSource) ProtoMessage() {}
-func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} }
+func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} }
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
func (*NFSVolumeSource) ProtoMessage() {}
-func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} }
+func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} }
func (m *Namespace) Reset() { *m = Namespace{} }
func (*Namespace) ProtoMessage() {}
-func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} }
+func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} }
func (m *NamespaceList) Reset() { *m = NamespaceList{} }
func (*NamespaceList) ProtoMessage() {}
-func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} }
+func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} }
func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
func (*NamespaceSpec) ProtoMessage() {}
-func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} }
+func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} }
func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
func (*NamespaceStatus) ProtoMessage() {}
-func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} }
+func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} }
func (m *Node) Reset() { *m = Node{} }
func (*Node) ProtoMessage() {}
-func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} }
+func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} }
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (*NodeAddress) ProtoMessage() {}
-func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} }
+func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} }
func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
func (*NodeAffinity) ProtoMessage() {}
-func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} }
+func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} }
func (m *NodeCondition) Reset() { *m = NodeCondition{} }
func (*NodeCondition) ProtoMessage() {}
-func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} }
+func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} }
func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
func (*NodeConfigSource) ProtoMessage() {}
-func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} }
+func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} }
+
+func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
+func (*NodeConfigStatus) ProtoMessage() {}
+func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} }
func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
func (*NodeDaemonEndpoints) ProtoMessage() {}
-func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} }
+func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} }
func (m *NodeList) Reset() { *m = NodeList{} }
func (*NodeList) ProtoMessage() {}
-func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} }
+func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} }
func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
func (*NodeProxyOptions) ProtoMessage() {}
-func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} }
+func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} }
func (m *NodeResources) Reset() { *m = NodeResources{} }
func (*NodeResources) ProtoMessage() {}
-func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} }
+func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} }
func (m *NodeSelector) Reset() { *m = NodeSelector{} }
func (*NodeSelector) ProtoMessage() {}
-func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} }
+func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} }
func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
func (*NodeSelectorRequirement) ProtoMessage() {}
func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{91}
+ return fileDescriptorGenerated, []int{94}
}
func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
func (*NodeSelectorTerm) ProtoMessage() {}
-func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} }
+func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} }
func (m *NodeSpec) Reset() { *m = NodeSpec{} }
func (*NodeSpec) ProtoMessage() {}
-func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} }
+func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} }
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
-func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} }
+func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} }
func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
func (*NodeSystemInfo) ProtoMessage() {}
-func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} }
+func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} }
func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
func (*ObjectFieldSelector) ProtoMessage() {}
-func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} }
-
-func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
-func (*ObjectMeta) ProtoMessage() {}
-func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} }
+func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} }
func (m *ObjectReference) Reset() { *m = ObjectReference{} }
func (*ObjectReference) ProtoMessage() {}
-func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} }
+func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} }
func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
func (*PersistentVolume) ProtoMessage() {}
-func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} }
+func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} }
func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
func (*PersistentVolumeClaim) ProtoMessage() {}
-func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} }
+func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} }
func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
func (*PersistentVolumeClaimCondition) ProtoMessage() {}
func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{101}
+ return fileDescriptorGenerated, []int{103}
}
func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
func (*PersistentVolumeClaimList) ProtoMessage() {}
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{102}
+ return fileDescriptorGenerated, []int{104}
}
func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
func (*PersistentVolumeClaimSpec) ProtoMessage() {}
func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{103}
+ return fileDescriptorGenerated, []int{105}
}
func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
func (*PersistentVolumeClaimStatus) ProtoMessage() {}
func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{104}
+ return fileDescriptorGenerated, []int{106}
}
func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{105}
+ return fileDescriptorGenerated, []int{107}
}
func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
func (*PersistentVolumeList) ProtoMessage() {}
-func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} }
+func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} }
func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
func (*PersistentVolumeSource) ProtoMessage() {}
func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{107}
+ return fileDescriptorGenerated, []int{109}
}
func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
func (*PersistentVolumeSpec) ProtoMessage() {}
-func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} }
+func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} }
func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
func (*PersistentVolumeStatus) ProtoMessage() {}
func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{109}
+ return fileDescriptorGenerated, []int{111}
}
func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{110}
+ return fileDescriptorGenerated, []int{112}
}
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
-func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} }
+func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} }
func (m *PodAffinity) Reset() { *m = PodAffinity{} }
func (*PodAffinity) ProtoMessage() {}
-func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} }
+func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} }
func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
func (*PodAffinityTerm) ProtoMessage() {}
-func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} }
+func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} }
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
-func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} }
+func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} }
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
-func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} }
+func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} }
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
-func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} }
+func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} }
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
-func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} }
+func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} }
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
-func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} }
+func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} }
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
-func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} }
+func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} }
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
-func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} }
+func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} }
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
-func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} }
+func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} }
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
-func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} }
+func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} }
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
-func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} }
+func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} }
+
+func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
+func (*PodReadinessGate) ProtoMessage() {}
+func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} }
func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
func (*PodSecurityContext) ProtoMessage() {}
-func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} }
+func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} }
func (m *PodSignature) Reset() { *m = PodSignature{} }
func (*PodSignature) ProtoMessage() {}
-func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} }
+func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} }
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
-func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} }
+func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} }
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
-func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} }
+func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} }
func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
func (*PodStatusResult) ProtoMessage() {}
-func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} }
+func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} }
func (m *PodTemplate) Reset() { *m = PodTemplate{} }
func (*PodTemplate) ProtoMessage() {}
-func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} }
+func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} }
func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
func (*PodTemplateList) ProtoMessage() {}
-func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} }
+func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} }
func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
func (*PodTemplateSpec) ProtoMessage() {}
-func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} }
+func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} }
func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
func (*PortworxVolumeSource) ProtoMessage() {}
-func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} }
+func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} }
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
-func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} }
+func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} }
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
func (*PreferAvoidPodsEntry) ProtoMessage() {}
-func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} }
+func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} }
func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
func (*PreferredSchedulingTerm) ProtoMessage() {}
func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{135}
+ return fileDescriptorGenerated, []int{138}
}
func (m *Probe) Reset() { *m = Probe{} }
func (*Probe) ProtoMessage() {}
-func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} }
+func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} }
func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
func (*ProjectedVolumeSource) ProtoMessage() {}
-func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} }
+func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} }
func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
func (*QuobyteVolumeSource) ProtoMessage() {}
-func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} }
+func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} }
func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
func (*RBDPersistentVolumeSource) ProtoMessage() {}
func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{139}
+ return fileDescriptorGenerated, []int{142}
}
func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
func (*RBDVolumeSource) ProtoMessage() {}
-func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} }
+func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} }
func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
func (*RangeAllocation) ProtoMessage() {}
-func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} }
+func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} }
func (m *ReplicationController) Reset() { *m = ReplicationController{} }
func (*ReplicationController) ProtoMessage() {}
-func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} }
+func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} }
func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
func (*ReplicationControllerCondition) ProtoMessage() {}
func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{143}
+ return fileDescriptorGenerated, []int{146}
}
func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
func (*ReplicationControllerList) ProtoMessage() {}
func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{144}
+ return fileDescriptorGenerated, []int{147}
}
func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
func (*ReplicationControllerSpec) ProtoMessage() {}
func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{145}
+ return fileDescriptorGenerated, []int{148}
}
func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
func (*ReplicationControllerStatus) ProtoMessage() {}
func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{146}
+ return fileDescriptorGenerated, []int{149}
}
func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
func (*ResourceFieldSelector) ProtoMessage() {}
-func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} }
+func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} }
func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
func (*ResourceQuota) ProtoMessage() {}
-func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} }
+func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} }
func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
func (*ResourceQuotaList) ProtoMessage() {}
-func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} }
+func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} }
func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
func (*ResourceQuotaSpec) ProtoMessage() {}
-func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} }
+func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} }
func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
func (*ResourceQuotaStatus) ProtoMessage() {}
-func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} }
+func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} }
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
-func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} }
+func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} }
func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
func (*SELinuxOptions) ProtoMessage() {}
-func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} }
+func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} }
func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{154}
+ return fileDescriptorGenerated, []int{157}
}
func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
func (*ScaleIOVolumeSource) ProtoMessage() {}
-func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} }
+func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} }
+
+func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
+func (*ScopeSelector) ProtoMessage() {}
+func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} }
+
+func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
+func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
+func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{160}
+}
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
-func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} }
+func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} }
func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
func (*SecretEnvSource) ProtoMessage() {}
-func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} }
+func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} }
func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
func (*SecretKeySelector) ProtoMessage() {}
-func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} }
+func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} }
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
-func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} }
+func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} }
func (m *SecretProjection) Reset() { *m = SecretProjection{} }
func (*SecretProjection) ProtoMessage() {}
-func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} }
+func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} }
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
-func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} }
+func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} }
func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
func (*SecretVolumeSource) ProtoMessage() {}
-func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} }
+func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} }
func (m *SecurityContext) Reset() { *m = SecurityContext{} }
func (*SecurityContext) ProtoMessage() {}
-func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} }
+func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} }
func (m *SerializedReference) Reset() { *m = SerializedReference{} }
func (*SerializedReference) ProtoMessage() {}
-func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} }
+func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} }
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
-func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} }
+func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} }
func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
func (*ServiceAccount) ProtoMessage() {}
-func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} }
+func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} }
func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
func (*ServiceAccountList) ProtoMessage() {}
-func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} }
+func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} }
+
+func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
+func (*ServiceAccountTokenProjection) ProtoMessage() {}
+func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{173}
+}
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
-func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} }
+func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} }
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
-func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} }
+func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} }
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
-func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} }
+func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} }
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
-func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} }
+func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} }
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
-func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} }
+func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} }
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
-func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} }
+func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} }
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{174}
+ return fileDescriptorGenerated, []int{180}
}
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
-func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} }
+func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} }
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
-func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} }
+func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} }
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
-func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} }
+func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} }
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
-func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} }
+func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} }
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
-func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} }
+func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} }
+
+func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
+func (*TopologySelectorLabelRequirement) ProtoMessage() {}
+func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{186}
+}
+
+func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
+func (*TopologySelectorTerm) ProtoMessage() {}
+func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} }
+
+func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
+func (*TypedLocalObjectReference) ProtoMessage() {}
+func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{188}
+}
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
-func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} }
+func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} }
func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
func (*VolumeDevice) ProtoMessage() {}
-func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} }
+func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} }
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
-func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} }
+func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} }
func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
func (*VolumeNodeAffinity) ProtoMessage() {}
-func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} }
+func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} }
func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
func (*VolumeProjection) ProtoMessage() {}
-func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} }
+func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} }
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
-func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} }
+func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} }
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{186}
+ return fileDescriptorGenerated, []int{195}
}
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{187}
+ return fileDescriptorGenerated, []int{196}
+}
+
+func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
+func (*WindowsSecurityContextOptions) ProtoMessage() {}
+func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{197}
}
func init() {
@@ -1062,9 +1127,11 @@ func init() {
proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource")
proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
+ proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource")
proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
+ proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource")
proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
@@ -1074,6 +1141,7 @@ func init() {
proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource")
proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector")
proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList")
+ proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource")
proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
@@ -1085,7 +1153,6 @@ func init() {
proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
- proto.RegisterType((*DeleteOptions)(nil), "k8s.io.api.core.v1.DeleteOptions")
proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource")
@@ -1109,6 +1176,7 @@ func init() {
proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource")
proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource")
proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource")
+ proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource")
proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource")
proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction")
proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader")
@@ -1124,7 +1192,6 @@ func init() {
proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList")
proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec")
proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List")
- proto.RegisterType((*ListOptions)(nil), "k8s.io.api.core.v1.ListOptions")
proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress")
proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference")
@@ -1139,6 +1206,7 @@ func init() {
proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity")
proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition")
proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource")
+ proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus")
proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints")
proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList")
proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions")
@@ -1150,7 +1218,6 @@ func init() {
proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus")
proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo")
proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector")
- proto.RegisterType((*ObjectMeta)(nil), "k8s.io.api.core.v1.ObjectMeta")
proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference")
proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume")
proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim")
@@ -1177,6 +1244,7 @@ func init() {
proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions")
proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions")
+ proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate")
proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext")
proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature")
proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec")
@@ -1209,6 +1277,8 @@ func init() {
proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions")
proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource")
proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource")
+ proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector")
+ proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement")
proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret")
proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource")
proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector")
@@ -1221,6 +1291,7 @@ func init() {
proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service")
proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount")
proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList")
+ proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection")
proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList")
proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort")
proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions")
@@ -1233,6 +1304,9 @@ func init() {
proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction")
proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint")
proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration")
+ proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement")
+ proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm")
+ proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference")
proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume")
proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice")
proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount")
@@ -1241,6 +1315,7 @@ func init() {
proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource")
proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource")
proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm")
+ proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions")
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1632,6 +1707,86 @@ func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
}
i += n8
}
+ if m.ControllerExpandSecretRef != nil {
+ dAtA[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerExpandSecretRef.Size()))
+ n9, err := m.ControllerExpandSecretRef.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ return i, nil
+}
+
+func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i += copy(dAtA[i:], m.Driver)
+ if m.ReadOnly != nil {
+ dAtA[i] = 0x10
+ i++
+ if *m.ReadOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.FSType != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
+ i += copy(dAtA[i:], *m.FSType)
+ }
+ if len(m.VolumeAttributes) > 0 {
+ keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes))
+ for k := range m.VolumeAttributes {
+ keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ for _, k := range keysForVolumeAttributes {
+ dAtA[i] = 0x22
+ i++
+ v := m.VolumeAttributes[string(k)]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ if m.NodePublishSecretRef != nil {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size()))
+ n10, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
return i, nil
}
@@ -1729,11 +1884,11 @@ func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n9, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n11, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n9
+ i += n11
}
dAtA[i] = 0x30
i++
@@ -1792,11 +1947,11 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n10, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n12, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n10
+ i += n12
}
dAtA[i] = 0x30
i++
@@ -1809,6 +1964,50 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID)))
+ i += copy(dAtA[i:], m.VolumeID)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+ i += copy(dAtA[i:], m.FSType)
+ dAtA[i] = 0x18
+ i++
+ if m.ReadOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ if m.SecretRef != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
+ n13, err := m.SecretRef.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+
func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1840,6 +2039,16 @@ func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0
}
i++
+ if m.SecretRef != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
+ n14, err := m.SecretRef.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
return i, nil
}
@@ -1918,11 +2127,11 @@ func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n11, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n15, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n11
+ i += n15
if len(m.Conditions) > 0 {
for _, msg := range m.Conditions {
dAtA[i] = 0x12
@@ -1956,11 +2165,11 @@ func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n12, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n16, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n12
+ i += n16
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -1994,11 +2203,11 @@ func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n13, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n17, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n13
+ i += n17
if len(m.Data) > 0 {
keysForData := make([]string, 0, len(m.Data))
for k := range m.Data {
@@ -2070,11 +2279,11 @@ func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n14, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n14
+ i += n18
if m.Optional != nil {
dAtA[i] = 0x10
i++
@@ -2106,11 +2315,11 @@ func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n15, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n15
+ i += n19
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
@@ -2146,11 +2355,11 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n16, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n20, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n16
+ i += n20
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -2166,6 +2375,44 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i += copy(dAtA[i:], m.Namespace)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i += copy(dAtA[i:], m.UID)
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+ i += copy(dAtA[i:], m.ResourceVersion)
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey)))
+ i += copy(dAtA[i:], m.KubeletConfigKey)
+ return i, nil
+}
+
func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2184,11 +2431,11 @@ func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n17
+ i += n21
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -2232,11 +2479,11 @@ func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n22, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n18
+ i += n22
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -2351,11 +2598,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x42
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size()))
- n19, err := m.Resources.MarshalTo(dAtA[i:])
+ n23, err := m.Resources.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n19
+ i += n23
if len(m.VolumeMounts) > 0 {
for _, msg := range m.VolumeMounts {
dAtA[i] = 0x4a
@@ -2372,31 +2619,31 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size()))
- n20, err := m.LivenessProbe.MarshalTo(dAtA[i:])
+ n24, err := m.LivenessProbe.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n20
+ i += n24
}
if m.ReadinessProbe != nil {
dAtA[i] = 0x5a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size()))
- n21, err := m.ReadinessProbe.MarshalTo(dAtA[i:])
+ n25, err := m.ReadinessProbe.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n21
+ i += n25
}
if m.Lifecycle != nil {
dAtA[i] = 0x62
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size()))
- n22, err := m.Lifecycle.MarshalTo(dAtA[i:])
+ n26, err := m.Lifecycle.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n22
+ i += n26
}
dAtA[i] = 0x6a
i++
@@ -2410,11 +2657,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x7a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size()))
- n23, err := m.SecurityContext.MarshalTo(dAtA[i:])
+ n27, err := m.SecurityContext.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n23
+ i += n27
}
dAtA[i] = 0x80
i++
@@ -2574,31 +2821,31 @@ func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size()))
- n24, err := m.Waiting.MarshalTo(dAtA[i:])
+ n28, err := m.Waiting.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n24
+ i += n28
}
if m.Running != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size()))
- n25, err := m.Running.MarshalTo(dAtA[i:])
+ n29, err := m.Running.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n25
+ i += n29
}
if m.Terminated != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size()))
- n26, err := m.Terminated.MarshalTo(dAtA[i:])
+ n30, err := m.Terminated.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n26
+ i += n30
}
return i, nil
}
@@ -2621,11 +2868,11 @@ func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size()))
- n27, err := m.StartedAt.MarshalTo(dAtA[i:])
+ n31, err := m.StartedAt.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n27
+ i += n31
return i, nil
}
@@ -2661,19 +2908,19 @@ func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size()))
- n28, err := m.StartedAt.MarshalTo(dAtA[i:])
+ n32, err := m.StartedAt.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n28
+ i += n32
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size()))
- n29, err := m.FinishedAt.MarshalTo(dAtA[i:])
+ n33, err := m.FinishedAt.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n29
+ i += n33
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID)))
@@ -2729,19 +2976,19 @@ func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size()))
- n30, err := m.State.MarshalTo(dAtA[i:])
+ n34, err := m.State.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n30
+ i += n34
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size()))
- n31, err := m.LastTerminationState.MarshalTo(dAtA[i:])
+ n35, err := m.LastTerminationState.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n31
+ i += n35
dAtA[i] = 0x20
i++
if m.Ready {
@@ -2789,55 +3036,6 @@ func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
-func (m *DeleteOptions) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.GracePeriodSeconds != nil {
- dAtA[i] = 0x8
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds))
- }
- if m.Preconditions != nil {
- dAtA[i] = 0x12
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size()))
- n32, err := m.Preconditions.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n32
- }
- if m.OrphanDependents != nil {
- dAtA[i] = 0x18
- i++
- if *m.OrphanDependents {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- }
- if m.PropagationPolicy != nil {
- dAtA[i] = 0x22
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy)))
- i += copy(dAtA[i:], *m.PropagationPolicy)
- }
- return i, nil
-}
-
func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2891,21 +3089,21 @@ func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size()))
- n33, err := m.FieldRef.MarshalTo(dAtA[i:])
+ n36, err := m.FieldRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n33
+ i += n36
}
if m.ResourceFieldRef != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size()))
- n34, err := m.ResourceFieldRef.MarshalTo(dAtA[i:])
+ n37, err := m.ResourceFieldRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n34
+ i += n37
}
if m.Mode != nil {
dAtA[i] = 0x20
@@ -2973,11 +3171,11 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size()))
- n35, err := m.SizeLimit.MarshalTo(dAtA[i:])
+ n38, err := m.SizeLimit.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n35
+ i += n38
}
return i, nil
}
@@ -3005,11 +3203,11 @@ func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size()))
- n36, err := m.TargetRef.MarshalTo(dAtA[i:])
+ n39, err := m.TargetRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n36
+ i += n39
}
dAtA[i] = 0x1a
i++
@@ -3125,11 +3323,11 @@ func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n37, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n40, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n37
+ i += n40
if len(m.Subsets) > 0 {
for _, msg := range m.Subsets {
dAtA[i] = 0x12
@@ -3163,11 +3361,11 @@ func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n38, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n41, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n38
+ i += n41
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -3206,21 +3404,21 @@ func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size()))
- n39, err := m.ConfigMapRef.MarshalTo(dAtA[i:])
+ n42, err := m.ConfigMapRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n39
+ i += n42
}
if m.SecretRef != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n40, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n43, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n40
+ i += n43
}
return i, nil
}
@@ -3252,11 +3450,11 @@ func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size()))
- n41, err := m.ValueFrom.MarshalTo(dAtA[i:])
+ n44, err := m.ValueFrom.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n41
+ i += n44
}
return i, nil
}
@@ -3280,41 +3478,41 @@ func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size()))
- n42, err := m.FieldRef.MarshalTo(dAtA[i:])
+ n45, err := m.FieldRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n42
+ i += n45
}
if m.ResourceFieldRef != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size()))
- n43, err := m.ResourceFieldRef.MarshalTo(dAtA[i:])
+ n46, err := m.ResourceFieldRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n43
+ i += n46
}
if m.ConfigMapKeyRef != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size()))
- n44, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:])
+ n47, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n44
+ i += n47
}
if m.SecretKeyRef != nil {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size()))
- n45, err := m.SecretKeyRef.MarshalTo(dAtA[i:])
+ n48, err := m.SecretKeyRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n45
+ i += n48
}
return i, nil
}
@@ -3337,19 +3535,19 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n46, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n49, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n46
+ i += n49
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size()))
- n47, err := m.InvolvedObject.MarshalTo(dAtA[i:])
+ n50, err := m.InvolvedObject.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n47
+ i += n50
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -3361,27 +3559,27 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size()))
- n48, err := m.Source.MarshalTo(dAtA[i:])
+ n51, err := m.Source.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n48
+ i += n51
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size()))
- n49, err := m.FirstTimestamp.MarshalTo(dAtA[i:])
+ n52, err := m.FirstTimestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n49
+ i += n52
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size()))
- n50, err := m.LastTimestamp.MarshalTo(dAtA[i:])
+ n53, err := m.LastTimestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n50
+ i += n53
dAtA[i] = 0x40
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
@@ -3392,20 +3590,20 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size()))
- n51, err := m.EventTime.MarshalTo(dAtA[i:])
+ n54, err := m.EventTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n51
+ i += n54
if m.Series != nil {
dAtA[i] = 0x5a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size()))
- n52, err := m.Series.MarshalTo(dAtA[i:])
+ n55, err := m.Series.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n52
+ i += n55
}
dAtA[i] = 0x62
i++
@@ -3415,11 +3613,11 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x6a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size()))
- n53, err := m.Related.MarshalTo(dAtA[i:])
+ n56, err := m.Related.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n53
+ i += n56
}
dAtA[i] = 0x72
i++
@@ -3450,11 +3648,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n54, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n57, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n54
+ i += n57
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -3491,11 +3689,11 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size()))
- n55, err := m.LastObservedTime.MarshalTo(dAtA[i:])
+ n58, err := m.LastObservedTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n55
+ i += n58
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.State)))
@@ -3654,11 +3852,11 @@ func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n56, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n59, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n56
+ i += n59
}
dAtA[i] = 0x20
i++
@@ -3720,11 +3918,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n57, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n60, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n57
+ i += n60
}
dAtA[i] = 0x20
i++
@@ -3852,6 +4050,46 @@ func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName)))
+ i += copy(dAtA[i:], m.EndpointsName)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+ i += copy(dAtA[i:], m.Path)
+ dAtA[i] = 0x18
+ i++
+ if m.ReadOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ if m.EndpointsNamespace != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace)))
+ i += copy(dAtA[i:], *m.EndpointsNamespace)
+ }
+ return i, nil
+}
+
func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -3908,11 +4146,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size()))
- n58, err := m.Port.MarshalTo(dAtA[i:])
+ n61, err := m.Port.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n58
+ i += n61
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
@@ -3981,31 +4219,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size()))
- n59, err := m.Exec.MarshalTo(dAtA[i:])
+ n62, err := m.Exec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n59
+ i += n62
}
if m.HTTPGet != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size()))
- n60, err := m.HTTPGet.MarshalTo(dAtA[i:])
+ n63, err := m.HTTPGet.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n60
+ i += n63
}
if m.TCPSocket != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size()))
- n61, err := m.TCPSocket.MarshalTo(dAtA[i:])
+ n64, err := m.TCPSocket.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n61
+ i += n64
}
return i, nil
}
@@ -4144,11 +4382,11 @@ func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n62, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n65, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n62
+ i += n65
}
dAtA[i] = 0x58
i++
@@ -4236,11 +4474,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n63, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n66, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n63
+ i += n66
}
dAtA[i] = 0x58
i++
@@ -4309,21 +4547,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size()))
- n64, err := m.PostStart.MarshalTo(dAtA[i:])
+ n67, err := m.PostStart.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n64
+ i += n67
}
if m.PreStop != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size()))
- n65, err := m.PreStop.MarshalTo(dAtA[i:])
+ n68, err := m.PreStop.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n65
+ i += n68
}
return i, nil
}
@@ -4346,19 +4584,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n66, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n69, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n66
+ i += n69
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n67, err := m.Spec.MarshalTo(dAtA[i:])
+ n70, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n67
+ i += n70
return i, nil
}
@@ -4405,11 +4643,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n68, err := (&v).MarshalTo(dAtA[i:])
+ n71, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n68
+ i += n71
}
}
if len(m.Min) > 0 {
@@ -4436,11 +4674,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n69, err := (&v).MarshalTo(dAtA[i:])
+ n72, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n69
+ i += n72
}
}
if len(m.Default) > 0 {
@@ -4467,11 +4705,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n70, err := (&v).MarshalTo(dAtA[i:])
+ n73, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n70
+ i += n73
}
}
if len(m.DefaultRequest) > 0 {
@@ -4498,11 +4736,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n71, err := (&v).MarshalTo(dAtA[i:])
+ n74, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n71
+ i += n74
}
}
if len(m.MaxLimitRequestRatio) > 0 {
@@ -4529,11 +4767,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n72, err := (&v).MarshalTo(dAtA[i:])
+ n75, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n72
+ i += n75
}
}
return i, nil
@@ -4557,11 +4795,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n73, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n76, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n73
+ i += n76
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -4625,11 +4863,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n74, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n77, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n74
+ i += n77
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -4645,57 +4883,6 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
-func (m *ListOptions) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector)))
- i += copy(dAtA[i:], m.LabelSelector)
- dAtA[i] = 0x12
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector)))
- i += copy(dAtA[i:], m.FieldSelector)
- dAtA[i] = 0x18
- i++
- if m.Watch {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- dAtA[i] = 0x22
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
- i += copy(dAtA[i:], m.ResourceVersion)
- if m.TimeoutSeconds != nil {
- dAtA[i] = 0x28
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
- }
- dAtA[i] = 0x30
- i++
- if m.IncludeUninitialized {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- return i, nil
-}
-
func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -4793,6 +4980,12 @@ func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
+ if m.FSType != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
+ i += copy(dAtA[i:], *m.FSType)
+ }
return i, nil
}
@@ -4848,27 +5041,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n75, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n78, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n75
+ i += n78
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n76, err := m.Spec.MarshalTo(dAtA[i:])
+ n79, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n76
+ i += n79
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n77, err := m.Status.MarshalTo(dAtA[i:])
+ n80, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n77
+ i += n80
return i, nil
}
@@ -4890,11 +5083,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n78, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n81, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n78
+ i += n81
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -4983,27 +5176,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n79, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n82, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n79
+ i += n82
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n80, err := m.Spec.MarshalTo(dAtA[i:])
+ n83, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n80
+ i += n83
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n81, err := m.Status.MarshalTo(dAtA[i:])
+ n84, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n81
+ i += n84
return i, nil
}
@@ -5052,11 +5245,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size()))
- n82, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:])
+ n85, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n82
+ i += n85
}
if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution {
@@ -5099,19 +5292,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size()))
- n83, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:])
+ n86, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n83
+ i += n86
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
- n84, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ n87, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n84
+ i += n87
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -5138,16 +5331,68 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- if m.ConfigMapRef != nil {
+ if m.ConfigMap != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size()))
+ n88, err := m.ConfigMap.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n88
+ }
+ return i, nil
+}
+
+func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Assigned != nil {
dAtA[i] = 0xa
i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size()))
- n85, err := m.ConfigMapRef.MarshalTo(dAtA[i:])
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size()))
+ n89, err := m.Assigned.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n85
+ i += n89
+ }
+ if m.Active != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size()))
+ n90, err := m.Active.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n90
}
+ if m.LastKnownGood != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size()))
+ n91, err := m.LastKnownGood.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n91
+ }
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
+ i += copy(dAtA[i:], m.Error)
return i, nil
}
@@ -5169,11 +5414,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size()))
- n86, err := m.KubeletEndpoint.MarshalTo(dAtA[i:])
+ n92, err := m.KubeletEndpoint.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n86
+ i += n92
return i, nil
}
@@ -5195,11 +5440,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n87, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n93, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n87
+ i += n93
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -5276,11 +5521,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n88, err := (&v).MarshalTo(dAtA[i:])
+ n94, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n88
+ i += n94
}
}
return i, nil
@@ -5384,6 +5629,18 @@ func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
+ if len(m.MatchFields) > 0 {
+ for _, msg := range m.MatchFields {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
return i, nil
}
@@ -5408,8 +5665,8 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], m.PodCIDR)
dAtA[i] = 0x12
i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalID)))
- i += copy(dAtA[i:], m.ExternalID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID)))
+ i += copy(dAtA[i:], m.DoNotUse_ExternalID)
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID)))
@@ -5438,11 +5695,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size()))
- n89, err := m.ConfigSource.MarshalTo(dAtA[i:])
+ n95, err := m.ConfigSource.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n89
+ i += n95
}
return i, nil
}
@@ -5486,11 +5743,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n90, err := (&v).MarshalTo(dAtA[i:])
+ n96, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n90
+ i += n96
}
}
if len(m.Allocatable) > 0 {
@@ -5517,11 +5774,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n91, err := (&v).MarshalTo(dAtA[i:])
+ n97, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n91
+ i += n97
}
}
dAtA[i] = 0x1a
@@ -5555,19 +5812,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size()))
- n92, err := m.DaemonEndpoints.MarshalTo(dAtA[i:])
+ n98, err := m.DaemonEndpoints.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n92
+ i += n98
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size()))
- n93, err := m.NodeInfo.MarshalTo(dAtA[i:])
+ n99, err := m.NodeInfo.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n93
+ i += n99
if len(m.Images) > 0 {
for _, msg := range m.Images {
dAtA[i] = 0x42
@@ -5607,6 +5864,16 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
+ if m.Config != nil {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size()))
+ n100, err := m.Config.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n100
+ }
return i, nil
}
@@ -5694,161 +5961,6 @@ func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
-func (m *ObjectMeta) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- dAtA[i] = 0x12
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName)))
- i += copy(dAtA[i:], m.GenerateName)
- dAtA[i] = 0x1a
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
- i += copy(dAtA[i:], m.Namespace)
- dAtA[i] = 0x22
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink)))
- i += copy(dAtA[i:], m.SelfLink)
- dAtA[i] = 0x2a
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
- i += copy(dAtA[i:], m.UID)
- dAtA[i] = 0x32
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
- i += copy(dAtA[i:], m.ResourceVersion)
- dAtA[i] = 0x38
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
- dAtA[i] = 0x42
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size()))
- n94, err := m.CreationTimestamp.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n94
- if m.DeletionTimestamp != nil {
- dAtA[i] = 0x4a
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size()))
- n95, err := m.DeletionTimestamp.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n95
- }
- if m.DeletionGracePeriodSeconds != nil {
- dAtA[i] = 0x50
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds))
- }
- if len(m.Labels) > 0 {
- keysForLabels := make([]string, 0, len(m.Labels))
- for k := range m.Labels {
- keysForLabels = append(keysForLabels, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
- for _, k := range keysForLabels {
- dAtA[i] = 0x5a
- i++
- v := m.Labels[string(k)]
- mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
- i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
- }
- }
- if len(m.Annotations) > 0 {
- keysForAnnotations := make([]string, 0, len(m.Annotations))
- for k := range m.Annotations {
- keysForAnnotations = append(keysForAnnotations, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
- for _, k := range keysForAnnotations {
- dAtA[i] = 0x62
- i++
- v := m.Annotations[string(k)]
- mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
- i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
- }
- }
- if len(m.OwnerReferences) > 0 {
- for _, msg := range m.OwnerReferences {
- dAtA[i] = 0x6a
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if len(m.Finalizers) > 0 {
- for _, s := range m.Finalizers {
- dAtA[i] = 0x72
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
- }
- dAtA[i] = 0x7a
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName)))
- i += copy(dAtA[i:], m.ClusterName)
- if m.Initializers != nil {
- dAtA[i] = 0x82
- i++
- dAtA[i] = 0x1
- i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size()))
- n96, err := m.Initializers.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n96
- }
- return i, nil
-}
-
func (m *ObjectReference) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -5913,27 +6025,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n97, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n101, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n97
+ i += n101
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n98, err := m.Spec.MarshalTo(dAtA[i:])
+ n102, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n98
+ i += n102
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n99, err := m.Status.MarshalTo(dAtA[i:])
+ n103, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n99
+ i += n103
return i, nil
}
@@ -5955,27 +6067,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n100, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n104, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n100
+ i += n104
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n101, err := m.Spec.MarshalTo(dAtA[i:])
+ n105, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n101
+ i += n105
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n102, err := m.Status.MarshalTo(dAtA[i:])
+ n106, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n102
+ i += n106
return i, nil
}
@@ -6005,19 +6117,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size()))
- n103, err := m.LastProbeTime.MarshalTo(dAtA[i:])
+ n107, err := m.LastProbeTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n103
+ i += n107
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
- n104, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ n108, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n104
+ i += n108
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -6047,11 +6159,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n105, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n109, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n105
+ i += n109
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -6100,11 +6212,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size()))
- n106, err := m.Resources.MarshalTo(dAtA[i:])
+ n110, err := m.Resources.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n106
+ i += n110
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
@@ -6113,11 +6225,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size()))
- n107, err := m.Selector.MarshalTo(dAtA[i:])
+ n111, err := m.Selector.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n107
+ i += n111
}
if m.StorageClassName != nil {
dAtA[i] = 0x2a
@@ -6131,6 +6243,16 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode)))
i += copy(dAtA[i:], *m.VolumeMode)
}
+ if m.DataSource != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size()))
+ n112, err := m.DataSource.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n112
+ }
return i, nil
}
@@ -6192,11 +6314,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n108, err := (&v).MarshalTo(dAtA[i:])
+ n113, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n108
+ i += n113
}
}
if len(m.Conditions) > 0 {
@@ -6262,11 +6384,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n109, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n114, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n109
+ i += n114
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -6301,151 +6423,151 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size()))
- n110, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:])
+ n115, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n110
+ i += n115
}
if m.AWSElasticBlockStore != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size()))
- n111, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:])
+ n116, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n111
+ i += n116
}
if m.HostPath != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size()))
- n112, err := m.HostPath.MarshalTo(dAtA[i:])
+ n117, err := m.HostPath.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n112
+ i += n117
}
if m.Glusterfs != nil {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size()))
- n113, err := m.Glusterfs.MarshalTo(dAtA[i:])
+ n118, err := m.Glusterfs.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n113
+ i += n118
}
if m.NFS != nil {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size()))
- n114, err := m.NFS.MarshalTo(dAtA[i:])
+ n119, err := m.NFS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n114
+ i += n119
}
if m.RBD != nil {
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size()))
- n115, err := m.RBD.MarshalTo(dAtA[i:])
+ n120, err := m.RBD.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n115
+ i += n120
}
if m.ISCSI != nil {
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size()))
- n116, err := m.ISCSI.MarshalTo(dAtA[i:])
+ n121, err := m.ISCSI.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n116
+ i += n121
}
if m.Cinder != nil {
dAtA[i] = 0x42
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size()))
- n117, err := m.Cinder.MarshalTo(dAtA[i:])
+ n122, err := m.Cinder.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n117
+ i += n122
}
if m.CephFS != nil {
dAtA[i] = 0x4a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size()))
- n118, err := m.CephFS.MarshalTo(dAtA[i:])
+ n123, err := m.CephFS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n118
+ i += n123
}
if m.FC != nil {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size()))
- n119, err := m.FC.MarshalTo(dAtA[i:])
+ n124, err := m.FC.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n119
+ i += n124
}
if m.Flocker != nil {
dAtA[i] = 0x5a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size()))
- n120, err := m.Flocker.MarshalTo(dAtA[i:])
+ n125, err := m.Flocker.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n120
+ i += n125
}
if m.FlexVolume != nil {
dAtA[i] = 0x62
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size()))
- n121, err := m.FlexVolume.MarshalTo(dAtA[i:])
+ n126, err := m.FlexVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n121
+ i += n126
}
if m.AzureFile != nil {
dAtA[i] = 0x6a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size()))
- n122, err := m.AzureFile.MarshalTo(dAtA[i:])
+ n127, err := m.AzureFile.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n122
+ i += n127
}
if m.VsphereVolume != nil {
dAtA[i] = 0x72
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size()))
- n123, err := m.VsphereVolume.MarshalTo(dAtA[i:])
+ n128, err := m.VsphereVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n123
+ i += n128
}
if m.Quobyte != nil {
dAtA[i] = 0x7a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size()))
- n124, err := m.Quobyte.MarshalTo(dAtA[i:])
+ n129, err := m.Quobyte.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n124
+ i += n129
}
if m.AzureDisk != nil {
dAtA[i] = 0x82
@@ -6453,11 +6575,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
- n125, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ n130, err := m.AzureDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n125
+ i += n130
}
if m.PhotonPersistentDisk != nil {
dAtA[i] = 0x8a
@@ -6465,11 +6587,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size()))
- n126, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:])
+ n131, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n126
+ i += n131
}
if m.PortworxVolume != nil {
dAtA[i] = 0x92
@@ -6477,11 +6599,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size()))
- n127, err := m.PortworxVolume.MarshalTo(dAtA[i:])
+ n132, err := m.PortworxVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n127
+ i += n132
}
if m.ScaleIO != nil {
dAtA[i] = 0x9a
@@ -6489,11 +6611,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size()))
- n128, err := m.ScaleIO.MarshalTo(dAtA[i:])
+ n133, err := m.ScaleIO.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n128
+ i += n133
}
if m.Local != nil {
dAtA[i] = 0xa2
@@ -6501,11 +6623,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size()))
- n129, err := m.Local.MarshalTo(dAtA[i:])
+ n134, err := m.Local.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n129
+ i += n134
}
if m.StorageOS != nil {
dAtA[i] = 0xaa
@@ -6513,11 +6635,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size()))
- n130, err := m.StorageOS.MarshalTo(dAtA[i:])
+ n135, err := m.StorageOS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n130
+ i += n135
}
if m.CSI != nil {
dAtA[i] = 0xb2
@@ -6525,11 +6647,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size()))
- n131, err := m.CSI.MarshalTo(dAtA[i:])
+ n136, err := m.CSI.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n131
+ i += n136
}
return i, nil
}
@@ -6573,21 +6695,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n132, err := (&v).MarshalTo(dAtA[i:])
+ n137, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n132
+ i += n137
}
}
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size()))
- n133, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:])
+ n138, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n133
+ i += n138
if len(m.AccessModes) > 0 {
for _, s := range m.AccessModes {
dAtA[i] = 0x1a
@@ -6607,11 +6729,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size()))
- n134, err := m.ClaimRef.MarshalTo(dAtA[i:])
+ n139, err := m.ClaimRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n134
+ i += n139
}
dAtA[i] = 0x2a
i++
@@ -6646,11 +6768,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x4a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size()))
- n135, err := m.NodeAffinity.MarshalTo(dAtA[i:])
+ n140, err := m.NodeAffinity.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n135
+ i += n140
}
return i, nil
}
@@ -6729,27 +6851,27 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n136, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n141, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n136
+ i += n141
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n137, err := m.Spec.MarshalTo(dAtA[i:])
+ n142, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n137
+ i += n142
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n138, err := m.Status.MarshalTo(dAtA[i:])
+ n143, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n138
+ i += n143
return i, nil
}
@@ -6814,11 +6936,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size()))
- n139, err := m.LabelSelector.MarshalTo(dAtA[i:])
+ n144, err := m.LabelSelector.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n139
+ i += n144
}
if len(m.Namespaces) > 0 {
for _, s := range m.Namespaces {
@@ -6964,19 +7086,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size()))
- n140, err := m.LastProbeTime.MarshalTo(dAtA[i:])
+ n145, err := m.LastProbeTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n140
+ i += n145
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
- n141, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ n146, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n141
+ i += n146
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -7163,11 +7285,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n142, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n147, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n142
+ i += n147
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -7227,11 +7349,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size()))
- n143, err := m.SinceTime.MarshalTo(dAtA[i:])
+ n148, err := m.SinceTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n143
+ i += n148
}
dAtA[i] = 0x30
i++
@@ -7301,6 +7423,28 @@ func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType)))
+ i += copy(dAtA[i:], m.ConditionType)
+ return i, nil
+}
+
func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -7320,11 +7464,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size()))
- n144, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
+ n149, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n144
+ i += n149
}
if m.RunAsUser != nil {
dAtA[i] = 0x10
@@ -7358,6 +7502,28 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup))
}
+ if len(m.Sysctls) > 0 {
+ for _, msg := range m.Sysctls {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.WindowsOptions != nil {
+ dAtA[i] = 0x42
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size()))
+ n150, err := m.WindowsOptions.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n150
+ }
return i, nil
}
@@ -7380,11 +7546,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size()))
- n145, err := m.PodController.MarshalTo(dAtA[i:])
+ n151, err := m.PodController.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n145
+ i += n151
}
return i, nil
}
@@ -7508,11 +7674,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x72
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size()))
- n146, err := m.SecurityContext.MarshalTo(dAtA[i:])
+ n152, err := m.SecurityContext.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n146
+ i += n152
}
if len(m.ImagePullSecrets) > 0 {
for _, msg := range m.ImagePullSecrets {
@@ -7544,11 +7710,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size()))
- n147, err := m.Affinity.MarshalTo(dAtA[i:])
+ n153, err := m.Affinity.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n147
+ i += n153
}
dAtA[i] = 0x9a
i++
@@ -7629,11 +7795,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size()))
- n148, err := m.DNSConfig.MarshalTo(dAtA[i:])
+ n154, err := m.DNSConfig.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n148
+ i += n154
}
if m.ShareProcessNamespace != nil {
dAtA[i] = 0xd8
@@ -7647,6 +7813,81 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
+ if len(m.ReadinessGates) > 0 {
+ for _, msg := range m.ReadinessGates {
+ dAtA[i] = 0xe2
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.RuntimeClassName != nil {
+ dAtA[i] = 0xea
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName)))
+ i += copy(dAtA[i:], *m.RuntimeClassName)
+ }
+ if m.EnableServiceLinks != nil {
+ dAtA[i] = 0xf0
+ i++
+ dAtA[i] = 0x1
+ i++
+ if *m.EnableServiceLinks {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.PreemptionPolicy != nil {
+ dAtA[i] = 0xfa
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy)))
+ i += copy(dAtA[i:], *m.PreemptionPolicy)
+ }
+ if len(m.Overhead) > 0 {
+ keysForOverhead := make([]string, 0, len(m.Overhead))
+ for k := range m.Overhead {
+ keysForOverhead = append(keysForOverhead, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+ for _, k := range keysForOverhead {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x2
+ i++
+ v := m.Overhead[ResourceName(k)]
+ msgSize := 0
+ if (&v) != nil {
+ msgSize = (&v).Size()
+ msgSize += 1 + sovGenerated(uint64(msgSize))
+ }
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize
+ i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
+ n155, err := (&v).MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n155
+ }
+ }
return i, nil
}
@@ -7701,11 +7942,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size()))
- n149, err := m.StartTime.MarshalTo(dAtA[i:])
+ n156, err := m.StartTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n149
+ i += n156
}
if len(m.ContainerStatuses) > 0 {
for _, msg := range m.ContainerStatuses {
@@ -7760,19 +8001,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n150, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n157, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n150
+ i += n157
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n151, err := m.Status.MarshalTo(dAtA[i:])
+ n158, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n151
+ i += n158
return i, nil
}
@@ -7794,19 +8035,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n152, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n159, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n152
+ i += n159
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
- n153, err := m.Template.MarshalTo(dAtA[i:])
+ n160, err := m.Template.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n153
+ i += n160
return i, nil
}
@@ -7828,11 +8069,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n154, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n161, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n154
+ i += n161
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -7866,19 +8107,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n155, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n162, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n155
+ i += n162
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n156, err := m.Spec.MarshalTo(dAtA[i:])
+ n163, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n156
+ i += n163
return i, nil
}
@@ -7958,19 +8199,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size()))
- n157, err := m.PodSignature.MarshalTo(dAtA[i:])
+ n164, err := m.PodSignature.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n157
+ i += n164
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size()))
- n158, err := m.EvictionTime.MarshalTo(dAtA[i:])
+ n165, err := m.EvictionTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n158
+ i += n165
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -8003,11 +8244,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size()))
- n159, err := m.Preference.MarshalTo(dAtA[i:])
+ n166, err := m.Preference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n159
+ i += n166
return i, nil
}
@@ -8029,11 +8270,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size()))
- n160, err := m.Handler.MarshalTo(dAtA[i:])
+ n167, err := m.Handler.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n160
+ i += n167
dAtA[i] = 0x10
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds))
@@ -8126,6 +8367,10 @@ func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
i += copy(dAtA[i:], m.Group)
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant)))
+ i += copy(dAtA[i:], m.Tenant)
return i, nil
}
@@ -8183,11 +8428,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n161, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n168, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n161
+ i += n168
}
dAtA[i] = 0x40
i++
@@ -8254,11 +8499,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n162, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n169, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n162
+ i += n169
}
dAtA[i] = 0x40
i++
@@ -8289,11 +8534,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n163, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n170, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n163
+ i += n170
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range)))
@@ -8325,27 +8570,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n164, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n171, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n164
+ i += n171
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n165, err := m.Spec.MarshalTo(dAtA[i:])
+ n172, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n165
+ i += n172
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n166, err := m.Status.MarshalTo(dAtA[i:])
+ n173, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n166
+ i += n173
return i, nil
}
@@ -8375,11 +8620,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size()))
- n167, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
+ n174, err := m.LastTransitionTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n167
+ i += n174
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -8409,11 +8654,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n168, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n175, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n168
+ i += n175
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -8475,11 +8720,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
- n169, err := m.Template.MarshalTo(dAtA[i:])
+ n176, err := m.Template.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n169
+ i += n176
}
dAtA[i] = 0x20
i++
@@ -8558,11 +8803,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size()))
- n170, err := m.Divisor.MarshalTo(dAtA[i:])
+ n177, err := m.Divisor.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n170
+ i += n177
return i, nil
}
@@ -8584,27 +8829,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n171, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n178, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n171
+ i += n178
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n172, err := m.Spec.MarshalTo(dAtA[i:])
+ n179, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n172
+ i += n179
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n173, err := m.Status.MarshalTo(dAtA[i:])
+ n180, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n173
+ i += n180
return i, nil
}
@@ -8626,11 +8871,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n174, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n181, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n174
+ i += n181
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -8685,11 +8930,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n175, err := (&v).MarshalTo(dAtA[i:])
+ n182, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n175
+ i += n182
}
}
if len(m.Scopes) > 0 {
@@ -8707,6 +8952,16 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], s)
}
}
+ if m.ScopeSelector != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size()))
+ n183, err := m.ScopeSelector.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n183
+ }
return i, nil
}
@@ -8749,11 +9004,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n176, err := (&v).MarshalTo(dAtA[i:])
+ n184, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n176
+ i += n184
}
}
if len(m.Used) > 0 {
@@ -8780,11 +9035,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n177, err := (&v).MarshalTo(dAtA[i:])
+ n185, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n177
+ i += n185
}
}
return i, nil
@@ -8829,11 +9084,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n178, err := (&v).MarshalTo(dAtA[i:])
+ n186, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n178
+ i += n186
}
}
if len(m.Requests) > 0 {
@@ -8860,11 +9115,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
- n179, err := (&v).MarshalTo(dAtA[i:])
+ n187, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n179
+ i += n187
}
}
return i, nil
@@ -8931,11 +9186,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n180, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n188, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n180
+ i += n188
}
dAtA[i] = 0x20
i++
@@ -9003,11 +9258,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n181, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n189, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n181
+ i += n189
}
dAtA[i] = 0x20
i++
@@ -9048,6 +9303,77 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *ScopeSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName)))
+ i += copy(dAtA[i:], m.ScopeName)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+ i += copy(dAtA[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
func (m *Secret) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -9066,11 +9392,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n182, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n190, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n182
+ i += n190
if len(m.Data) > 0 {
keysForData := make([]string, 0, len(m.Data))
for k := range m.Data {
@@ -9146,11 +9472,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n183, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n191, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n183
+ i += n191
if m.Optional != nil {
dAtA[i] = 0x10
i++
@@ -9182,11 +9508,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n184, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n192, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n184
+ i += n192
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
@@ -9222,11 +9548,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n185, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n193, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n185
+ i += n193
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -9260,11 +9586,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size()))
- n186, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
+ n194, err := m.LocalObjectReference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n186
+ i += n194
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -9384,11 +9710,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size()))
- n187, err := m.Capabilities.MarshalTo(dAtA[i:])
+ n195, err := m.Capabilities.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n187
+ i += n195
}
if m.Privileged != nil {
dAtA[i] = 0x10
@@ -9404,11 +9730,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size()))
- n188, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
+ n196, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n188
+ i += n196
}
if m.RunAsUser != nil {
dAtA[i] = 0x20
@@ -9450,6 +9776,22 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup))
}
+ if m.ProcMount != nil {
+ dAtA[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount)))
+ i += copy(dAtA[i:], *m.ProcMount)
+ }
+ if m.WindowsOptions != nil {
+ dAtA[i] = 0x52
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size()))
+ n197, err := m.WindowsOptions.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n197
+ }
return i, nil
}
@@ -9471,11 +9813,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size()))
- n189, err := m.Reference.MarshalTo(dAtA[i:])
+ n198, err := m.Reference.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n189
+ i += n198
return i, nil
}
@@ -9497,27 +9839,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n190, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n199, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n190
+ i += n199
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
- n191, err := m.Spec.MarshalTo(dAtA[i:])
+ n200, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n191
+ i += n200
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
- n192, err := m.Status.MarshalTo(dAtA[i:])
+ n201, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n192
+ i += n201
return i, nil
}
@@ -9539,11 +9881,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
- n193, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ n202, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n193
+ i += n202
if len(m.Secrets) > 0 {
for _, msg := range m.Secrets {
dAtA[i] = 0x12
@@ -9599,11 +9941,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n194, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n203, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n194
+ i += n203
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -9619,6 +9961,37 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience)))
+ i += copy(dAtA[i:], m.Audience)
+ if m.ExpirationSeconds != nil {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds))
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+ i += copy(dAtA[i:], m.Path)
+ return i, nil
+}
+
func (m *ServiceList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -9637,11 +10010,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n195, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n204, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n195
+ i += n204
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -9686,11 +10059,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size()))
- n196, err := m.TargetPort.MarshalTo(dAtA[i:])
+ n205, err := m.TargetPort.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n196
+ i += n205
dAtA[i] = 0x28
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort))
@@ -9837,11 +10210,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x72
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size()))
- n197, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:])
+ n206, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n197
+ i += n206
}
return i, nil
}
@@ -9864,11 +10237,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size()))
- n198, err := m.LoadBalancer.MarshalTo(dAtA[i:])
+ n207, err := m.LoadBalancer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n198
+ i += n207
return i, nil
}
@@ -9891,11 +10264,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size()))
- n199, err := m.ClientIP.MarshalTo(dAtA[i:])
+ n208, err := m.ClientIP.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n199
+ i += n208
}
return i, nil
}
@@ -9939,11 +10312,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n200, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n209, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n200
+ i += n209
}
return i, nil
}
@@ -9987,11 +10360,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
- n201, err := m.SecretRef.MarshalTo(dAtA[i:])
+ n210, err := m.SecretRef.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n201
+ i += n210
}
return i, nil
}
@@ -10040,11 +10413,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size()))
- n202, err := m.Port.MarshalTo(dAtA[i:])
+ n211, err := m.Port.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n202
+ i += n211
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
@@ -10083,11 +10456,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size()))
- n203, err := m.TimeAdded.MarshalTo(dAtA[i:])
+ n212, err := m.TimeAdded.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n203
+ i += n212
}
return i, nil
}
@@ -10131,6 +10504,105 @@ func (m *Toleration) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchLabelExpressions) > 0 {
+ for _, msg := range m.MatchLabelExpressions {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.APIGroup != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+ i += copy(dAtA[i:], *m.APIGroup)
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i += copy(dAtA[i:], m.Kind)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ return i, nil
+}
+
func (m *Volume) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -10153,11 +10625,11 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size()))
- n204, err := m.VolumeSource.MarshalTo(dAtA[i:])
+ n213, err := m.VolumeSource.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n204
+ i += n213
return i, nil
}
@@ -10228,6 +10700,10 @@ func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation)))
i += copy(dAtA[i:], *m.MountPropagation)
}
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr)))
+ i += copy(dAtA[i:], m.SubPathExpr)
return i, nil
}
@@ -10250,11 +10726,11 @@ func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size()))
- n205, err := m.Required.MarshalTo(dAtA[i:])
+ n214, err := m.Required.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n205
+ i += n214
}
return i, nil
}
@@ -10278,31 +10754,41 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size()))
- n206, err := m.Secret.MarshalTo(dAtA[i:])
+ n215, err := m.Secret.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n206
+ i += n215
}
if m.DownwardAPI != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size()))
- n207, err := m.DownwardAPI.MarshalTo(dAtA[i:])
+ n216, err := m.DownwardAPI.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n207
+ i += n216
}
if m.ConfigMap != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size()))
- n208, err := m.ConfigMap.MarshalTo(dAtA[i:])
+ n217, err := m.ConfigMap.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n208
+ i += n217
+ }
+ if m.ServiceAccountToken != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size()))
+ n218, err := m.ServiceAccountToken.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n218
}
return i, nil
}
@@ -10326,151 +10812,151 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size()))
- n209, err := m.HostPath.MarshalTo(dAtA[i:])
+ n219, err := m.HostPath.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n209
+ i += n219
}
if m.EmptyDir != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size()))
- n210, err := m.EmptyDir.MarshalTo(dAtA[i:])
+ n220, err := m.EmptyDir.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n210
+ i += n220
}
if m.GCEPersistentDisk != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size()))
- n211, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:])
+ n221, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n211
+ i += n221
}
if m.AWSElasticBlockStore != nil {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size()))
- n212, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:])
+ n222, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n212
+ i += n222
}
if m.GitRepo != nil {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size()))
- n213, err := m.GitRepo.MarshalTo(dAtA[i:])
+ n223, err := m.GitRepo.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n213
+ i += n223
}
if m.Secret != nil {
dAtA[i] = 0x32
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size()))
- n214, err := m.Secret.MarshalTo(dAtA[i:])
+ n224, err := m.Secret.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n214
+ i += n224
}
if m.NFS != nil {
dAtA[i] = 0x3a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size()))
- n215, err := m.NFS.MarshalTo(dAtA[i:])
+ n225, err := m.NFS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n215
+ i += n225
}
if m.ISCSI != nil {
dAtA[i] = 0x42
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size()))
- n216, err := m.ISCSI.MarshalTo(dAtA[i:])
+ n226, err := m.ISCSI.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n216
+ i += n226
}
if m.Glusterfs != nil {
dAtA[i] = 0x4a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size()))
- n217, err := m.Glusterfs.MarshalTo(dAtA[i:])
+ n227, err := m.Glusterfs.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n217
+ i += n227
}
if m.PersistentVolumeClaim != nil {
dAtA[i] = 0x52
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size()))
- n218, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:])
+ n228, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n218
+ i += n228
}
if m.RBD != nil {
dAtA[i] = 0x5a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size()))
- n219, err := m.RBD.MarshalTo(dAtA[i:])
+ n229, err := m.RBD.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n219
+ i += n229
}
if m.FlexVolume != nil {
dAtA[i] = 0x62
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size()))
- n220, err := m.FlexVolume.MarshalTo(dAtA[i:])
+ n230, err := m.FlexVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n220
+ i += n230
}
if m.Cinder != nil {
dAtA[i] = 0x6a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size()))
- n221, err := m.Cinder.MarshalTo(dAtA[i:])
+ n231, err := m.Cinder.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n221
+ i += n231
}
if m.CephFS != nil {
dAtA[i] = 0x72
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size()))
- n222, err := m.CephFS.MarshalTo(dAtA[i:])
+ n232, err := m.CephFS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n222
+ i += n232
}
if m.Flocker != nil {
dAtA[i] = 0x7a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size()))
- n223, err := m.Flocker.MarshalTo(dAtA[i:])
+ n233, err := m.Flocker.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n223
+ i += n233
}
if m.DownwardAPI != nil {
dAtA[i] = 0x82
@@ -10478,11 +10964,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size()))
- n224, err := m.DownwardAPI.MarshalTo(dAtA[i:])
+ n234, err := m.DownwardAPI.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n224
+ i += n234
}
if m.FC != nil {
dAtA[i] = 0x8a
@@ -10490,11 +10976,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size()))
- n225, err := m.FC.MarshalTo(dAtA[i:])
+ n235, err := m.FC.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n225
+ i += n235
}
if m.AzureFile != nil {
dAtA[i] = 0x92
@@ -10502,11 +10988,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size()))
- n226, err := m.AzureFile.MarshalTo(dAtA[i:])
+ n236, err := m.AzureFile.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n226
+ i += n236
}
if m.ConfigMap != nil {
dAtA[i] = 0x9a
@@ -10514,11 +11000,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size()))
- n227, err := m.ConfigMap.MarshalTo(dAtA[i:])
+ n237, err := m.ConfigMap.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n227
+ i += n237
}
if m.VsphereVolume != nil {
dAtA[i] = 0xa2
@@ -10526,11 +11012,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size()))
- n228, err := m.VsphereVolume.MarshalTo(dAtA[i:])
+ n238, err := m.VsphereVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n228
+ i += n238
}
if m.Quobyte != nil {
dAtA[i] = 0xaa
@@ -10538,11 +11024,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size()))
- n229, err := m.Quobyte.MarshalTo(dAtA[i:])
+ n239, err := m.Quobyte.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n229
+ i += n239
}
if m.AzureDisk != nil {
dAtA[i] = 0xb2
@@ -10550,11 +11036,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
- n230, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ n240, err := m.AzureDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n230
+ i += n240
}
if m.PhotonPersistentDisk != nil {
dAtA[i] = 0xba
@@ -10562,11 +11048,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size()))
- n231, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:])
+ n241, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n231
+ i += n241
}
if m.PortworxVolume != nil {
dAtA[i] = 0xc2
@@ -10574,11 +11060,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size()))
- n232, err := m.PortworxVolume.MarshalTo(dAtA[i:])
+ n242, err := m.PortworxVolume.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n232
+ i += n242
}
if m.ScaleIO != nil {
dAtA[i] = 0xca
@@ -10586,11 +11072,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size()))
- n233, err := m.ScaleIO.MarshalTo(dAtA[i:])
+ n243, err := m.ScaleIO.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n233
+ i += n243
}
if m.Projected != nil {
dAtA[i] = 0xd2
@@ -10598,11 +11084,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size()))
- n234, err := m.Projected.MarshalTo(dAtA[i:])
+ n244, err := m.Projected.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n234
+ i += n244
}
if m.StorageOS != nil {
dAtA[i] = 0xda
@@ -10610,11 +11096,23 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size()))
- n235, err := m.StorageOS.MarshalTo(dAtA[i:])
+ n245, err := m.StorageOS.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n235
+ i += n245
+ }
+ if m.CSI != nil {
+ dAtA[i] = 0xe2
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size()))
+ n246, err := m.CSI.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n246
}
return i, nil
}
@@ -10674,32 +11172,44 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size()))
- n236, err := m.PodAffinityTerm.MarshalTo(dAtA[i:])
+ n247, err := m.PodAffinityTerm.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n236
+ i += n247
return i, nil
}
-func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
+func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.GMSACredentialSpecName != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName)))
+ i += copy(dAtA[i:], *m.GMSACredentialSpecName)
+ }
+ if m.GMSACredentialSpec != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec)))
+ i += copy(dAtA[i:], *m.GMSACredentialSpec)
+ }
+ return i, nil
}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -10852,6 +11362,37 @@ func (m *CSIPersistentVolumeSource) Size() (n int) {
l = m.NodePublishSecretRef.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.ControllerExpandSecretRef != nil {
+ l = m.ControllerExpandSecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *CSIVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ReadOnly != nil {
+ n += 2
+ }
+ if m.FSType != nil {
+ l = len(*m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.VolumeAttributes) > 0 {
+ for k, v := range m.VolumeAttributes {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.NodePublishSecretRef != nil {
+ l = m.NodePublishSecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -10919,6 +11460,21 @@ func (m *CephFSVolumeSource) Size() (n int) {
return n
}
+func (m *CinderPersistentVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.VolumeID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func (m *CinderVolumeSource) Size() (n int) {
var l int
_ = l
@@ -10927,6 +11483,10 @@ func (m *CinderVolumeSource) Size() (n int) {
l = len(m.FSType)
n += 1 + l + sovGenerated(uint64(l))
n += 2
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -11047,6 +11607,22 @@ func (m *ConfigMapList) Size() (n int) {
return n
}
+func (m *ConfigMapNodeConfigSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KubeletConfigKey)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *ConfigMapProjection) Size() (n int) {
var l int
_ = l
@@ -11273,26 +11849,6 @@ func (m *DaemonEndpoint) Size() (n int) {
return n
}
-func (m *DeleteOptions) Size() (n int) {
- var l int
- _ = l
- if m.GracePeriodSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds))
- }
- if m.Preconditions != nil {
- l = m.Preconditions.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.OrphanDependents != nil {
- n += 2
- }
- if m.PropagationPolicy != nil {
- l = len(*m.PropagationPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
func (m *DownwardAPIProjection) Size() (n int) {
var l int
_ = l
@@ -11674,6 +12230,21 @@ func (m *GitRepoVolumeSource) Size() (n int) {
return n
}
+func (m *GlusterfsPersistentVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.EndpointsName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.EndpointsNamespace != nil {
+ l = len(*m.EndpointsNamespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func (m *GlusterfsVolumeSource) Size() (n int) {
var l int
_ = l
@@ -11953,23 +12524,6 @@ func (m *List) Size() (n int) {
return n
}
-func (m *ListOptions) Size() (n int) {
- var l int
- _ = l
- l = len(m.LabelSelector)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.FieldSelector)
- n += 1 + l + sovGenerated(uint64(l))
- n += 2
- l = len(m.ResourceVersion)
- n += 1 + l + sovGenerated(uint64(l))
- if m.TimeoutSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
- }
- n += 2
- return n
-}
-
func (m *LoadBalancerIngress) Size() (n int) {
var l int
_ = l
@@ -12005,6 +12559,10 @@ func (m *LocalVolumeSource) Size() (n int) {
_ = l
l = len(m.Path)
n += 1 + l + sovGenerated(uint64(l))
+ if m.FSType != nil {
+ l = len(*m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -12124,13 +12682,33 @@ func (m *NodeCondition) Size() (n int) {
func (m *NodeConfigSource) Size() (n int) {
var l int
_ = l
- if m.ConfigMapRef != nil {
- l = m.ConfigMapRef.Size()
+ if m.ConfigMap != nil {
+ l = m.ConfigMap.Size()
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
+func (m *NodeConfigStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.Assigned != nil {
+ l = m.Assigned.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Active != nil {
+ l = m.Active.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastKnownGood != nil {
+ l = m.LastKnownGood.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Error)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *NodeDaemonEndpoints) Size() (n int) {
var l int
_ = l
@@ -12213,6 +12791,12 @@ func (m *NodeSelectorTerm) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if len(m.MatchFields) > 0 {
+ for _, e := range m.MatchFields {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -12221,7 +12805,7 @@ func (m *NodeSpec) Size() (n int) {
_ = l
l = len(m.PodCIDR)
n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ExternalID)
+ l = len(m.DoNotUse_ExternalID)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.ProviderID)
n += 1 + l + sovGenerated(uint64(l))
@@ -12296,6 +12880,10 @@ func (m *NodeStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.Config != nil {
+ l = m.Config.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -12335,68 +12923,6 @@ func (m *ObjectFieldSelector) Size() (n int) {
return n
}
-func (m *ObjectMeta) Size() (n int) {
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.GenerateName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.SelfLink)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.UID)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ResourceVersion)
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Generation))
- l = m.CreationTimestamp.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if m.DeletionTimestamp != nil {
- l = m.DeletionTimestamp.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.DeletionGracePeriodSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds))
- }
- if len(m.Labels) > 0 {
- for k, v := range m.Labels {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- if len(m.Annotations) > 0 {
- for k, v := range m.Annotations {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
- }
- }
- if len(m.OwnerReferences) > 0 {
- for _, e := range m.OwnerReferences {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Finalizers) > 0 {
- for _, s := range m.Finalizers {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.ClusterName)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Initializers != nil {
- l = m.Initializers.Size()
- n += 2 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
func (m *ObjectReference) Size() (n int) {
var l int
_ = l
@@ -12498,6 +13024,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) {
l = len(*m.VolumeMode)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.DataSource != nil {
+ l = m.DataSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -12921,6 +13451,14 @@ func (m *PodProxyOptions) Size() (n int) {
return n
}
+func (m *PodReadinessGate) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ConditionType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *PodSecurityContext) Size() (n int) {
var l int
_ = l
@@ -12945,6 +13483,16 @@ func (m *PodSecurityContext) Size() (n int) {
if m.RunAsGroup != nil {
n += 1 + sovGenerated(uint64(*m.RunAsGroup))
}
+ if len(m.Sysctls) > 0 {
+ for _, e := range m.Sysctls {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.WindowsOptions != nil {
+ l = m.WindowsOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -13053,6 +13601,32 @@ func (m *PodSpec) Size() (n int) {
if m.ShareProcessNamespace != nil {
n += 3
}
+ if len(m.ReadinessGates) > 0 {
+ for _, e := range m.ReadinessGates {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.RuntimeClassName != nil {
+ l = len(*m.RuntimeClassName)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.EnableServiceLinks != nil {
+ n += 3
+ }
+ if m.PreemptionPolicy != nil {
+ l = len(*m.PreemptionPolicy)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Overhead) > 0 {
+ for k, v := range m.Overhead {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
return n
}
@@ -13226,6 +13800,8 @@ func (m *QuobyteVolumeSource) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Group)
n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Tenant)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -13434,6 +14010,10 @@ func (m *ResourceQuotaSpec) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.ScopeSelector != nil {
+ l = m.ScopeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -13551,6 +14131,34 @@ func (m *ScaleIOVolumeSource) Size() (n int) {
return n
}
+func (m *ScopeSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ScopedResourceSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ScopeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
func (m *Secret) Size() (n int) {
var l int
_ = l
@@ -13695,6 +14303,14 @@ func (m *SecurityContext) Size() (n int) {
if m.RunAsGroup != nil {
n += 1 + sovGenerated(uint64(*m.RunAsGroup))
}
+ if m.ProcMount != nil {
+ l = len(*m.ProcMount)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.WindowsOptions != nil {
+ l = m.WindowsOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -13755,6 +14371,19 @@ func (m *ServiceAccountList) Size() (n int) {
return n
}
+func (m *ServiceAccountTokenProjection) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Audience)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+ }
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *ServiceList) Size() (n int) {
var l int
_ = l
@@ -13946,6 +14575,46 @@ func (m *Toleration) Size() (n int) {
return n
}
+func (m *TopologySelectorLabelRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TopologySelectorTerm) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchLabelExpressions) > 0 {
+ for _, e := range m.MatchLabelExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TypedLocalObjectReference) Size() (n int) {
+ var l int
+ _ = l
+ if m.APIGroup != nil {
+ l = len(*m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *Volume) Size() (n int) {
var l int
_ = l
@@ -13980,6 +14649,8 @@ func (m *VolumeMount) Size() (n int) {
l = len(*m.MountPropagation)
n += 1 + l + sovGenerated(uint64(l))
}
+ l = len(m.SubPathExpr)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -14008,6 +14679,10 @@ func (m *VolumeProjection) Size() (n int) {
l = m.ConfigMap.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.ServiceAccountToken != nil {
+ l = m.ServiceAccountToken.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -14122,6 +14797,10 @@ func (m *VolumeSource) Size() (n int) {
l = m.StorageOS.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if m.CSI != nil {
+ l = m.CSI.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -14148,6 +14827,20 @@ func (m *WeightedPodAffinityTerm) Size() (n int) {
return n
}
+func (m *WindowsSecurityContextOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.GMSACredentialSpecName != nil {
+ l = len(*m.GMSACredentialSpecName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GMSACredentialSpec != nil {
+ l = len(*m.GMSACredentialSpec)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func sovGenerated(x uint64) (n int) {
for {
n++
@@ -14281,6 +14974,31 @@ func (this *CSIPersistentVolumeSource) String() string {
`ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`,
`NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`,
`NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`,
+ `ControllerExpandSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerExpandSecretRef), "SecretReference", "SecretReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CSIVolumeSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes))
+ for k := range this.VolumeAttributes {
+ keysForVolumeAttributes = append(keysForVolumeAttributes, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ mapStringForVolumeAttributes := "map[string]string{"
+ for _, k := range keysForVolumeAttributes {
+ mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k])
+ }
+ mapStringForVolumeAttributes += "}"
+ s := strings.Join([]string{`&CSIVolumeSource{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`,
+ `FSType:` + valueToStringGenerated(this.FSType) + `,`,
+ `VolumeAttributes:` + mapStringForVolumeAttributes + `,`,
+ `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
`}`,
}, "")
return s
@@ -14326,6 +15044,19 @@ func (this *CephFSVolumeSource) String() string {
}, "")
return s
}
+func (this *CinderPersistentVolumeSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CinderPersistentVolumeSource{`,
+ `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
+ `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+ `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+ `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *CinderVolumeSource) String() string {
if this == nil {
return "nil"
@@ -14334,6 +15065,7 @@ func (this *CinderVolumeSource) String() string {
`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+ `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
`}`,
}, "")
return s
@@ -14449,6 +15181,20 @@ func (this *ConfigMapList) String() string {
}, "")
return s
}
+func (this *ConfigMapNodeConfigSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ConfigMapNodeConfigSource{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+ `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ConfigMapProjection) String() string {
if this == nil {
return "nil"
@@ -14605,19 +15351,6 @@ func (this *DaemonEndpoint) String() string {
}, "")
return s
}
-func (this *DeleteOptions) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&DeleteOptions{`,
- `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`,
- `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`,
- `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
- `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *DownwardAPIProjection) String() string {
if this == nil {
return "nil"
@@ -14925,6 +15658,19 @@ func (this *GitRepoVolumeSource) String() string {
}, "")
return s
}
+func (this *GlusterfsPersistentVolumeSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`,
+ `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+ `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *GlusterfsVolumeSource) String() string {
if this == nil {
return "nil"
@@ -15167,21 +15913,6 @@ func (this *List) String() string {
}, "")
return s
}
-func (this *ListOptions) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ListOptions{`,
- `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`,
- `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`,
- `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`,
- `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
- `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
- `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *LoadBalancerIngress) String() string {
if this == nil {
return "nil"
@@ -15219,6 +15950,7 @@ func (this *LocalVolumeSource) String() string {
}
s := strings.Join([]string{`&LocalVolumeSource{`,
`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `FSType:` + valueToStringGenerated(this.FSType) + `,`,
`}`,
}, "")
return s
@@ -15332,7 +16064,20 @@ func (this *NodeConfigSource) String() string {
return "nil"
}
s := strings.Join([]string{`&NodeConfigSource{`,
- `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ObjectReference", "ObjectReference", 1) + `,`,
+ `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NodeConfigStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeConfigStatus{`,
+ `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+ `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+ `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+ `Error:` + fmt.Sprintf("%v", this.Error) + `,`,
`}`,
}, "")
return s
@@ -15416,6 +16161,7 @@ func (this *NodeSelectorTerm) String() string {
}
s := strings.Join([]string{`&NodeSelectorTerm{`,
`MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`,
+ `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -15426,7 +16172,7 @@ func (this *NodeSpec) String() string {
}
s := strings.Join([]string{`&NodeSpec{`,
`PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`,
- `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`,
+ `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`,
`ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`,
`Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`,
`Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`,
@@ -15470,6 +16216,7 @@ func (this *NodeStatus) String() string {
`Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`,
`VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`,
`VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`,
+ `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`,
`}`,
}, "")
return s
@@ -15504,51 +16251,6 @@ func (this *ObjectFieldSelector) String() string {
}, "")
return s
}
-func (this *ObjectMeta) String() string {
- if this == nil {
- return "nil"
- }
- keysForLabels := make([]string, 0, len(this.Labels))
- for k := range this.Labels {
- keysForLabels = append(keysForLabels, k)
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
- mapStringForLabels := "map[string]string{"
- for _, k := range keysForLabels {
- mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
- }
- mapStringForLabels += "}"
- keysForAnnotations := make([]string, 0, len(this.Annotations))
- for k := range this.Annotations {
- keysForAnnotations = append(keysForAnnotations, k)
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
- mapStringForAnnotations := "map[string]string{"
- for _, k := range keysForAnnotations {
- mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
- }
- mapStringForAnnotations += "}"
- s := strings.Join([]string{`&ObjectMeta{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`,
- `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
- `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
- `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
- `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`,
- `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`,
- `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`,
- `Labels:` + mapStringForLabels + `,`,
- `Annotations:` + mapStringForAnnotations + `,`,
- `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`,
- `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
- `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`,
- `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "k8s_io_apimachinery_pkg_apis_meta_v1.Initializers", 1) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *ObjectReference) String() string {
if this == nil {
return "nil"
@@ -15626,6 +16328,7 @@ func (this *PersistentVolumeClaimSpec) String() string {
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
`StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`,
`VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`,
+ `DataSource:` + strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`,
`}`,
}, "")
return s
@@ -15683,11 +16386,11 @@ func (this *PersistentVolumeSource) String() string {
`GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`,
`AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`,
`HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`,
- `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`,
+ `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`,
`NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`,
`RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`,
`ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`,
- `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`,
+ `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`,
`CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`,
`FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`,
`Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`,
@@ -15918,6 +16621,16 @@ func (this *PodProxyOptions) String() string {
}, "")
return s
}
+func (this *PodReadinessGate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodReadinessGate{`,
+ `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *PodSecurityContext) String() string {
if this == nil {
return "nil"
@@ -15929,6 +16642,8 @@ func (this *PodSecurityContext) String() string {
`SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`,
`FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`,
`RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`,
+ `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`,
+ `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`,
`}`,
}, "")
return s
@@ -15957,6 +16672,16 @@ func (this *PodSpec) String() string {
mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
}
mapStringForNodeSelector += "}"
+ keysForOverhead := make([]string, 0, len(this.Overhead))
+ for k := range this.Overhead {
+ keysForOverhead = append(keysForOverhead, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+ mapStringForOverhead := "ResourceList{"
+ for _, k := range keysForOverhead {
+ mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)])
+ }
+ mapStringForOverhead += "}"
s := strings.Join([]string{`&PodSpec{`,
`Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`,
`Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`,
@@ -15985,6 +16710,11 @@ func (this *PodSpec) String() string {
`Priority:` + valueToStringGenerated(this.Priority) + `,`,
`DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`,
`ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`,
+ `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`,
+ `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`,
+ `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`,
+ `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`,
+ `Overhead:` + mapStringForOverhead + `,`,
`}`,
}, "")
return s
@@ -16135,6 +16865,7 @@ func (this *QuobyteVolumeSource) String() string {
`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
`User:` + fmt.Sprintf("%v", this.User) + `,`,
`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`,
`}`,
}, "")
return s
@@ -16312,6 +17043,7 @@ func (this *ResourceQuotaSpec) String() string {
s := strings.Join([]string{`&ResourceQuotaSpec{`,
`Hard:` + mapStringForHard + `,`,
`Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+ `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`,
`}`,
}, "")
return s
@@ -16429,6 +17161,28 @@ func (this *ScaleIOVolumeSource) String() string {
}, "")
return s
}
+func (this *ScopeSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ScopeSelector{`,
+ `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ScopedResourceSelectorRequirement) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`,
+ `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`,
+ `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Secret) String() string {
if this == nil {
return "nil"
@@ -16545,6 +17299,8 @@ func (this *SecurityContext) String() string {
`ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`,
`AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`,
`RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`,
+ `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`,
+ `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`,
`}`,
}, "")
return s
@@ -16595,6 +17351,18 @@ func (this *ServiceAccountList) String() string {
}, "")
return s
}
+func (this *ServiceAccountTokenProjection) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceAccountTokenProjection{`,
+ `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`,
+ `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ServiceList) String() string {
if this == nil {
return "nil"
@@ -16759,6 +17527,39 @@ func (this *Toleration) String() string {
}, "")
return s
}
+func (this *TopologySelectorLabelRequirement) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TopologySelectorLabelRequirement{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TopologySelectorTerm) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TopologySelectorTerm{`,
+ `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TypedLocalObjectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TypedLocalObjectReference{`,
+ `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Volume) String() string {
if this == nil {
return "nil"
@@ -16791,6 +17592,7 @@ func (this *VolumeMount) String() string {
`MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`,
`SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`,
`MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`,
+ `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`,
`}`,
}, "")
return s
@@ -16813,6 +17615,7 @@ func (this *VolumeProjection) String() string {
`Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`,
`DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`,
`ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`,
+ `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`,
`}`,
}, "")
return s
@@ -16849,6 +17652,7 @@ func (this *VolumeSource) String() string {
`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
`Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
+ `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`,
`}`,
}, "")
return s
@@ -16877,6 +17681,17 @@ func (this *WeightedPodAffinityTerm) String() string {
}, "")
return s
}
+func (this *WindowsSecurityContextOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WindowsSecurityContextOptions{`,
+ `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`,
+ `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -18147,51 +18962,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.VolumeAttributes == nil {
m.VolumeAttributes = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -18201,41 +18979,80 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.VolumeAttributes[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.VolumeAttributes[mapkey] = mapvalue
}
+ m.VolumeAttributes[mapkey] = mapvalue
iNdEx = postIndex
case 6:
if wireType != 2 {
@@ -18336,6 +19153,320 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ControllerExpandSecretRef == nil {
+ m.ControllerExpandSecretRef = &SecretReference{}
+ }
+ if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ReadOnly = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.FSType = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VolumeAttributes == nil {
+ m.VolumeAttributes = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.VolumeAttributes[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodePublishSecretRef == nil {
+ m.NodePublishSecretRef = &LocalObjectReference{}
+ }
+ if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -18903,6 +20034,167 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &SecretReference{}
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -19010,6 +20302,39 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error {
}
}
m.ReadOnly = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &LocalObjectReference{}
+ }
+ if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -19574,51 +20899,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Data == nil {
m.Data = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -19628,41 +20916,80 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Data[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Data[mapkey] = mapvalue
}
+ m.Data[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -19690,51 +21017,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.BinaryData == nil {
m.BinaryData = make(map[string][]byte)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ mapvalue := []byte{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -19744,42 +21034,81 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapbyteLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapbyteLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapbyteLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapbyteLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intMapbyteLen := int(mapbyteLen)
+ if intMapbyteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postbytesIndex := iNdEx + intMapbyteLen
+ if postbytesIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = make([]byte, mapbyteLen)
+ copy(mapvalue, dAtA[iNdEx:postbytesIndex])
+ iNdEx = postbytesIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intMapbyteLen := int(mapbyteLen)
- if intMapbyteLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postbytesIndex := iNdEx + intMapbyteLen
- if postbytesIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := make([]byte, mapbyteLen)
- copy(mapvalue, dAtA[iNdEx:postbytesIndex])
- iNdEx = postbytesIndex
- m.BinaryData[mapkey] = mapvalue
- } else {
- var mapvalue []byte
- m.BinaryData[mapkey] = mapvalue
}
+ m.BinaryData[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -20144,6 +21473,201 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KubeletConfigKey = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -22266,160 +23790,6 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.GracePeriodSeconds = &v
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Preconditions == nil {
- m.Preconditions = &Preconditions{}
- }
- if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.OrphanDependents = &b
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := DeletionPropagation(dAtA[iNdEx:postIndex])
- m.PropagationPolicy = &s
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -25268,51 +26638,14 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Options == nil {
m.Options = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -25322,41 +26655,80 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Options[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Options[mapkey] = mapvalue
}
+ m.Options[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -25545,51 +26917,14 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Options == nil {
m.Options = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -25599,41 +26934,80 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Options[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Options[mapkey] = mapvalue
}
+ m.Options[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -26048,6 +27422,164 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EndpointsName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.EndpointsNamespace = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -27960,51 +29492,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Max == nil {
m.Max = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -28014,46 +29509,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Max[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Max[ResourceName(mapkey)] = mapvalue
}
+ m.Max[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -28081,51 +29615,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Min == nil {
m.Min = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -28135,46 +29632,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Min[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Min[ResourceName(mapkey)] = mapvalue
}
+ m.Min[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -28202,51 +29738,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Default == nil {
m.Default = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -28256,46 +29755,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Default[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Default[ResourceName(mapkey)] = mapvalue
}
+ m.Default[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 5:
if wireType != 2 {
@@ -28323,51 +29861,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.DefaultRequest == nil {
m.DefaultRequest = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -28377,46 +29878,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.DefaultRequest[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.DefaultRequest[ResourceName(mapkey)] = mapvalue
}
+ m.DefaultRequest[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 6:
if wireType != 2 {
@@ -28444,51 +29984,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.MaxLimitRequestRatio == nil {
m.MaxLimitRequestRatio = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -28498,46 +30001,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue
}
+ m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -28863,203 +30405,6 @@ func (m *List) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ListOptions) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ListOptions: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LabelSelector = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FieldSelector = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Watch = bool(v != 0)
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceVersion = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TimeoutSeconds = &v
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IncludeUninitialized = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -29386,6 +30731,36 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error {
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.FSType = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -30561,9 +31936,92 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: NodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfigMap == nil {
+ m.ConfigMap = &ConfigMapNodeConfigSource{}
+ }
+ if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeConfigStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Assigned", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -30587,13 +32045,108 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ConfigMapRef == nil {
- m.ConfigMapRef = &ObjectReference{}
+ if m.Assigned == nil {
+ m.Assigned = &NodeConfigSource{}
}
- if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Assigned.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Active == nil {
+ m.Active = &NodeConfigSource{}
+ }
+ if err := m.Active.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastKnownGood", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastKnownGood == nil {
+ m.LastKnownGood = &NodeConfigSource{}
+ }
+ if err := m.LastKnownGood.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -30940,51 +32493,14 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Capacity == nil {
m.Capacity = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -30994,46 +32510,85 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Capacity[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Capacity[ResourceName(mapkey)] = mapvalue
}
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -31334,6 +32889,37 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchFields", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchFields = append(m.MatchFields, NodeSelectorRequirement{})
+ if err := m.MatchFields[len(m.MatchFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -31415,7 +33001,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DoNotUse_ExternalID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -31440,7 +33026,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ExternalID = string(dAtA[iNdEx:postIndex])
+ m.DoNotUse_ExternalID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -31631,51 +33217,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Capacity == nil {
m.Capacity = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -31685,46 +33234,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Capacity[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Capacity[ResourceName(mapkey)] = mapvalue
}
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -31752,51 +33340,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Allocatable == nil {
m.Allocatable = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -31806,46 +33357,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Allocatable[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Allocatable[ResourceName(mapkey)] = mapvalue
}
+ m.Allocatable[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -32089,6 +33679,39 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = &NodeConfigStatus{}
+ }
+ if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -32558,686 +34181,6 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.GenerateName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Namespace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SelfLink = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceVersion = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
- }
- m.Generation = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Generation |= (int64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DeletionTimestamp == nil {
- m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
- }
- if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.DeletionGracePeriodSeconds = &v
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- if m.Labels == nil {
- m.Labels = make(map[string]string)
- }
- if iNdEx < postIndex {
- var valuekey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Labels[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Labels[mapkey] = mapvalue
- }
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- if m.Annotations == nil {
- m.Annotations = make(map[string]string)
- }
- if iNdEx < postIndex {
- var valuekey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Annotations[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Annotations[mapkey] = mapvalue
- }
- iNdEx = postIndex
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{})
- if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 14:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 15:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClusterName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 16:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Initializers == nil {
- m.Initializers = &k8s_io_apimachinery_pkg_apis_meta_v1.Initializers{}
- }
- if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *ObjectReference) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -34318,6 +35261,39 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error {
s := PersistentVolumeMode(dAtA[iNdEx:postIndex])
m.VolumeMode = &s
iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DataSource == nil {
+ m.DataSource = &TypedLocalObjectReference{}
+ }
+ if err := m.DataSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -34452,51 +35428,14 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Capacity == nil {
m.Capacity = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -34506,46 +35445,85 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Capacity[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Capacity[ResourceName(mapkey)] = mapvalue
}
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -34964,7 +35942,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Glusterfs == nil {
- m.Glusterfs = &GlusterfsVolumeSource{}
+ m.Glusterfs = &GlusterfsPersistentVolumeSource{}
}
if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -35096,7 +36074,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Cinder == nil {
- m.Cinder = &CinderVolumeSource{}
+ m.Cinder = &CinderPersistentVolumeSource{}
}
if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -35640,51 +36618,14 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Capacity == nil {
m.Capacity = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -35694,46 +36635,85 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Capacity[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Capacity[ResourceName(mapkey)] = mapvalue
}
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -38103,6 +39083,85 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *PodReadinessGate) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -38308,6 +39367,70 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
}
}
m.RunAsGroup = &v
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Sysctls = append(m.Sysctls, Sysctl{})
+ if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WindowsOptions == nil {
+ m.WindowsOptions = &WindowsSecurityContextOptions{}
+ }
+ if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -38627,51 +39750,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.NodeSelector == nil {
m.NodeSelector = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -38681,41 +39767,80 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.NodeSelector[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.NodeSelector[mapkey] = mapvalue
}
+ m.NodeSelector[mapkey] = mapvalue
iNdEx = postIndex
case 8:
if wireType != 2 {
@@ -39265,6 +40390,241 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
}
b := bool(v != 0)
m.ShareProcessNamespace = &b
+ case 28:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{})
+ if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 29:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.RuntimeClassName = &s
+ iNdEx = postIndex
+ case 30:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.EnableServiceLinks = &b
+ case 31:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := PreemptionPolicy(dAtA[iNdEx:postIndex])
+ m.PreemptionPolicy = &s
+ iNdEx = postIndex
+ case 32:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Overhead == nil {
+ m.Overhead = make(ResourceList)
+ }
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Overhead[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -41022,6 +42382,35 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error {
}
m.Group = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tenant = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -42259,51 +43648,14 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -42313,41 +43665,80 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Selector[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Selector[mapkey] = mapvalue
}
+ m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -43042,51 +44433,14 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Hard == nil {
m.Hard = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -43096,46 +44450,85 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Hard[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Hard[ResourceName(mapkey)] = mapvalue
}
+ m.Hard[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -43166,6 +44559,39 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error {
}
m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ScopeSelector == nil {
+ m.ScopeSelector = &ScopeSelector{}
+ }
+ if err := m.ScopeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -43242,51 +44668,14 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Hard == nil {
m.Hard = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -43296,46 +44685,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Hard[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Hard[ResourceName(mapkey)] = mapvalue
}
+ m.Hard[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -43363,51 +44791,14 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Used == nil {
m.Used = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -43417,46 +44808,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Used[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Used[ResourceName(mapkey)] = mapvalue
}
+ m.Used[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -43534,51 +44964,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Limits == nil {
m.Limits = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -43588,46 +44981,85 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Limits[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Limits[ResourceName(mapkey)] = mapvalue
}
+ m.Limits[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -43655,51 +45087,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Requests == nil {
m.Requests = make(ResourceList)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey ResourceName
+ mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -43709,46 +45104,85 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- m.Requests[ResourceName(mapkey)] = *mapvalue
- } else {
- var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity
- m.Requests[ResourceName(mapkey)] = mapvalue
}
+ m.Requests[ResourceName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -44589,7 +46023,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *Secret) Unmarshal(dAtA []byte) error {
+func (m *ScopeSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -44612,15 +46046,15 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: Secret: wiretype end group for non-group")
+ return fmt.Errorf("proto: ScopeSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ScopeSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -44644,15 +46078,95 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.MatchExpressions = append(m.MatchExpressions, ScopedResourceSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
return err
}
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScopedResourceSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScopedResourceSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ScopeName = ResourceQuotaScope(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -44662,19 +46176,105 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = ScopeSelectorOperator(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Secret) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Secret: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -44684,12 +46284,27 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
+ msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapkey uint64
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -44699,26 +46314,26 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
+ postIndex := iNdEx + msglen
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Data == nil {
m.Data = make(map[string][]byte)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ mapvalue := []byte{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -44728,42 +46343,81 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var mapbyteLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- mapbyteLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapbyteLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapbyteLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intMapbyteLen := int(mapbyteLen)
+ if intMapbyteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postbytesIndex := iNdEx + intMapbyteLen
+ if postbytesIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = make([]byte, mapbyteLen)
+ copy(mapvalue, dAtA[iNdEx:postbytesIndex])
+ iNdEx = postbytesIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intMapbyteLen := int(mapbyteLen)
- if intMapbyteLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postbytesIndex := iNdEx + intMapbyteLen
- if postbytesIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := make([]byte, mapbyteLen)
- copy(mapvalue, dAtA[iNdEx:postbytesIndex])
- iNdEx = postbytesIndex
- m.Data[mapkey] = mapvalue
- } else {
- var mapvalue []byte
- m.Data[mapkey] = mapvalue
}
+ m.Data[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -44820,51 +46474,14 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.StringData == nil {
m.StringData = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -44874,41 +46491,80 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.StringData[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.StringData[mapkey] = mapvalue
}
+ m.StringData[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -45883,6 +47539,69 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error {
}
}
m.RunAsGroup = &v
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProcMount", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ProcMountType(dAtA[iNdEx:postIndex])
+ m.ProcMount = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WindowsOptions == nil {
+ m.WindowsOptions = &WindowsSecurityContextOptions{}
+ }
+ if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -46398,6 +48117,134 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Audience = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ExpirationSeconds = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ServiceList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -46850,51 +48697,14 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -46904,41 +48714,80 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Selector[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Selector[mapkey] = mapvalue
}
+ m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -48381,6 +50230,333 @@ func (m *Toleration) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TopologySelectorLabelRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TopologySelectorLabelRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{})
+ if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.APIGroup = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Volume) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -48764,6 +50940,35 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error {
s := MountPropagationMode(dAtA[iNdEx:postIndex])
m.MountPropagation = &s
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubPathExpr = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -48996,6 +51201,39 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ServiceAccountToken == nil {
+ m.ServiceAccountToken = &ServiceAccountTokenProjection{}
+ }
+ if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -49937,6 +52175,39 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 28:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CSI == nil {
+ m.CSI = &CSIVolumeSource{}
+ }
+ if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -50223,6 +52494,116 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GMSACredentialSpecName = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.GMSACredentialSpec = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
@@ -50333,797 +52714,826 @@ func init() {
}
var fileDescriptorGenerated = []byte{
- // 12666 bytes of a gzipped FileDescriptorProto
-	// ... gzipped FileDescriptorProto byte values ...
- 0x4c, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x88, 0xde, 0x0a, 0x82, 0x58, 0x1d, 0x05, 0x6a, 0xb5, 0x2c,
- 0xb3, 0x52, 0x2c, 0xa0, 0xf4, 0x08, 0xd8, 0x25, 0xa1, 0x4f, 0x3c, 0x53, 0x93, 0xa9, 0x8e, 0x80,
- 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x48, 0x0b, 0x22, 0xb6, 0xfc, 0x84, 0xa8, 0x9e, 0xd8, 0x55,
- 0xd6, 0xb9, 0xdb, 0xbc, 0x84, 0xa3, 0x2f, 0xc1, 0xe3, 0xca, 0xcb, 0x1d, 0x73, 0xcd, 0xb0, 0x6c,
- 0x71, 0xc4, 0xb8, 0x59, 0x3f, 0xbe, 0x92, 0x8d, 0x86, 0xf3, 0xea, 0xa3, 0x37, 0x61, 0x4a, 0x88,
- 0xc0, 0x92, 0xe2, 0xa8, 0x69, 0xac, 0x70, 0xd3, 0x80, 0xe2, 0x14, 0x36, 0xaa, 0xc0, 0x0c, 0x2d,
- 0x61, 0x52, 0xa8, 0xa4, 0xc0, 0xbd, 0xf5, 0xd5, 0x59, 0x7f, 0x33, 0x05, 0xc7, 0x5d, 0x35, 0xd0,
- 0x12, 0x4c, 0x73, 0x19, 0x85, 0xde, 0x29, 0xd9, 0x3c, 0x08, 0x77, 0x0f, 0xb5, 0x11, 0x6e, 0x9b,
- 0x60, 0x9c, 0xc6, 0x47, 0xaf, 0xc3, 0x84, 0x13, 0x36, 0x76, 0xdc, 0x98, 0x34, 0xe2, 0x4e, 0xc8,
- 0xfd, 0x40, 0x34, 0x6b, 0x8f, 0x25, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0x03, 0x38, 0x97, 0xe1, 0x29,
- 0x46, 0x17, 0x8e, 0xd3, 0x76, 0xe5, 0x37, 0xa5, 0x2c, 0x24, 0x97, 0x6a, 0x55, 0xf9, 0x35, 0x1a,
- 0x16, 0x5d, 0x9d, 0x4c, 0x25, 0xae, 0x65, 0x2f, 0x50, 0xab, 0x73, 0x4d, 0x02, 0x70, 0x82, 0x63,
- 0xff, 0x16, 0x80, 0xa6, 0xd0, 0x19, 0xc0, 0x3e, 0xee, 0x75, 0x98, 0x90, 0x29, 0x37, 0xb4, 0x50,
- 0xef, 0xea, 0x33, 0xaf, 0x6b, 0x30, 0x6c, 0x60, 0xd2, 0xbe, 0xf9, 0x2a, 0x50, 0x7d, 0xca, 0x1e,
- 0x33, 0x09, 0x53, 0x9f, 0xe0, 0xa0, 0x17, 0x60, 0x2c, 0x22, 0xde, 0xf6, 0x2d, 0xd7, 0xdf, 0x15,
- 0x0b, 0x5b, 0x71, 0xe1, 0xba, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x43, 0xb1, 0xe3, 0x36, 0xc5, 0x52,
- 0x96, 0x07, 0x7e, 0xf1, 0x4e, 0xb5, 0x72, 0x7c, 0x58, 0x7e, 0x3a, 0x2f, 0x93, 0x08, 0xbd, 0xda,
- 0x47, 0x0b, 0x74, 0xfb, 0xd1, 0xca, 0x59, 0x6f, 0x03, 0x23, 0x27, 0x7c, 0x1b, 0xb8, 0x06, 0x20,
- 0xbe, 0x5a, 0xae, 0xe5, 0x62, 0x32, 0x6b, 0xd7, 0x15, 0x04, 0x6b, 0x58, 0x28, 0x82, 0xd9, 0x46,
- 0x48, 0x1c, 0x79, 0x87, 0xe6, 0x3e, 0x4f, 0x63, 0x0f, 0xaf, 0x20, 0x58, 0x49, 0x13, 0xc3, 0xdd,
- 0xf4, 0x51, 0x00, 0xb3, 0x4d, 0x11, 0x54, 0x21, 0x69, 0xb4, 0x74, 0x72, 0x47, 0x2b, 0x66, 0x90,
- 0x93, 0x26, 0x84, 0xbb, 0x69, 0xa3, 0x2f, 0xc3, 0xbc, 0x2c, 0xec, 0x8e, 0x63, 0xc1, 0xb6, 0x4b,
- 0x91, 0xc7, 0x5f, 0xaf, 0xe4, 0x62, 0xe1, 0x1e, 0x14, 0x10, 0x86, 0x11, 0xf6, 0x96, 0x14, 0xcd,
- 0x8d, 0xb3, 0x73, 0xee, 0xb9, 0x7c, 0x65, 0x00, 0x5d, 0xeb, 0x0b, 0xec, 0x1d, 0x4a, 0x98, 0x94,
- 0x27, 0xcf, 0x72, 0xac, 0x10, 0x0b, 0x4a, 0x68, 0x1b, 0xc6, 0x1d, 0xdf, 0x0f, 0x62, 0x87, 0x8b,
- 0x50, 0x13, 0xf9, 0xb2, 0x9f, 0x46, 0x78, 0x29, 0xa9, 0xc1, 0xa9, 0x2b, 0x2b, 0x55, 0x0d, 0x82,
- 0x75, 0xc2, 0xe8, 0x3e, 0x4c, 0x07, 0xf7, 0x29, 0x73, 0x94, 0x5a, 0x8a, 0x68, 0x6e, 0x92, 0xb5,
- 0xf5, 0xca, 0x80, 0x7a, 0x5a, 0xa3, 0xb2, 0xc6, 0xb5, 0x4c, 0xa2, 0x38, 0xdd, 0x0a, 0x5a, 0x30,
- 0xb4, 0xd5, 0x53, 0x89, 0x3b, 0x4b, 0xa2, 0xad, 0xd6, 0x95, 0xd3, 0x2c, 0x2e, 0x0a, 0x37, 0x91,
- 0x66, 0xbb, 0x7f, 0x3a, 0x15, 0x17, 0x25, 0x01, 0x61, 0x1d, 0x0f, 0xed, 0xc0, 0x44, 0xf2, 0x64,
- 0x15, 0x46, 0x2c, 0x2a, 0xdb, 0xf8, 0xb5, 0x6b, 0x83, 0x7d, 0x5c, 0x55, 0xab, 0xc9, 0x6f, 0x0e,
- 0x7a, 0x09, 0x36, 0x28, 0xcf, 0x7f, 0x1b, 0x8c, 0x6b, 0x13, 0x7b, 0xa2, 0x28, 0xb8, 0x6f, 0xc2,
- 0x4c, 0x7a, 0xea, 0x4e, 0xe4, 0x41, 0xf0, 0xbf, 0x0a, 0x30, 0x9d, 0xf1, 0x72, 0xc5, 0xb2, 0x91,
- 0xa4, 0x18, 0x6a, 0x92, 0x7c, 0xc4, 0x64, 0x8b, 0x85, 0x01, 0xd8, 0xa2, 0xe4, 0xd1, 0xc5, 0x5c,
- 0x1e, 0x2d, 0x58, 0xe1, 0xd0, 0x87, 0x61, 0x85, 0xe6, 0xe9, 0x33, 0x3c, 0xd0, 0xe9, 0x73, 0x0a,
- 0xec, 0xd3, 0x38, 0xc0, 0x46, 0x07, 0x38, 0xc0, 0x7e, 0xb4, 0x00, 0x33, 0x69, 0x0b, 0xdf, 0x33,
- 0x78, 0xef, 0x78, 0xcb, 0x78, 0xef, 0xc8, 0xce, 0xed, 0x93, 0xb6, 0x3b, 0xce, 0x7b, 0xfb, 0xc0,
- 0xa9, 0xb7, 0x8f, 0xe7, 0x06, 0xa2, 0xd6, 0xfb, 0x1d, 0xe4, 0x1f, 0x15, 0xe0, 0x42, 0xba, 0xca,
- 0x8a, 0xe7, 0xb8, 0x7b, 0x67, 0x30, 0x36, 0xb7, 0x8d, 0xb1, 0x79, 0x71, 0x90, 0xaf, 0x61, 0x5d,
- 0xcb, 0x1d, 0xa0, 0x7b, 0xa9, 0x01, 0x5a, 0x1c, 0x9c, 0x64, 0xef, 0x51, 0xfa, 0x66, 0x11, 0x2e,
- 0x65, 0xd6, 0x4b, 0x9e, 0x0b, 0xd6, 0x8c, 0xe7, 0x82, 0x6b, 0xa9, 0xe7, 0x02, 0xbb, 0x77, 0xed,
- 0xd3, 0x79, 0x3f, 0x10, 0xee, 0xd0, 0x2c, 0x62, 0xe8, 0x43, 0xbe, 0x1d, 0x18, 0xee, 0xd0, 0x8a,
- 0x10, 0x36, 0xe9, 0xfe, 0x55, 0x7a, 0x33, 0xf8, 0x2d, 0x0b, 0x2e, 0x66, 0xce, 0xcd, 0x19, 0xe8,
- 0xd5, 0x37, 0x4c, 0xbd, 0xfa, 0xb3, 0x03, 0xaf, 0xd6, 0x1c, 0x45, 0xfb, 0x1f, 0x17, 0x73, 0xbe,
- 0x85, 0x69, 0x26, 0x6f, 0xc3, 0xb8, 0xd3, 0x68, 0x90, 0x28, 0x5a, 0x0f, 0x9a, 0x2a, 0x82, 0xe6,
- 0x8b, 0x4c, 0xda, 0x48, 0x8a, 0x8f, 0x0f, 0xcb, 0xf3, 0x69, 0x12, 0x09, 0x18, 0xeb, 0x14, 0xcc,
- 0xa0, 0xbf, 0x85, 0x53, 0x0d, 0xfa, 0x7b, 0x0d, 0x60, 0x5f, 0xe9, 0x2b, 0xd2, 0x6a, 0x4e, 0x4d,
- 0x93, 0xa1, 0x61, 0xa1, 0xef, 0x62, 0xb7, 0x00, 0x6e, 0x0c, 0xc4, 0x97, 0xe2, 0xcb, 0x03, 0xce,
- 0x95, 0x6e, 0x58, 0xc4, 0xe3, 0x6e, 0x28, 0x95, 0xb0, 0x22, 0x89, 0xbe, 0x03, 0x66, 0x22, 0x1e,
- 0xd6, 0x69, 0xc5, 0x73, 0x22, 0xe6, 0xc4, 0x25, 0x56, 0x21, 0x0b, 0xa6, 0x51, 0x4f, 0xc1, 0x70,
- 0x17, 0x36, 0x5a, 0x93, 0x1f, 0xc5, 0x62, 0x50, 0xf1, 0x85, 0x79, 0x25, 0xf9, 0x20, 0x91, 0x0b,
- 0xed, 0x7c, 0x7a, 0xf8, 0xd9, 0xc0, 0x6b, 0x35, 0xed, 0x1f, 0x1d, 0x82, 0x27, 0x7a, 0x30, 0x31,
- 0xb4, 0x64, 0x1a, 0x01, 0x3c, 0x9f, 0xd6, 0xff, 0xcd, 0x67, 0x56, 0x36, 0x14, 0x82, 0xa9, 0xb5,
- 0x52, 0xf8, 0xd0, 0x6b, 0xe5, 0x87, 0x2c, 0x4d, 0x33, 0xcb, 0x4d, 0x85, 0xbf, 0x70, 0x42, 0xe6,
- 0x7c, 0x8a, 0xaa, 0xda, 0xed, 0x0c, 0x7d, 0xe7, 0xb5, 0x81, 0xbb, 0x33, 0xb0, 0x02, 0xf4, 0x6c,
- 0x9f, 0x8c, 0xbe, 0x6a, 0xc1, 0xd3, 0x99, 0xfd, 0x35, 0x8c, 0x96, 0x16, 0xa1, 0xd4, 0xa0, 0x85,
- 0x9a, 0x63, 0x68, 0xe2, 0x31, 0x2f, 0x01, 0x38, 0xc1, 0x31, 0x6c, 0x93, 0x0a, 0x7d, 0x6d, 0x93,
- 0xfe, 0xad, 0x05, 0x5d, 0x0b, 0xf8, 0x0c, 0x38, 0x69, 0xd5, 0xe4, 0xa4, 0x9f, 0x1c, 0x64, 0x2e,
- 0x73, 0x98, 0xe8, 0xef, 0x4f, 0xc3, 0x63, 0x39, 0x9e, 0x60, 0xfb, 0x30, 0xdb, 0x6a, 0x10, 0xd3,
- 0xe5, 0x56, 0x7c, 0x4c, 0xa6, 0x77, 0x72, 0x4f, 0xff, 0x5c, 0x7e, 0x21, 0xee, 0x42, 0xc1, 0xdd,
- 0x4d, 0xa0, 0xaf, 0x5a, 0x70, 0xde, 0xb9, 0x1f, 0x75, 0xa5, 0x2a, 0x15, 0x6b, 0xe6, 0x95, 0x4c,
- 0x3d, 0x6d, 0x9f, 0xd4, 0xa6, 0x3c, 0xab, 0x57, 0x16, 0x16, 0xce, 0x6c, 0x0b, 0x61, 0x11, 0xf4,
- 0x98, 0xca, 0xdb, 0x3d, 0x9c, 0xc2, 0xb3, 0x5c, 0xf6, 0x38, 0x4f, 0x95, 0x10, 0xac, 0xe8, 0xa0,
- 0xbb, 0x50, 0x6a, 0x49, 0x3f, 0x5a, 0xc1, 0xb3, 0x33, 0x0f, 0xc1, 0x4c, 0x67, 0x5b, 0xee, 0x3b,
- 0xa2, 0x40, 0x38, 0x21, 0x85, 0xde, 0x84, 0xa2, 0xbf, 0x1d, 0xf5, 0x4a, 0x87, 0x95, 0xb2, 0xe5,
- 0xe3, 0x01, 0x17, 0x36, 0xd6, 0xea, 0x98, 0x56, 0x44, 0x37, 0xa0, 0x18, 0x6e, 0x35, 0xc5, 0xd3,
- 0x42, 0xa6, 0x5c, 0x8a, 0x97, 0x2b, 0xd9, 0x8b, 0x84, 0x53, 0xc2, 0xcb, 0x15, 0x4c, 0x49, 0xa0,
- 0x1a, 0x0c, 0x33, 0xa7, 0x29, 0xf1, 0x82, 0x90, 0x29, 0x90, 0xf6, 0x70, 0x3e, 0xe4, 0x51, 0x19,
- 0x18, 0x02, 0xe6, 0x84, 0xd0, 0x5b, 0x30, 0xd2, 0x60, 0x19, 0xa3, 0x84, 0xe2, 0x27, 0x3b, 0x5c,
- 0x57, 0x57, 0x4e, 0x29, 0xfe, 0x82, 0xca, 0xcb, 0xb1, 0xa0, 0x80, 0x36, 0x61, 0xa4, 0x41, 0xda,
- 0x3b, 0xdb, 0x91, 0xd0, 0xe7, 0x7c, 0x26, 0x93, 0x56, 0x8f, 0x04, 0x69, 0x82, 0x2a, 0xc3, 0xc0,
- 0x82, 0x16, 0xfa, 0x1c, 0x14, 0xb6, 0x1b, 0xc2, 0x93, 0x2a, 0xf3, 0x0d, 0xc1, 0x8c, 0x94, 0xb1,
- 0x3c, 0x72, 0x74, 0x58, 0x2e, 0xac, 0xad, 0xe0, 0xc2, 0x76, 0x03, 0x6d, 0xc0, 0xe8, 0x36, 0xf7,
- 0xad, 0x17, 0x01, 0x6c, 0x3e, 0x9d, 0xed, 0xf6, 0xdf, 0xe5, 0x7e, 0xcf, 0x3d, 0x80, 0x04, 0x00,
- 0x4b, 0x22, 0x2c, 0x5e, 0xb0, 0x8a, 0x11, 0x20, 0x02, 0xe7, 0x2f, 0x9c, 0x2c, 0xae, 0x83, 0x50,
- 0x73, 0x28, 0x2a, 0x58, 0xa3, 0x88, 0xbe, 0x02, 0x25, 0x47, 0xa6, 0xc6, 0x14, 0x41, 0x70, 0x5e,
- 0xce, 0xdc, 0x8e, 0xbd, 0xb3, 0x86, 0xf2, 0xb5, 0xac, 0x90, 0x70, 0x42, 0x14, 0xed, 0xc2, 0xe4,
- 0x7e, 0xd4, 0xde, 0x21, 0x72, 0xfb, 0xb2, 0x98, 0x38, 0x39, 0xc7, 0xd5, 0x5d, 0x81, 0xe8, 0x86,
- 0x71, 0xc7, 0xf1, 0xba, 0x38, 0x0e, 0x73, 0x1c, 0xbb, 0xab, 0x13, 0xc3, 0x26, 0x6d, 0x3a, 0xfc,
- 0xef, 0x77, 0x82, 0xad, 0x83, 0x98, 0x88, 0x48, 0xfb, 0x99, 0xc3, 0xff, 0x36, 0x47, 0xe9, 0x1e,
- 0x7e, 0x01, 0xc0, 0x92, 0x08, 0xdd, 0xe0, 0x8e, 0x4c, 0x3b, 0x2b, 0x74, 0x39, 0xcf, 0xe6, 0x0e,
- 0x4f, 0x57, 0x7f, 0x93, 0x41, 0x61, 0x9c, 0x31, 0x21, 0xc5, 0x38, 0x62, 0x7b, 0x27, 0x88, 0x03,
- 0x3f, 0xc5, 0x8d, 0x67, 0xf3, 0x39, 0x62, 0x2d, 0x03, 0xbf, 0x9b, 0x23, 0x66, 0x61, 0xe1, 0xcc,
- 0xb6, 0x50, 0x13, 0xa6, 0xda, 0x41, 0x18, 0xdf, 0x0f, 0x42, 0xb9, 0xbe, 0x50, 0x8f, 0x4b, 0xbe,
- 0x81, 0x29, 0x5a, 0x64, 0x76, 0xe0, 0x26, 0x04, 0xa7, 0x68, 0xa2, 0x2f, 0xc2, 0x68, 0xd4, 0x70,
- 0x3c, 0x52, 0xbd, 0x3d, 0x77, 0x2e, 0xff, 0xa8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0x26, 0x47,
- 0xa0, 0x60, 0x49, 0x0e, 0xad, 0xc1, 0x30, 0x4b, 0xdf, 0xc2, 0x92, 0x04, 0xe4, 0x04, 0x5b, 0xeb,
- 0xb2, 0x95, 0xe6, 0x1c, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0xeb, 0x06, 0xd1, 0xdc,
- 0x85, 0xfc, 0x3d, 0x20, 0x44, 0xe4, 0xdb, 0xf5, 0x5e, 0x7b, 0x40, 0x21, 0xe1, 0x84, 0x28, 0xe5,
- 0xc7, 0x94, 0x87, 0x3e, 0x96, 0xcf, 0x8f, 0xf3, 0x39, 0x28, 0xe3, 0xc7, 0x94, 0x7f, 0x52, 0x12,
- 0xf6, 0x1f, 0x8c, 0x76, 0xcb, 0x27, 0xec, 0x76, 0xf4, 0x7d, 0x56, 0x97, 0xe9, 0xc0, 0x67, 0x07,
- 0x55, 0xd6, 0x9c, 0xa2, 0x64, 0xfa, 0x55, 0x0b, 0x1e, 0x6b, 0x67, 0x7e, 0x88, 0x38, 0xec, 0x07,
- 0xd3, 0xf9, 0xf0, 0x4f, 0x57, 0x89, 0x3c, 0xb2, 0xe1, 0x38, 0xa7, 0xa5, 0xb4, 0xf4, 0x5f, 0xfc,
- 0xd0, 0xd2, 0xff, 0x3a, 0x8c, 0x31, 0x81, 0x32, 0x89, 0xec, 0x37, 0x90, 0x01, 0x1e, 0x13, 0x1b,
- 0x56, 0x44, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb6, 0xe0, 0xa9, 0x74, 0xd7, 0x31, 0x61, 0x60, 0x11,
- 0x25, 0x9a, 0x5f, 0xcc, 0xd6, 0xc4, 0xf7, 0x3f, 0x55, 0xeb, 0x85, 0x7c, 0xdc, 0x0f, 0x01, 0xf7,
- 0x6e, 0x0c, 0x55, 0x32, 0x6e, 0x86, 0x23, 0xe6, 0xcb, 0xe2, 0x00, 0xb7, 0xc3, 0x57, 0x60, 0x62,
- 0x2f, 0xe8, 0xf8, 0xd2, 0x3b, 0x46, 0xf8, 0x3e, 0x33, 0x2d, 0xf6, 0xba, 0x56, 0x8e, 0x0d, 0xac,
- 0xd4, 0x9d, 0x72, 0xec, 0x61, 0xef, 0x94, 0xe8, 0xdd, 0x54, 0x9a, 0xf8, 0x52, 0xbe, 0x6c, 0x21,
- 0xae, 0xdf, 0x27, 0x48, 0x16, 0x7f, 0xb6, 0xf7, 0xa0, 0xaf, 0x5b, 0x19, 0x02, 0x3c, 0xbf, 0x19,
- 0x7f, 0xde, 0xbc, 0x19, 0x5f, 0x49, 0xdf, 0x8c, 0xbb, 0x34, 0xa1, 0xc6, 0xa5, 0x78, 0xf0, 0x18,
- 0xfd, 0x83, 0x06, 0x68, 0xb4, 0x3d, 0xb8, 0xdc, 0xef, 0x58, 0x62, 0xa6, 0x92, 0x4d, 0x65, 0x43,
- 0x90, 0x98, 0x4a, 0x36, 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xf0, 0xb1, 0xff, 0xbb, 0x05, 0xc5,
- 0x5a, 0xd0, 0x3c, 0x03, 0xcd, 0xee, 0x17, 0x0c, 0xcd, 0xee, 0x13, 0x39, 0xe9, 0xfb, 0x73, 0xf5,
- 0xb8, 0xab, 0x29, 0x3d, 0xee, 0x53, 0x79, 0x04, 0x7a, 0x6b, 0x6d, 0x7f, 0xba, 0x08, 0xe3, 0xb5,
- 0xa0, 0xa9, 0x6c, 0xee, 0xff, 0xfd, 0xc3, 0xd8, 0xdc, 0xe7, 0xc6, 0x5b, 0xd6, 0x28, 0x33, 0x0b,
- 0x4b, 0xe9, 0xce, 0xfb, 0x97, 0xcc, 0xf4, 0xfe, 0x1e, 0x71, 0x5b, 0x3b, 0x31, 0x69, 0xa6, 0x3f,
- 0xe7, 0xec, 0x4c, 0xef, 0xff, 0xab, 0x05, 0xd3, 0xa9, 0xd6, 0x91, 0x97, 0xe5, 0x1b, 0xf8, 0x90,
- 0x1a, 0xbd, 0xd9, 0xbe, 0xce, 0x84, 0x0b, 0x00, 0xea, 0xd9, 0x4c, 0x6a, 0xbb, 0x98, 0xd4, 0xaf,
- 0xde, 0xd5, 0x22, 0xac, 0x61, 0xa0, 0x57, 0x61, 0x3c, 0x0e, 0xda, 0x81, 0x17, 0xb4, 0x0e, 0x6e,
- 0x12, 0x19, 0x33, 0x4a, 0x3d, 0x6e, 0x6e, 0x26, 0x20, 0xac, 0xe3, 0xd9, 0x3f, 0x5b, 0xe4, 0x1f,
- 0xea, 0xc7, 0xee, 0xb7, 0xd6, 0xe4, 0xc7, 0x7b, 0x4d, 0x7e, 0xd3, 0x82, 0x19, 0xda, 0x3a, 0xb3,
- 0x5e, 0x93, 0x87, 0xad, 0xca, 0x15, 0x66, 0xf5, 0xc8, 0x15, 0x76, 0x85, 0xf2, 0xae, 0x66, 0xd0,
- 0x89, 0x85, 0xb6, 0x4c, 0x63, 0x4e, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x61, 0x28, 0x3c, 0xfe,
- 0x74, 0x3c, 0x12, 0x86, 0x58, 0x40, 0x65, 0x2a, 0xb1, 0xa1, 0x9c, 0x54, 0x62, 0x2c, 0x02, 0xa6,
- 0xb0, 0x98, 0x12, 0x62, 0x8f, 0x16, 0x01, 0x53, 0x9a, 0x52, 0x25, 0x38, 0xf6, 0x2f, 0x14, 0x61,
- 0xa2, 0x16, 0x34, 0x93, 0x87, 0xab, 0x57, 0x8c, 0x87, 0xab, 0xcb, 0xa9, 0x87, 0xab, 0x19, 0x1d,
- 0xf7, 0x5b, 0xcf, 0x54, 0x1f, 0xd5, 0x33, 0xd5, 0xbf, 0xb1, 0xd8, 0xac, 0x55, 0x36, 0xea, 0x22,
- 0x19, 0xf7, 0x4b, 0x30, 0xce, 0x18, 0x12, 0x73, 0x31, 0x95, 0xaf, 0x39, 0x2c, 0xa3, 0xc5, 0x46,
- 0x52, 0x8c, 0x75, 0x1c, 0x74, 0x15, 0xc6, 0x22, 0xe2, 0x84, 0x8d, 0x1d, 0xc5, 0xe3, 0xc4, 0x5b,
- 0x07, 0x2f, 0xc3, 0x0a, 0x8a, 0xde, 0x4e, 0x82, 0x2f, 0x16, 0xf3, 0xd3, 0x4a, 0xeb, 0xfd, 0xe1,
- 0x5b, 0x24, 0x3f, 0xe2, 0xa2, 0x7d, 0x0f, 0x50, 0x37, 0xfe, 0x00, 0x66, 0x64, 0x65, 0x33, 0xcc,
- 0x5a, 0xa9, 0x2b, 0xc4, 0xda, 0x9f, 0x5b, 0x30, 0x55, 0x0b, 0x9a, 0x74, 0xeb, 0xfe, 0x55, 0xda,
- 0xa7, 0x7a, 0xe4, 0xd9, 0x91, 0x1e, 0x91, 0x67, 0xff, 0xb1, 0x05, 0xa3, 0xb5, 0xa0, 0x79, 0x06,
- 0x3a, 0xf6, 0xcf, 0x9b, 0x3a, 0xf6, 0xc7, 0x73, 0x96, 0x44, 0x8e, 0x5a, 0xfd, 0x97, 0x8a, 0x30,
- 0x49, 0xfb, 0x19, 0xb4, 0xe4, 0x2c, 0x19, 0x23, 0x62, 0x0d, 0x30, 0x22, 0x54, 0xcc, 0x0d, 0x3c,
- 0x2f, 0xb8, 0x9f, 0x9e, 0xb1, 0x35, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x00, 0x63, 0xed, 0x90, 0xec,
- 0xbb, 0x41, 0x27, 0x4a, 0x7b, 0x53, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xf4, 0xde, 0x15, 0xb9, 0x7e,
- 0x83, 0x48, 0xfb, 0xb2, 0x21, 0x66, 0x5f, 0xc6, 0x43, 0xca, 0x6b, 0xe5, 0xd8, 0xc0, 0x42, 0xf7,
- 0xa0, 0xc4, 0xfe, 0x33, 0x8e, 0x72, 0xf2, 0x24, 0x67, 0x22, 0x8f, 0x8b, 0x20, 0x80, 0x13, 0x5a,
- 0xe8, 0x1a, 0x40, 0x2c, 0x2d, 0xe1, 0x22, 0xe1, 0xec, 0xaf, 0x64, 0x6d, 0x65, 0x23, 0x17, 0x61,
- 0x0d, 0x0b, 0x3d, 0x0f, 0xa5, 0xd8, 0x71, 0xbd, 0x5b, 0xae, 0x4f, 0x22, 0x61, 0x49, 0x28, 0xd2,
- 0xb4, 0x88, 0x42, 0x9c, 0xc0, 0xa9, 0xac, 0xc3, 0x42, 0x49, 0xf0, 0x14, 0x89, 0x63, 0x0c, 0x9b,
- 0xc9, 0x3a, 0xb7, 0x54, 0x29, 0xd6, 0x30, 0xec, 0xd7, 0xe1, 0x42, 0x2d, 0x68, 0xd6, 0x82, 0x30,
- 0x5e, 0x0b, 0xc2, 0xfb, 0x4e, 0xd8, 0x94, 0xf3, 0x57, 0x96, 0x19, 0x43, 0x28, 0xef, 0x19, 0xe6,
- 0x3b, 0xd3, 0xc8, 0x05, 0xf2, 0x32, 0x93, 0x76, 0x4e, 0xe8, 0xf6, 0xf5, 0xbf, 0x0b, 0x8c, 0x51,
- 0xa4, 0xf2, 0x76, 0xa2, 0x2f, 0xc3, 0x54, 0x44, 0x6e, 0xb9, 0x7e, 0xe7, 0x81, 0xbc, 0x1f, 0xf7,
- 0xf0, 0xa9, 0xab, 0xaf, 0xea, 0x98, 0x5c, 0xcb, 0x66, 0x96, 0xe1, 0x14, 0x35, 0x3a, 0x84, 0x61,
- 0xc7, 0x5f, 0x8a, 0xee, 0x44, 0x24, 0x14, 0x79, 0x23, 0xd9, 0x10, 0x62, 0x59, 0x88, 0x13, 0x38,
- 0x5d, 0x32, 0xec, 0xcf, 0x46, 0xe0, 0xe3, 0x20, 0x88, 0xe5, 0x22, 0x63, 0x99, 0xc7, 0xb4, 0x72,
- 0x6c, 0x60, 0xa1, 0x35, 0x40, 0x51, 0xa7, 0xdd, 0xf6, 0xd8, 0x03, 0xb8, 0xe3, 0x5d, 0x0f, 0x83,
- 0x4e, 0x9b, 0x3f, 0x3e, 0x8a, 0xa4, 0x5d, 0xf5, 0x2e, 0x28, 0xce, 0xa8, 0x41, 0x19, 0xc3, 0x76,
- 0xc4, 0x7e, 0x8b, 0x68, 0x12, 0x5c, 0xf3, 0x5d, 0x67, 0x45, 0x58, 0xc2, 0xe8, 0x3c, 0xb3, 0xe6,
- 0x39, 0xe6, 0x48, 0x32, 0xcf, 0x58, 0x95, 0x62, 0x0d, 0xc3, 0xfe, 0x1e, 0x76, 0xc0, 0xb0, 0xf4,
- 0x80, 0x71, 0x27, 0x24, 0x68, 0x0f, 0x26, 0xdb, 0xec, 0xe8, 0x17, 0x81, 0xd6, 0xc5, 0x80, 0x3f,
- 0x9c, 0x9d, 0x21, 0x4f, 0x17, 0xa6, 0x93, 0xc3, 0x26, 0x75, 0xfb, 0xa7, 0x67, 0x18, 0x1f, 0xab,
- 0xf3, 0xeb, 0xdf, 0xa8, 0xf0, 0x1e, 0x10, 0xb2, 0xee, 0x7c, 0xbe, 0x1e, 0x22, 0x39, 0x72, 0x84,
- 0x07, 0x02, 0x96, 0x75, 0xd1, 0xdb, 0xec, 0x95, 0x97, 0x33, 0x8f, 0x7e, 0x79, 0xc6, 0x39, 0x96,
- 0xf1, 0xa0, 0x2b, 0x2a, 0x62, 0x8d, 0x08, 0xba, 0x05, 0x93, 0x22, 0x9b, 0x9c, 0x50, 0x34, 0x15,
- 0x0d, 0x45, 0xc2, 0x24, 0xd6, 0x81, 0xc7, 0xe9, 0x02, 0x6c, 0x56, 0x46, 0x2d, 0x78, 0x4a, 0x4b,
- 0xad, 0x9a, 0x61, 0xeb, 0xca, 0x79, 0xd1, 0xd3, 0x47, 0x87, 0xe5, 0xa7, 0x36, 0x7b, 0x21, 0xe2,
- 0xde, 0x74, 0xd0, 0x6d, 0xb8, 0xe0, 0x34, 0x62, 0x77, 0x9f, 0x54, 0x88, 0xd3, 0xf4, 0x5c, 0x9f,
- 0x98, 0xe1, 0x48, 0x2e, 0x1e, 0x1d, 0x96, 0x2f, 0x2c, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0xf3,
- 0x50, 0x6a, 0xfa, 0x91, 0x18, 0x83, 0x11, 0x23, 0x6b, 0x70, 0xa9, 0xb2, 0x51, 0x57, 0xdf, 0x9f,
- 0xfc, 0xc1, 0x49, 0x05, 0xd4, 0xe2, 0xca, 0x26, 0x75, 0xb7, 0x1b, 0xed, 0x8a, 0x31, 0x93, 0xd6,
- 0x12, 0x18, 0x7e, 0x7a, 0x5c, 0xcb, 0xaa, 0x2c, 0xc9, 0x0d, 0x17, 0x3e, 0x83, 0x30, 0x7a, 0x0b,
- 0x10, 0x15, 0x7e, 0xdc, 0x06, 0x59, 0x6a, 0xb0, 0x78, 0xf7, 0x4c, 0x37, 0x37, 0x66, 0xb8, 0x45,
- 0xa1, 0x7a, 0x17, 0x06, 0xce, 0xa8, 0x85, 0x6e, 0x50, 0x0e, 0xa4, 0x97, 0x0a, 0xc3, 0x7f, 0x29,
- 0x30, 0xcf, 0x55, 0x48, 0x3b, 0x24, 0x0d, 0x27, 0x26, 0x4d, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0xe7,
- 0x93, 0x4a, 0x7d, 0x05, 0xa6, 0xb9, 0x7a, 0x77, 0xfa, 0x2b, 0x7a, 0xd7, 0xdc, 0x09, 0xa2, 0x78,
- 0x83, 0xc4, 0xf7, 0x83, 0x70, 0x57, 0xc4, 0x10, 0x4c, 0xc2, 0xd9, 0x26, 0x20, 0xac, 0xe3, 0x51,
- 0xd9, 0x92, 0x3d, 0xb3, 0x56, 0x2b, 0xec, 0xd5, 0x6b, 0x2c, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96,
- 0x70, 0x89, 0x5a, 0xad, 0xad, 0xb0, 0x17, 0xac, 0x14, 0x6a, 0xb5, 0xb6, 0x82, 0x25, 0x1c, 0x91,
- 0xee, 0x8c, 0xcc, 0x53, 0xf9, 0x9a, 0xc2, 0x6e, 0x3e, 0x3e, 0x60, 0x52, 0x66, 0x1f, 0x66, 0x54,
- 0x2e, 0x68, 0x1e, 0x5c, 0x31, 0x9a, 0x9b, 0x66, 0x8b, 0x64, 0xf0, 0xc8, 0x8c, 0x4a, 0xf7, 0x5a,
- 0x4d, 0x51, 0xc2, 0x5d, 0xb4, 0x8d, 0x30, 0x37, 0x33, 0x7d, 0x53, 0x97, 0x2d, 0x42, 0x29, 0xea,
- 0x6c, 0x35, 0x83, 0x3d, 0xc7, 0xf5, 0xd9, 0x83, 0x93, 0x26, 0xb8, 0xd4, 0x25, 0x00, 0x27, 0x38,
- 0x68, 0x0d, 0xc6, 0x1c, 0xa9, 0x58, 0x45, 0xf9, 0x71, 0x2f, 0x94, 0x3a, 0x95, 0x89, 0xe5, 0x4a,
- 0x95, 0xaa, 0xea, 0xa2, 0x37, 0x60, 0x52, 0xb8, 0x66, 0x0a, 0xab, 0xea, 0x73, 0xa6, 0x17, 0x4f,
- 0x5d, 0x07, 0x62, 0x13, 0x17, 0x7d, 0x17, 0x4c, 0x51, 0x2a, 0x09, 0x63, 0x9b, 0x3b, 0x3f, 0x08,
- 0x47, 0xd4, 0x52, 0xd2, 0xe8, 0x95, 0x71, 0x8a, 0x18, 0x6a, 0xc2, 0x93, 0x4e, 0x27, 0x0e, 0x98,
- 0x72, 0xda, 0x5c, 0xff, 0x9b, 0xc1, 0x2e, 0xf1, 0xd9, 0xbb, 0xd0, 0xd8, 0xf2, 0xe5, 0xa3, 0xc3,
- 0xf2, 0x93, 0x4b, 0x3d, 0xf0, 0x70, 0x4f, 0x2a, 0xe8, 0x0e, 0x8c, 0xc7, 0x81, 0x27, 0xdc, 0x21,
- 0xa2, 0xb9, 0xc7, 0xf2, 0xc3, 0x74, 0x6d, 0x2a, 0x34, 0x5d, 0x31, 0xa3, 0xaa, 0x62, 0x9d, 0x0e,
- 0xda, 0xe4, 0x7b, 0x8c, 0x05, 0x30, 0x26, 0xd1, 0xdc, 0xe3, 0xf9, 0x03, 0xa3, 0xe2, 0x1c, 0x9b,
- 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, 0x66, 0xdb, 0xa1, 0x1b, 0xb0, 0x85, 0xad, 0x1e,
- 0x06, 0xe6, 0xcc, 0x2c, 0x24, 0xb5, 0x34, 0x02, 0xee, 0xae, 0x43, 0x2f, 0x6e, 0xb2, 0x70, 0xee,
- 0x22, 0x4f, 0x69, 0xc7, 0x85, 0x59, 0x5e, 0x86, 0x15, 0x14, 0xad, 0x33, 0xbe, 0xcc, 0xaf, 0x58,
- 0x73, 0xf3, 0xf9, 0xf1, 0x42, 0xf4, 0xab, 0x18, 0x17, 0x74, 0xd4, 0x5f, 0x9c, 0x50, 0xa0, 0xe7,
- 0x46, 0xb4, 0xe3, 0x84, 0xa4, 0x16, 0x06, 0x0d, 0xc2, 0x3b, 0xc3, 0xcd, 0xcf, 0x9f, 0xe0, 0x71,
- 0x56, 0xe9, 0xb9, 0x51, 0xcf, 0x42, 0xc0, 0xd9, 0xf5, 0xe6, 0xbf, 0x1d, 0x66, 0xbb, 0x38, 0xf9,
- 0x89, 0x4c, 0xe7, 0xff, 0x6c, 0x18, 0x4a, 0x4a, 0xf1, 0x8b, 0x16, 0x4d, 0x7d, 0xfe, 0xc5, 0xb4,
- 0x3e, 0x7f, 0x8c, 0xca, 0x9f, 0xba, 0x0a, 0x7f, 0xd3, 0x30, 0xfc, 0x2a, 0xe4, 0xa7, 0xba, 0xd3,
- 0xb5, 0x1e, 0x7d, 0xfd, 0x5c, 0xb5, 0x7b, 0x7c, 0x71, 0xe0, 0x87, 0x81, 0xa1, 0x9e, 0xaa, 0x81,
- 0x01, 0xb3, 0x77, 0xd3, 0xab, 0x6e, 0x3b, 0x68, 0x56, 0x6b, 0xe9, 0x74, 0xb6, 0x35, 0x5a, 0x88,
- 0x39, 0x8c, 0x5d, 0x56, 0xa8, 0xd8, 0xc1, 0x2e, 0x2b, 0xa3, 0x0f, 0x79, 0x59, 0x91, 0x04, 0x70,
- 0x42, 0x0b, 0x79, 0x30, 0xdb, 0x30, 0x33, 0x11, 0x2b, 0xdf, 0xd6, 0x67, 0xfa, 0xe6, 0x04, 0xee,
- 0x68, 0x29, 0x0a, 0x57, 0xd2, 0x54, 0x70, 0x37, 0x61, 0xf4, 0x06, 0x8c, 0xbd, 0x1f, 0x44, 0x6c,
- 0x5b, 0x88, 0xb3, 0x57, 0x7a, 0x13, 0x8e, 0xbd, 0x7d, 0xbb, 0xce, 0xca, 0x8f, 0x0f, 0xcb, 0xe3,
- 0xb5, 0xa0, 0x29, 0xff, 0x62, 0x55, 0x01, 0x3d, 0x80, 0x0b, 0x06, 0xc7, 0x52, 0xdd, 0x85, 0xc1,
- 0xbb, 0xfb, 0x94, 0x68, 0xee, 0x42, 0x35, 0x8b, 0x12, 0xce, 0x6e, 0x80, 0xb2, 0x01, 0x3f, 0x10,
- 0x59, 0xbc, 0xe5, 0xf9, 0xce, 0x8e, 0xf1, 0x92, 0x1e, 0x94, 0x21, 0x85, 0x80, 0xbb, 0xeb, 0xd8,
- 0xdf, 0xe0, 0x7a, 0x72, 0xa1, 0x4d, 0x23, 0x51, 0xc7, 0x3b, 0x8b, 0x84, 0x66, 0xab, 0x86, 0xa2,
- 0xef, 0xa1, 0xdf, 0x62, 0x7e, 0xc3, 0x62, 0x6f, 0x31, 0x9b, 0x64, 0xaf, 0xed, 0x39, 0xf1, 0x59,
- 0x78, 0x5e, 0xbc, 0x0d, 0x63, 0xb1, 0x68, 0xad, 0x57, 0x0e, 0x36, 0xad, 0x53, 0xec, 0x3d, 0x4a,
- 0x9d, 0xfc, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0xbf, 0xe4, 0x33, 0x20, 0x21, 0x67, 0xa0, 0x74, 0xa9,
- 0x98, 0x4a, 0x97, 0x72, 0x9f, 0x2f, 0xc8, 0x51, 0xbe, 0xfc, 0x0b, 0xb3, 0xdf, 0xec, 0x92, 0xf5,
- 0x71, 0x7f, 0x04, 0xb4, 0x7f, 0xdc, 0x82, 0xf3, 0x59, 0x56, 0x33, 0x54, 0x5a, 0xe3, 0x57, 0x3c,
- 0xf5, 0x28, 0xaa, 0x46, 0xf0, 0xae, 0x28, 0xc7, 0x0a, 0x63, 0xe0, 0xf4, 0x26, 0x27, 0x8b, 0xc1,
- 0x77, 0x1b, 0xcc, 0xec, 0xd7, 0xe8, 0x4d, 0xee, 0x4a, 0x65, 0xa9, 0xf4, 0xd4, 0x27, 0x73, 0xa3,
- 0xb2, 0x7f, 0xae, 0x00, 0xe7, 0xf9, 0xab, 0xc6, 0xd2, 0x7e, 0xe0, 0x36, 0x6b, 0x41, 0x53, 0x38,
- 0x96, 0xbd, 0x03, 0x13, 0x6d, 0xed, 0x5e, 0xde, 0x2b, 0x0a, 0x98, 0x7e, 0x7f, 0x4f, 0xee, 0x47,
- 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x26, 0x4c, 0x90, 0x7d, 0xb7, 0xa1, 0x54, 0xe3, 0x85, 0x13, 0x9f,
- 0x0d, 0xaa, 0x95, 0x55, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x82, 0x6c, 0x85, 0xf6, 0x4f, 0x58, 0xf0,
- 0x78, 0x4e, 0xcc, 0x30, 0xda, 0xdc, 0x7d, 0xf6, 0x7e, 0x24, 0x12, 0x9f, 0xa9, 0xe6, 0xf8, 0xab,
- 0x12, 0x16, 0x50, 0xf4, 0x45, 0x00, 0xfe, 0x2a, 0x44, 0xaf, 0x0b, 0xe2, 0xd3, 0x07, 0x8b, 0xa5,
- 0xa3, 0x05, 0x5c, 0x91, 0xf5, 0xb1, 0x46, 0xcb, 0xfe, 0x99, 0x22, 0x0c, 0xb3, 0x57, 0x08, 0xb4,
- 0x06, 0xa3, 0x3b, 0x3c, 0x42, 0xf9, 0x20, 0xc1, 0xd0, 0x93, 0x7b, 0x17, 0x2f, 0xc0, 0xb2, 0x32,
- 0x5a, 0x87, 0x73, 0xc2, 0x79, 0xb1, 0x42, 0x3c, 0xe7, 0x40, 0x5e, 0xdf, 0x79, 0xb2, 0x30, 0x95,
- 0x1e, 0xaf, 0xda, 0x8d, 0x82, 0xb3, 0xea, 0xa1, 0x37, 0xbb, 0xe2, 0x92, 0xf2, 0xd8, 0xee, 0x4a,
- 0xd8, 0xef, 0x13, 0x9b, 0xf4, 0x0d, 0x98, 0x6c, 0x77, 0x29, 0x2a, 0x86, 0x93, 0x8b, 0x88, 0xa9,
- 0x9c, 0x30, 0x71, 0x99, 0xb9, 0x4c, 0x87, 0x19, 0x07, 0x6d, 0xee, 0x84, 0x24, 0xda, 0x09, 0xbc,
- 0xa6, 0xc8, 0xdf, 0x9f, 0x98, 0xcb, 0xa4, 0xe0, 0xb8, 0xab, 0x06, 0xa5, 0xb2, 0xed, 0xb8, 0x5e,
- 0x27, 0x24, 0x09, 0x95, 0x11, 0x93, 0xca, 0x5a, 0x0a, 0x8e, 0xbb, 0x6a, 0xd0, 0x75, 0x74, 0x41,
- 0x24, 0x7f, 0x97, 0x21, 0x2d, 0x94, 0x0d, 0xd4, 0xa8, 0x74, 0x6d, 0xe9, 0x11, 0x66, 0x49, 0x58,
- 0x89, 0xa8, 0xf4, 0xf1, 0x5a, 0x6a, 0x61, 0xe1, 0xd4, 0x22, 0xa9, 0x3c, 0x4c, 0x0a, 0xf2, 0x3f,
- 0xb0, 0xe0, 0x5c, 0x86, 0xad, 0x25, 0x67, 0x55, 0x2d, 0x37, 0x8a, 0x55, 0x42, 0x24, 0x8d, 0x55,
- 0xf1, 0x72, 0xac, 0x30, 0xe8, 0x7e, 0xe0, 0xcc, 0x30, 0xcd, 0x00, 0x85, 0x2d, 0x93, 0x80, 0x9e,
- 0x8c, 0x01, 0xa2, 0xcb, 0x30, 0xd4, 0x89, 0x48, 0x28, 0x73, 0x77, 0x4b, 0xfe, 0xcd, 0x54, 0xa5,
- 0x0c, 0x42, 0x45, 0xd3, 0x96, 0xd2, 0x52, 0x6a, 0xa2, 0x29, 0x57, 0x3d, 0x72, 0x98, 0xfd, 0xb5,
- 0x22, 0x5c, 0xcc, 0xb5, 0xa5, 0xa6, 0x5d, 0xda, 0x0b, 0x7c, 0x37, 0x0e, 0xd4, 0x0b, 0x17, 0x8f,
- 0x07, 0x44, 0xda, 0x3b, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x05, 0x86, 0xd9, 0xcd, 0xbe, 0x2b,
- 0xe5, 0xd3, 0x72, 0x85, 0x87, 0xa9, 0xe0, 0xe0, 0x81, 0xd3, 0xe9, 0x3d, 0x03, 0x43, 0xed, 0x20,
- 0xf0, 0xd2, 0xcc, 0x88, 0x76, 0x37, 0x08, 0x3c, 0xcc, 0x80, 0xe8, 0x53, 0x62, 0x1c, 0x52, 0x4f,
- 0x3a, 0xd8, 0x69, 0x06, 0x91, 0x36, 0x18, 0xcf, 0xc2, 0xe8, 0x2e, 0x39, 0x08, 0x5d, 0xbf, 0x95,
- 0x7e, 0xea, 0xbb, 0xc9, 0x8b, 0xb1, 0x84, 0x9b, 0x19, 0x4f, 0x46, 0x4f, 0x3b, 0x0f, 0xde, 0x58,
- 0xdf, 0xa3, 0xed, 0x87, 0x8a, 0x30, 0x8d, 0x97, 0x2b, 0xdf, 0x9a, 0x88, 0x3b, 0xdd, 0x13, 0x71,
- 0xda, 0x79, 0xf0, 0xfa, 0xcf, 0xc6, 0x2f, 0x59, 0x30, 0xcd, 0xa2, 0x82, 0x8b, 0x78, 0x36, 0x6e,
- 0xe0, 0x9f, 0x81, 0xe8, 0xf6, 0x0c, 0x0c, 0x87, 0xb4, 0xd1, 0x74, 0x72, 0x2b, 0xd6, 0x13, 0xcc,
- 0x61, 0xe8, 0x49, 0x18, 0x62, 0x5d, 0xa0, 0x93, 0x37, 0xc1, 0xf3, 0x82, 0x54, 0x9c, 0xd8, 0xc1,
- 0xac, 0x94, 0x39, 0x16, 0x63, 0xd2, 0xf6, 0x5c, 0xde, 0xe9, 0x44, 0xd5, 0xff, 0xf1, 0x70, 0x2c,
- 0xce, 0xec, 0xda, 0x87, 0x73, 0x2c, 0xce, 0x26, 0xd9, 0xfb, 0x5a, 0xf4, 0x3f, 0x0a, 0x70, 0x29,
- 0xb3, 0xde, 0xc0, 0x8e, 0xc5, 0xbd, 0x6b, 0x9f, 0x8e, 0xc5, 0x46, 0xb6, 0x21, 0x45, 0xf1, 0x0c,
- 0x0d, 0x29, 0x86, 0x06, 0x95, 0x1c, 0x87, 0x07, 0xf0, 0xf7, 0xcd, 0x1c, 0xb2, 0x8f, 0x89, 0xbf,
- 0x6f, 0x66, 0xdf, 0x72, 0xae, 0x75, 0x7f, 0x51, 0xc8, 0xf9, 0x16, 0x76, 0xc1, 0xbb, 0x4a, 0xf9,
- 0x0c, 0x03, 0x46, 0x42, 0x12, 0x9e, 0xe0, 0x3c, 0x86, 0x97, 0x61, 0x05, 0x45, 0xae, 0xe6, 0x39,
- 0x5b, 0xc8, 0x4f, 0x7d, 0x9a, 0xdb, 0xd4, 0x82, 0xf9, 0x32, 0xa3, 0x07, 0xdf, 0x49, 0x7b, 0xd1,
- 0xae, 0x6b, 0x97, 0xf2, 0xe2, 0xe0, 0x97, 0xf2, 0x89, 0xec, 0x0b, 0x39, 0x5a, 0x82, 0xe9, 0x3d,
- 0xd7, 0xa7, 0x6c, 0xf3, 0xc0, 0x14, 0x45, 0x55, 0x20, 0x89, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf3,
- 0x6f, 0xc0, 0xe4, 0xc3, 0xab, 0x23, 0xbf, 0x59, 0x84, 0x27, 0x7a, 0x6c, 0x7b, 0xce, 0xeb, 0x8d,
- 0x39, 0xd0, 0x78, 0x7d, 0xd7, 0x3c, 0xd4, 0xe0, 0xfc, 0x76, 0xc7, 0xf3, 0x0e, 0x98, 0xad, 0x22,
- 0x69, 0x4a, 0x0c, 0x21, 0x2b, 0xaa, 0x90, 0xff, 0x6b, 0x19, 0x38, 0x38, 0xb3, 0x26, 0x7a, 0x0b,
- 0x50, 0x20, 0xf2, 0x2e, 0x27, 0x21, 0x85, 0xd8, 0xc0, 0x17, 0x93, 0xcd, 0x78, 0xbb, 0x0b, 0x03,
- 0x67, 0xd4, 0xa2, 0x42, 0x3f, 0x3d, 0x95, 0x0e, 0x54, 0xb7, 0x52, 0x42, 0x3f, 0xd6, 0x81, 0xd8,
- 0xc4, 0x45, 0xd7, 0x61, 0xd6, 0xd9, 0x77, 0x5c, 0x1e, 0x62, 0x52, 0x12, 0xe0, 0x52, 0xbf, 0x52,
- 0x82, 0x2d, 0xa5, 0x11, 0x70, 0x77, 0x9d, 0x94, 0xeb, 0xee, 0x48, 0xbe, 0xeb, 0x6e, 0x6f, 0xbe,
- 0xd8, 0x4f, 0xa7, 0x6b, 0xff, 0x67, 0x8b, 0x1e, 0x5f, 0x5c, 0x78, 0x37, 0xc3, 0x6d, 0xbd, 0x01,
- 0x93, 0x4a, 0x37, 0xa9, 0x79, 0xd1, 0xaa, 0x71, 0x58, 0xd1, 0x81, 0xd8, 0xc4, 0xe5, 0x0b, 0x22,
- 0x4a, 0x1c, 0x3a, 0x0c, 0xd1, 0x5d, 0xb8, 0xc9, 0x2b, 0x0c, 0xf4, 0x25, 0x18, 0x6d, 0xba, 0xfb,
- 0x6e, 0x14, 0x84, 0x62, 0xb3, 0x9c, 0xd0, 0x2c, 0x3e, 0xe1, 0x83, 0x15, 0x4e, 0x06, 0x4b, 0x7a,
- 0xf6, 0x0f, 0x15, 0x60, 0x52, 0xb6, 0xf8, 0x76, 0x27, 0x88, 0x9d, 0x33, 0x38, 0x96, 0xaf, 0x1b,
- 0xc7, 0xf2, 0xa7, 0x7a, 0xc5, 0x0a, 0x60, 0x5d, 0xca, 0x3d, 0x8e, 0x6f, 0xa7, 0x8e, 0xe3, 0x4f,
- 0xf7, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x57, 0x16, 0xcc, 0x1a, 0xf8, 0x67, 0x70, 0x1a, 0xac, 0x99,
- 0xa7, 0xc1, 0xd3, 0x7d, 0xbf, 0x21, 0xe7, 0x14, 0xf8, 0x7a, 0x21, 0xd5, 0x77, 0xc6, 0xfd, 0xdf,
- 0x87, 0xa1, 0x1d, 0x27, 0x6c, 0xf6, 0x0a, 0x94, 0xdc, 0x55, 0x69, 0xe1, 0x86, 0x13, 0x36, 0x39,
- 0x0f, 0x7f, 0x41, 0x65, 0x6b, 0x75, 0xc2, 0x66, 0x5f, 0xff, 0x25, 0xd6, 0x14, 0x7a, 0x1d, 0x46,
- 0xa2, 0x46, 0xd0, 0x56, 0xd6, 0x85, 0x97, 0x79, 0x26, 0x57, 0x5a, 0x72, 0x7c, 0x58, 0x46, 0x66,
- 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xf9, 0x16, 0x94, 0x54, 0xd3, 0x8f, 0xd4, 0x37, 0xe4, 0x77, 0x8b,
- 0x70, 0x2e, 0x63, 0x5d, 0xa0, 0xc8, 0x18, 0xad, 0x97, 0x06, 0x5c, 0x4e, 0x1f, 0x72, 0xbc, 0x22,
- 0x76, 0x63, 0x69, 0x8a, 0xf9, 0x1f, 0xb8, 0xd1, 0x3b, 0x11, 0x49, 0x37, 0x4a, 0x8b, 0xfa, 0x37,
- 0x4a, 0x1b, 0x3b, 0xb3, 0xa1, 0xa6, 0x0d, 0xa9, 0x9e, 0x3e, 0xd2, 0x39, 0xfd, 0xd3, 0x22, 0x9c,
- 0xcf, 0x0a, 0x31, 0x82, 0xbe, 0x3b, 0x95, 0x76, 0xe9, 0x95, 0x41, 0x83, 0x93, 0xf0, 0x5c, 0x4c,
- 0x22, 0x26, 0xdb, 0x82, 0x99, 0x88, 0xa9, 0xef, 0x30, 0x8b, 0x36, 0x99, 0x43, 0x61, 0xc8, 0xd3,
- 0x65, 0xc9, 0x2d, 0xfe, 0xd9, 0x81, 0x3b, 0x20, 0xf2, 0x6c, 0x45, 0x29, 0x87, 0x42, 0x59, 0xdc,
- 0xdf, 0xa1, 0x50, 0xb6, 0x3c, 0xef, 0xc2, 0xb8, 0xf6, 0x35, 0x8f, 0x74, 0xc6, 0x77, 0xe9, 0x89,
- 0xa2, 0xf5, 0xfb, 0x91, 0xce, 0xfa, 0x4f, 0x58, 0x90, 0xb2, 0xe9, 0x53, 0x2a, 0x29, 0x2b, 0x57,
- 0x25, 0x75, 0x19, 0x86, 0xc2, 0xc0, 0x23, 0xe9, 0x4c, 0x3c, 0x38, 0xf0, 0x08, 0x66, 0x10, 0x8a,
- 0x11, 0x27, 0x0a, 0x89, 0x09, 0xfd, 0xb2, 0x25, 0xae, 0x51, 0xcf, 0xc0, 0xb0, 0x47, 0xf6, 0x89,
- 0xd4, 0x46, 0x28, 0x9e, 0x7c, 0x8b, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0x69, 0x08, 0x9e, 0xea, 0xe9,
- 0x92, 0x4b, 0xaf, 0x2c, 0x2d, 0x27, 0x26, 0xf7, 0x9d, 0x83, 0x74, 0x9c, 0xf0, 0xeb, 0xbc, 0x18,
- 0x4b, 0x38, 0xb3, 0x40, 0xe6, 0xa1, 0x46, 0x53, 0x0a, 0x3c, 0x11, 0x61, 0x54, 0x40, 0x4d, 0xc5,
- 0x51, 0xf1, 0x34, 0x14, 0x47, 0xd7, 0x00, 0xa2, 0xc8, 0x5b, 0xf5, 0xa9, 0x04, 0xd6, 0x14, 0xa6,
- 0xcd, 0x49, 0x48, 0xda, 0xfa, 0x2d, 0x01, 0xc1, 0x1a, 0x16, 0xaa, 0xc0, 0x4c, 0x3b, 0x0c, 0x62,
- 0xae, 0x0f, 0xad, 0x70, 0x23, 0x99, 0x61, 0xd3, 0x1b, 0xb2, 0x96, 0x82, 0xe3, 0xae, 0x1a, 0xe8,
- 0x55, 0x18, 0x17, 0x1e, 0x92, 0xb5, 0x20, 0xf0, 0x84, 0xaa, 0x46, 0x99, 0x5c, 0xd4, 0x13, 0x10,
- 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0xc9, 0x3a, 0x9a, 0x59, 0x8d, 0x2b, 0x5a, 0x35, 0xbc, 0x54, 0xb8,
- 0xa1, 0xb1, 0x81, 0xc2, 0x0d, 0x25, 0xca, 0xab, 0xd2, 0xc0, 0xef, 0x4a, 0xd0, 0x57, 0xdd, 0xf3,
- 0xf3, 0x43, 0x70, 0x4e, 0x2c, 0x9c, 0x47, 0xbd, 0x5c, 0xee, 0x74, 0x2f, 0x97, 0xd3, 0x50, 0x6f,
- 0x7d, 0x6b, 0xcd, 0x9c, 0xf5, 0x9a, 0xf9, 0x46, 0x11, 0x46, 0xf8, 0x54, 0x9c, 0x81, 0x0c, 0xbf,
- 0x26, 0x94, 0x7e, 0x3d, 0x02, 0xed, 0xf0, 0xbe, 0x2c, 0x54, 0x9c, 0xd8, 0xe1, 0xe7, 0x97, 0x62,
- 0xa3, 0x89, 0x7a, 0x10, 0x2d, 0x18, 0x8c, 0x76, 0x3e, 0xa5, 0xd5, 0x02, 0x4e, 0x43, 0x63, 0xbb,
- 0x5f, 0x06, 0x88, 0xe2, 0xd0, 0xf5, 0x5b, 0x94, 0x86, 0x08, 0xd9, 0xf4, 0x5c, 0x8f, 0xd6, 0xeb,
- 0x0a, 0x99, 0xf7, 0x21, 0x59, 0x82, 0x0a, 0x80, 0x35, 0x8a, 0xf3, 0xaf, 0x41, 0x49, 0x21, 0xf7,
- 0x53, 0x01, 0x4c, 0xe8, 0xa7, 0xde, 0x17, 0x60, 0x3a, 0xd5, 0xd6, 0x89, 0x34, 0x08, 0xbf, 0x6c,
- 0xc1, 0x34, 0xef, 0xf2, 0xaa, 0xbf, 0x2f, 0x36, 0xfb, 0x07, 0x70, 0xde, 0xcb, 0xd8, 0x74, 0x62,
- 0x46, 0x07, 0xdf, 0xa4, 0x4a, 0x63, 0x90, 0x05, 0xc5, 0x99, 0x6d, 0xa0, 0xab, 0x30, 0xc6, 0x5d,
- 0x76, 0x1c, 0x4f, 0xb8, 0x59, 0x4c, 0xf0, 0xd4, 0x1d, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0x7b, 0x16,
- 0xcc, 0xf2, 0x9e, 0xdf, 0x24, 0x07, 0xea, 0x76, 0xfc, 0x51, 0xf6, 0x5d, 0x64, 0x26, 0x29, 0xe4,
- 0x64, 0x26, 0xd1, 0x3f, 0xad, 0xd8, 0xf3, 0xd3, 0x7e, 0xce, 0x02, 0xb1, 0x02, 0xcf, 0xe0, 0x1e,
- 0xf8, 0xed, 0xe6, 0x3d, 0x70, 0x3e, 0x7f, 0x51, 0xe7, 0x5c, 0x00, 0xff, 0xdc, 0x82, 0x19, 0x8e,
- 0x90, 0x3c, 0x44, 0x7e, 0xa4, 0xf3, 0x30, 0x48, 0xba, 0x3c, 0x95, 0x9f, 0x3c, 0xfb, 0xa3, 0x8c,
- 0xc9, 0x1a, 0xea, 0x39, 0x59, 0x4d, 0xb9, 0x81, 0x4e, 0x90, 0x06, 0xf2, 0xc4, 0xc1, 0x74, 0xed,
- 0x3f, 0xb1, 0x00, 0xf1, 0x66, 0x8c, 0x73, 0x99, 0x9e, 0x76, 0xac, 0x54, 0xd3, 0x04, 0x25, 0xac,
- 0x46, 0x41, 0xb0, 0x86, 0x75, 0x2a, 0xc3, 0x93, 0x7a, 0x4d, 0x2e, 0xf6, 0x7f, 0x4d, 0x3e, 0xc1,
- 0x88, 0x7e, 0x63, 0x08, 0xd2, 0x36, 0xda, 0xe8, 0x2e, 0x4c, 0x34, 0x9c, 0xb6, 0xb3, 0xe5, 0x7a,
- 0x6e, 0xec, 0x92, 0xa8, 0x97, 0x19, 0xca, 0x8a, 0x86, 0x27, 0xde, 0x09, 0xb5, 0x12, 0x6c, 0xd0,
- 0x41, 0x0b, 0x00, 0xed, 0xd0, 0xdd, 0x77, 0x3d, 0xd2, 0x62, 0x57, 0x61, 0xe6, 0xd8, 0xc5, 0x6d,
- 0x2b, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x1c, 0x81, 0x8a, 0x8f, 0xce, 0x11, 0x68, 0xe8, 0x84, 0x8e,
- 0x40, 0xc3, 0x03, 0x39, 0x02, 0x61, 0x78, 0x4c, 0x9e, 0xdd, 0xf4, 0xff, 0x9a, 0xeb, 0x11, 0x21,
- 0xb0, 0x71, 0x77, 0xaf, 0xf9, 0xa3, 0xc3, 0xf2, 0x63, 0x38, 0x13, 0x03, 0xe7, 0xd4, 0x44, 0x5f,
- 0x84, 0x39, 0xc7, 0xf3, 0x82, 0xfb, 0x6a, 0xd4, 0x56, 0xa3, 0x86, 0xe3, 0x25, 0xb1, 0xe5, 0xc7,
- 0x96, 0x9f, 0x3c, 0x3a, 0x2c, 0xcf, 0x2d, 0xe5, 0xe0, 0xe0, 0xdc, 0xda, 0x29, 0x3f, 0xa2, 0xb1,
- 0xbe, 0x7e, 0x44, 0xbb, 0x70, 0xae, 0x4e, 0x42, 0x99, 0x89, 0x56, 0x6d, 0xc9, 0x4d, 0x28, 0x85,
- 0x29, 0x26, 0x34, 0x50, 0x0c, 0x18, 0x2d, 0x4e, 0xa8, 0x64, 0x3a, 0x09, 0x21, 0xfb, 0xcf, 0x2c,
- 0x18, 0x15, 0x76, 0xe2, 0x67, 0x20, 0xfb, 0x2c, 0x19, 0xfa, 0xcb, 0x72, 0x36, 0xa3, 0x66, 0x9d,
- 0xc9, 0xd5, 0x5c, 0x56, 0x53, 0x9a, 0xcb, 0xa7, 0x7b, 0x11, 0xe9, 0xad, 0xb3, 0xfc, 0xb1, 0x22,
- 0x4c, 0x99, 0x36, 0xf2, 0x67, 0x30, 0x04, 0x1b, 0x30, 0x1a, 0x09, 0x87, 0x8c, 0x42, 0xbe, 0xe1,
- 0x6c, 0x7a, 0x12, 0x13, 0xab, 0x18, 0xe1, 0x82, 0x21, 0x89, 0x64, 0x7a, 0x7a, 0x14, 0x1f, 0xa1,
- 0xa7, 0x47, 0x3f, 0x37, 0x85, 0xa1, 0xd3, 0x70, 0x53, 0xb0, 0x7f, 0x95, 0x1d, 0x16, 0x7a, 0xf9,
- 0x19, 0xc8, 0x11, 0xd7, 0xcd, 0x63, 0xc5, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, 0xe4, 0x89, 0x7f,
- 0x66, 0xc1, 0xb8, 0x40, 0x3c, 0x83, 0x6e, 0x7f, 0x87, 0xd9, 0xed, 0x27, 0x7a, 0x74, 0x3b, 0xa7,
- 0xbf, 0xff, 0xa0, 0xa0, 0xfa, 0x5b, 0x0b, 0xc2, 0x78, 0xa0, 0xdc, 0x24, 0x63, 0xf4, 0xf6, 0x18,
- 0x34, 0x02, 0x4f, 0x1c, 0xfe, 0x4f, 0x26, 0x1e, 0xc2, 0xbc, 0xfc, 0x58, 0xfb, 0x8d, 0x15, 0x36,
- 0x73, 0x60, 0x0d, 0xc2, 0x58, 0x1c, 0xb8, 0x89, 0x03, 0x6b, 0x10, 0xc6, 0x98, 0x41, 0x50, 0x13,
- 0x20, 0x76, 0xc2, 0x16, 0x89, 0x69, 0x99, 0x08, 0x36, 0x90, 0xbf, 0x0b, 0x3b, 0xb1, 0xeb, 0x2d,
- 0xb8, 0x7e, 0x1c, 0xc5, 0xe1, 0x42, 0xd5, 0x8f, 0x6f, 0x87, 0xfc, 0x2e, 0xa1, 0xb9, 0xfc, 0x2a,
- 0x5a, 0x58, 0xa3, 0x2b, 0x7d, 0xc8, 0x58, 0x1b, 0xc3, 0xe6, 0xc3, 0xe2, 0x86, 0x28, 0xc7, 0x0a,
- 0xc3, 0x7e, 0x8d, 0xf1, 0x64, 0x36, 0x40, 0x27, 0xf3, 0xc6, 0xfd, 0xed, 0x31, 0x35, 0xb4, 0xec,
- 0x55, 0xa1, 0xa2, 0xfb, 0xfc, 0xf6, 0x66, 0x81, 0xb4, 0x61, 0xdd, 0x1f, 0x21, 0x71, 0x0c, 0x46,
- 0xdf, 0xd9, 0xf5, 0xde, 0xfc, 0x62, 0x1f, 0x5e, 0x7a, 0x82, 0x17, 0x66, 0x16, 0xe0, 0x96, 0x05,
- 0x02, 0xad, 0xd6, 0xd2, 0xd9, 0x63, 0x56, 0x24, 0x00, 0x27, 0x38, 0x68, 0x51, 0xdc, 0x44, 0xb9,
- 0x3e, 0xef, 0x89, 0xd4, 0x4d, 0x54, 0x7e, 0xbe, 0x76, 0x15, 0x7d, 0x09, 0xc6, 0x55, 0x46, 0xbe,
- 0x1a, 0x4f, 0x6c, 0x26, 0x42, 0x2f, 0xac, 0x26, 0xc5, 0x58, 0xc7, 0x41, 0x9b, 0x30, 0x1d, 0xf1,
- 0x74, 0x81, 0x2a, 0xc2, 0x16, 0xd7, 0x33, 0x3c, 0x27, 0xdf, 0xa9, 0xeb, 0x26, 0xf8, 0x98, 0x15,
- 0xf1, 0xcd, 0x2a, 0x1d, 0xc1, 0xd2, 0x24, 0xd0, 0x9b, 0x30, 0xe5, 0xe9, 0x69, 0xd3, 0x6b, 0x42,
- 0x0d, 0xa1, 0xcc, 0x38, 0x8d, 0xa4, 0xea, 0x35, 0x9c, 0xc2, 0xa6, 0x42, 0x83, 0x5e, 0x22, 0xa2,
- 0xc2, 0x39, 0x7e, 0x8b, 0x44, 0x22, 0x9f, 0x18, 0x13, 0x1a, 0x6e, 0xe5, 0xe0, 0xe0, 0xdc, 0xda,
- 0xe8, 0x75, 0x98, 0x90, 0x9f, 0xaf, 0xb9, 0x39, 0x26, 0xc6, 0xc2, 0x1a, 0x0c, 0x1b, 0x98, 0xe8,
- 0x3e, 0x5c, 0x90, 0xff, 0x37, 0x43, 0x67, 0x7b, 0xdb, 0x6d, 0x08, 0x2f, 0x53, 0xee, 0x31, 0xb1,
- 0x24, 0x5d, 0x30, 0x56, 0xb3, 0x90, 0x8e, 0x0f, 0xcb, 0x97, 0xc5, 0xa8, 0x65, 0xc2, 0xd9, 0x24,
- 0x66, 0xd3, 0x47, 0xeb, 0x70, 0x6e, 0x87, 0x38, 0x5e, 0xbc, 0xb3, 0xb2, 0x43, 0x1a, 0xbb, 0x72,
- 0x13, 0x31, 0xe7, 0x49, 0xcd, 0xc4, 0xf6, 0x46, 0x37, 0x0a, 0xce, 0xaa, 0x87, 0xde, 0x85, 0xb9,
- 0x76, 0x67, 0xcb, 0x73, 0xa3, 0x9d, 0x8d, 0x20, 0x66, 0x4f, 0xe3, 0x2a, 0xa1, 0x9d, 0xf0, 0xb2,
- 0x54, 0x8e, 0xa3, 0xb5, 0x1c, 0x3c, 0x9c, 0x4b, 0x01, 0x7d, 0x00, 0x17, 0x52, 0x8b, 0x41, 0xf8,
- 0x7c, 0x4d, 0xe5, 0xc7, 0xd8, 0xac, 0x67, 0x55, 0x10, 0x3e, 0x5c, 0x59, 0x20, 0x9c, 0xdd, 0xc4,
- 0x87, 0x33, 0x98, 0x78, 0x9f, 0x56, 0xd6, 0xa4, 0x1b, 0xf4, 0x15, 0x98, 0xd0, 0x57, 0x91, 0x38,
- 0x60, 0xae, 0x64, 0x1f, 0xfe, 0xda, 0x6a, 0xe3, 0xb2, 0x91, 0x5a, 0x51, 0x3a, 0x0c, 0x1b, 0x14,
- 0x6d, 0x02, 0xd9, 0xdf, 0x87, 0x6e, 0xc1, 0x58, 0xc3, 0x73, 0x89, 0x1f, 0x57, 0x6b, 0xbd, 0xa2,
- 0x09, 0xac, 0x08, 0x1c, 0x31, 0x60, 0x22, 0x28, 0x21, 0x2f, 0xc3, 0x8a, 0x82, 0xfd, 0xeb, 0x05,
- 0x28, 0xf7, 0x89, 0x70, 0x99, 0xd2, 0x19, 0x5a, 0x03, 0xe9, 0x0c, 0x97, 0x64, 0x7a, 0xbe, 0x8d,
- 0xd4, 0x7d, 0x35, 0x95, 0x7a, 0x2f, 0xb9, 0xb5, 0xa6, 0xf1, 0x07, 0xb6, 0xb3, 0xd4, 0xd5, 0x8e,
- 0x43, 0x7d, 0x2d, 0x80, 0x8d, 0xe7, 0x86, 0xe1, 0xc1, 0x25, 0xfa, 0x5c, 0xd5, 0xb1, 0xfd, 0xab,
- 0x05, 0xb8, 0xa0, 0x86, 0xf0, 0xaf, 0xee, 0xc0, 0xdd, 0xe9, 0x1e, 0xb8, 0x53, 0x50, 0xbc, 0xdb,
- 0xb7, 0x61, 0xa4, 0x7e, 0x10, 0x35, 0x62, 0x6f, 0x00, 0x01, 0xe8, 0x19, 0x33, 0xaa, 0x8e, 0x3a,
- 0xa6, 0x8d, 0xc8, 0x3a, 0x7f, 0xd3, 0x82, 0xe9, 0xcd, 0x95, 0x5a, 0x3d, 0x68, 0xec, 0x92, 0x78,
- 0x89, 0xab, 0x95, 0xb0, 0x90, 0x7f, 0xac, 0x87, 0x94, 0x6b, 0xb2, 0x24, 0xa6, 0xcb, 0x30, 0xb4,
- 0x13, 0x44, 0x71, 0xfa, 0x55, 0xee, 0x46, 0x10, 0xc5, 0x98, 0x41, 0xec, 0xdf, 0xb7, 0x60, 0x98,
- 0x25, 0x95, 0xed, 0x97, 0x7c, 0x78, 0x90, 0xef, 0x42, 0xaf, 0xc2, 0x08, 0xd9, 0xde, 0x26, 0x8d,
- 0x58, 0xcc, 0xaa, 0x74, 0xeb, 0x1b, 0x59, 0x65, 0xa5, 0xf4, 0xd0, 0x67, 0x8d, 0xf1, 0xbf, 0x58,
- 0x20, 0xa3, 0x7b, 0x50, 0x8a, 0xdd, 0x3d, 0xb2, 0xd4, 0x6c, 0x8a, 0x77, 0x8d, 0x87, 0xf0, 0xa2,
- 0xdc, 0x94, 0x04, 0x70, 0x42, 0xcb, 0xfe, 0x5a, 0x01, 0x20, 0x71, 0x4d, 0xee, 0xf7, 0x89, 0xcb,
- 0x5d, 0xf9, 0x95, 0xaf, 0x64, 0xe4, 0x57, 0x46, 0x09, 0xc1, 0x8c, 0xec, 0xca, 0x6a, 0x98, 0x8a,
- 0x03, 0x0d, 0xd3, 0xd0, 0x49, 0x86, 0x69, 0x05, 0x66, 0x13, 0xd7, 0x6a, 0x33, 0xce, 0x04, 0x8b,
- 0x78, 0xbf, 0x99, 0x06, 0xe2, 0x6e, 0x7c, 0xfb, 0x07, 0x2c, 0x10, 0xee, 0x09, 0x03, 0x2c, 0xe6,
- 0x77, 0x64, 0x2a, 0x54, 0x23, 0x50, 0xee, 0xe5, 0x7c, 0x7f, 0x0d, 0x11, 0x1e, 0x57, 0x1d, 0x1e,
- 0x46, 0x50, 0x5c, 0x83, 0x96, 0xdd, 0x04, 0x01, 0xad, 0x10, 0xa6, 0x64, 0xe8, 0xdf, 0x9b, 0x6b,
- 0x00, 0x4d, 0x86, 0xab, 0xa5, 0x56, 0x54, 0xac, 0xaa, 0xa2, 0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xa4,
- 0x00, 0xe3, 0x32, 0x30, 0x2b, 0xbd, 0xc7, 0xf7, 0x6f, 0xe5, 0x44, 0x59, 0x19, 0x58, 0x2e, 0x52,
- 0x4a, 0x58, 0x05, 0xef, 0xd7, 0x73, 0x91, 0x4a, 0x00, 0x4e, 0x70, 0xd0, 0xb3, 0x30, 0x1a, 0x75,
- 0xb6, 0x18, 0x7a, 0xca, 0xe8, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0xa3, 0x2f, 0xc2, 0x0c, 0xaf, 0x17,
- 0x06, 0x6d, 0xa7, 0xc5, 0x35, 0x4e, 0xc3, 0xca, 0x0b, 0x6e, 0x66, 0x3d, 0x05, 0x3b, 0x3e, 0x2c,
- 0x9f, 0x4f, 0x97, 0x31, 0x5d, 0x65, 0x17, 0x15, 0xfb, 0x2b, 0x80, 0xba, 0x63, 0xcd, 0xa2, 0xb7,
- 0xb8, 0x59, 0x85, 0x1b, 0x92, 0x66, 0x2f, 0x25, 0xa4, 0xee, 0xb4, 0x25, 0x0d, 0x5f, 0x79, 0x2d,
- 0xac, 0xea, 0xd3, 0x9d, 0x37, 0x93, 0x76, 0xe1, 0x41, 0x37, 0x60, 0x84, 0x33, 0x55, 0x41, 0xbe,
- 0xc7, 0x1b, 0x97, 0xe6, 0xf8, 0xc3, 0x02, 0xe1, 0x0b, 0xbe, 0x2c, 0xea, 0xa3, 0x77, 0x61, 0xbc,
- 0x19, 0xdc, 0xf7, 0xef, 0x3b, 0x61, 0x73, 0xa9, 0x56, 0x15, 0xeb, 0x32, 0x53, 0x36, 0xab, 0x24,
- 0x68, 0xba, 0x33, 0x11, 0xd3, 0xe7, 0x26, 0x20, 0xac, 0x93, 0x43, 0x9b, 0x2c, 0x7e, 0xd6, 0xb6,
- 0xdb, 0x5a, 0x77, 0xda, 0xbd, 0xec, 0xe0, 0x56, 0x24, 0x92, 0x46, 0x79, 0x52, 0x04, 0xd9, 0xe2,
- 0x00, 0x9c, 0x10, 0xb2, 0xbf, 0x7a, 0x0e, 0x8c, 0xfd, 0x60, 0xe4, 0x7d, 0xb0, 0x4e, 0x29, 0xef,
- 0x03, 0x86, 0x31, 0xb2, 0xd7, 0x8e, 0x0f, 0x2a, 0x6e, 0xd8, 0x2b, 0x71, 0xd0, 0xaa, 0xc0, 0xe9,
- 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0x4e, 0xce, 0x51, 0xfc, 0x08, 0x93, 0x73, 0x0c, 0x9d, 0x61,
- 0x72, 0x8e, 0x0d, 0x18, 0x6d, 0xb9, 0x31, 0x26, 0xed, 0x40, 0x08, 0x14, 0x99, 0x2b, 0xe1, 0x3a,
- 0x47, 0xe9, 0x0e, 0x0d, 0x2f, 0x00, 0x58, 0x12, 0x41, 0x6f, 0xa9, 0x3d, 0x30, 0x92, 0x2f, 0x8f,
- 0x77, 0x3f, 0x87, 0x64, 0xee, 0x02, 0x91, 0x8c, 0x63, 0xf4, 0x61, 0x93, 0x71, 0xac, 0xc9, 0x14,
- 0x1a, 0x63, 0xf9, 0x66, 0xa3, 0x2c, 0x43, 0x46, 0x9f, 0xc4, 0x19, 0x46, 0xb2, 0x91, 0xd2, 0xe9,
- 0x25, 0x1b, 0xf9, 0x01, 0x0b, 0x2e, 0xb4, 0xb3, 0xf2, 0xee, 0x88, 0x14, 0x18, 0xaf, 0x0e, 0x9c,
- 0x58, 0xc8, 0x68, 0x90, 0x5d, 0xcc, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, 0xa0, 0xc3, 0xad, 0xa6,
- 0xc8, 0x9b, 0xf1, 0x4c, 0x4e, 0xd6, 0x92, 0x1e, 0xb9, 0x4a, 0x36, 0x33, 0x72, 0x65, 0x7c, 0x32,
- 0x2f, 0x57, 0xc6, 0xc0, 0x19, 0x32, 0x92, 0x7c, 0x25, 0x93, 0x1f, 0x3a, 0x5f, 0xc9, 0x5b, 0x2a,
- 0x5f, 0x49, 0x8f, 0xa8, 0x43, 0x3c, 0x1b, 0x49, 0xdf, 0x2c, 0x25, 0x5a, 0xa6, 0x91, 0xe9, 0xd3,
- 0xc9, 0x34, 0x62, 0x30, 0x7b, 0x9e, 0xec, 0xe2, 0xf9, 0x3e, 0xcc, 0xde, 0xa0, 0xdb, 0x9b, 0xdd,
- 0xf3, 0xac, 0x2a, 0xb3, 0x0f, 0x95, 0x55, 0xe5, 0xae, 0x9e, 0xa5, 0x04, 0xf5, 0x49, 0xc3, 0x41,
- 0x91, 0x06, 0xcc, 0x4d, 0x72, 0x57, 0x3f, 0x82, 0xce, 0xe5, 0xd3, 0x55, 0x27, 0x4d, 0x37, 0xdd,
- 0xac, 0x43, 0xa8, 0x3b, 0xe7, 0xc9, 0xf9, 0xb3, 0xc9, 0x79, 0x72, 0xe1, 0xd4, 0x73, 0x9e, 0x3c,
- 0x76, 0x06, 0x39, 0x4f, 0x1e, 0xff, 0x48, 0x73, 0x9e, 0xcc, 0x3d, 0x82, 0x9c, 0x27, 0x1b, 0x49,
- 0xce, 0x93, 0x8b, 0xf9, 0x53, 0x92, 0x61, 0x27, 0x97, 0x93, 0xe9, 0xe4, 0x2e, 0x94, 0xda, 0xd2,
- 0xcb, 0x5b, 0x84, 0x45, 0xca, 0x4e, 0xb6, 0x98, 0xe5, 0x0a, 0xce, 0xa7, 0x44, 0x81, 0x70, 0x42,
- 0x8a, 0xd2, 0x4d, 0x32, 0x9f, 0x3c, 0xd1, 0x43, 0xf5, 0x96, 0xa5, 0xd4, 0xc8, 0xcf, 0x77, 0x62,
- 0xff, 0xad, 0x02, 0x5c, 0xea, 0xbd, 0xae, 0x13, 0x8d, 0x48, 0x2d, 0xd1, 0xe0, 0xa7, 0x34, 0x22,
- 0xfc, 0x9a, 0x91, 0x60, 0x0d, 0x1c, 0x0a, 0xe3, 0x3a, 0xcc, 0x2a, 0x03, 0x39, 0xcf, 0x6d, 0x1c,
- 0x68, 0x49, 0x18, 0x95, 0xb3, 0x4e, 0x3d, 0x8d, 0x80, 0xbb, 0xeb, 0xa0, 0x25, 0x98, 0x36, 0x0a,
- 0xab, 0x15, 0x71, 0x9d, 0x50, 0x2a, 0x98, 0xba, 0x09, 0xc6, 0x69, 0x7c, 0xfb, 0xeb, 0x16, 0x3c,
- 0x9e, 0x13, 0x0e, 0x7c, 0xe0, 0x48, 0x0f, 0xdb, 0x30, 0xdd, 0x36, 0xab, 0xf6, 0x09, 0x08, 0x63,
- 0x04, 0x1d, 0x57, 0x7d, 0x4d, 0x01, 0x70, 0x9a, 0xe8, 0xf2, 0xd5, 0xdf, 0xfc, 0xc3, 0x4b, 0x9f,
- 0xf8, 0x9d, 0x3f, 0xbc, 0xf4, 0x89, 0xdf, 0xfb, 0xc3, 0x4b, 0x9f, 0xf8, 0xeb, 0x47, 0x97, 0xac,
- 0xdf, 0x3c, 0xba, 0x64, 0xfd, 0xce, 0xd1, 0x25, 0xeb, 0xf7, 0x8e, 0x2e, 0x59, 0x7f, 0x70, 0x74,
- 0xc9, 0xfa, 0xda, 0x1f, 0x5d, 0xfa, 0xc4, 0x3b, 0x85, 0xfd, 0x97, 0xfe, 0x7f, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xcc, 0x93, 0x27, 0x7c, 0x56, 0xe4, 0x00, 0x00,
+ // 13127 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x64, 0x57,
+ 0x56, 0xd8, 0xbe, 0x6e, 0x7d, 0x74, 0x1f, 0x7d, 0xdf, 0xf9, 0xb0, 0x46, 0x9e, 0x99, 0x1e, 0x3f,
+ 0xef, 0x8e, 0xc7, 0x6b, 0x5b, 0xb3, 0x1e, 0xdb, 0x6b, 0xb3, 0xf6, 0x1a, 0x24, 0xb5, 0x34, 0x23,
+ 0xcf, 0x48, 0xd3, 0xbe, 0xad, 0x99, 0xd9, 0x35, 0xde, 0xc5, 0x4f, 0xfd, 0xae, 0xa4, 0x67, 0xb5,
+ 0xde, 0x6b, 0xbf, 0xf7, 0x5a, 0x33, 0x72, 0xa0, 0x42, 0x96, 0x40, 0xd8, 0x82, 0xa4, 0xb6, 0x12,
+ 0x2a, 0x1f, 0x40, 0x91, 0x2a, 0x42, 0x0a, 0x08, 0x24, 0x15, 0x02, 0x01, 0xc2, 0x92, 0x84, 0x40,
+ 0x52, 0x45, 0xf2, 0x63, 0x43, 0x52, 0x95, 0x5a, 0xaa, 0xa8, 0x28, 0x20, 0x52, 0xa1, 0xf8, 0x11,
+ 0x48, 0x85, 0xfc, 0x41, 0xa1, 0x42, 0xea, 0x7e, 0xbe, 0x7b, 0x5f, 0xbf, 0xd7, 0xdd, 0x1a, 0x6b,
+ 0x64, 0xb3, 0xb5, 0xff, 0xba, 0xef, 0x39, 0xf7, 0xdc, 0xfb, 0xee, 0xe7, 0x39, 0xe7, 0x9e, 0x0f,
+ 0x78, 0x75, 0xfb, 0x95, 0x68, 0xd6, 0x0b, 0xae, 0x6e, 0xb7, 0xd7, 0x49, 0xe8, 0x93, 0x98, 0x44,
+ 0x57, 0x77, 0x89, 0xef, 0x06, 0xe1, 0x55, 0x01, 0x70, 0x5a, 0xde, 0xd5, 0x46, 0x10, 0x92, 0xab,
+ 0xbb, 0xcf, 0x5f, 0xdd, 0x24, 0x3e, 0x09, 0x9d, 0x98, 0xb8, 0xb3, 0xad, 0x30, 0x88, 0x03, 0x84,
+ 0x38, 0xce, 0xac, 0xd3, 0xf2, 0x66, 0x29, 0xce, 0xec, 0xee, 0xf3, 0x33, 0xcf, 0x6d, 0x7a, 0xf1,
+ 0x56, 0x7b, 0x7d, 0xb6, 0x11, 0xec, 0x5c, 0xdd, 0x0c, 0x36, 0x83, 0xab, 0x0c, 0x75, 0xbd, 0xbd,
+ 0xc1, 0xfe, 0xb1, 0x3f, 0xec, 0x17, 0x27, 0x31, 0xf3, 0x62, 0xd2, 0xcc, 0x8e, 0xd3, 0xd8, 0xf2,
+ 0x7c, 0x12, 0xee, 0x5d, 0x6d, 0x6d, 0x6f, 0xb2, 0x76, 0x43, 0x12, 0x05, 0xed, 0xb0, 0x41, 0xd2,
+ 0x0d, 0x77, 0xad, 0x15, 0x5d, 0xdd, 0x21, 0xb1, 0x93, 0xd1, 0xdd, 0x99, 0xab, 0x79, 0xb5, 0xc2,
+ 0xb6, 0x1f, 0x7b, 0x3b, 0x9d, 0xcd, 0x7c, 0xba, 0x57, 0x85, 0xa8, 0xb1, 0x45, 0x76, 0x9c, 0x8e,
+ 0x7a, 0x2f, 0xe4, 0xd5, 0x6b, 0xc7, 0x5e, 0xf3, 0xaa, 0xe7, 0xc7, 0x51, 0x1c, 0xa6, 0x2b, 0xd9,
+ 0x5f, 0xb7, 0xe0, 0xd2, 0xdc, 0xbd, 0xfa, 0x62, 0xd3, 0x89, 0x62, 0xaf, 0x31, 0xdf, 0x0c, 0x1a,
+ 0xdb, 0xf5, 0x38, 0x08, 0xc9, 0xdd, 0xa0, 0xd9, 0xde, 0x21, 0x75, 0x36, 0x10, 0xe8, 0x59, 0x28,
+ 0xed, 0xb2, 0xff, 0xcb, 0xd5, 0x69, 0xeb, 0x92, 0x75, 0xa5, 0x3c, 0x3f, 0xf9, 0x9b, 0xfb, 0x95,
+ 0x8f, 0x1d, 0xec, 0x57, 0x4a, 0x77, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x0c, 0x6d, 0x44, 0x6b,
+ 0x7b, 0x2d, 0x32, 0x5d, 0x60, 0xb8, 0xe3, 0x02, 0x77, 0x68, 0xa9, 0x4e, 0x4b, 0xb1, 0x80, 0xa2,
+ 0xab, 0x50, 0x6e, 0x39, 0x61, 0xec, 0xc5, 0x5e, 0xe0, 0x4f, 0x17, 0x2f, 0x59, 0x57, 0x06, 0xe7,
+ 0xa7, 0x04, 0x6a, 0xb9, 0x26, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xe3, 0xde, 0xf6, 0x9b,
+ 0x7b, 0xd3, 0x03, 0x97, 0xac, 0x2b, 0xa5, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x70,
+ 0x01, 0x4a, 0x73, 0x1b, 0x1b, 0x9e, 0xef, 0xc5, 0x7b, 0xe8, 0x2e, 0x8c, 0xfa, 0x81, 0x4b, 0xe4,
+ 0x7f, 0xf6, 0x15, 0x23, 0xd7, 0x2e, 0xcd, 0x76, 0x2e, 0xa5, 0xd9, 0x55, 0x0d, 0x6f, 0x7e, 0xf2,
+ 0x60, 0xbf, 0x32, 0xaa, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd2, 0x0a, 0x5c, 0x45, 0xb6, 0xc0,
+ 0xc8, 0x56, 0xb2, 0xc8, 0xd6, 0x12, 0xb4, 0xf9, 0x89, 0x83, 0xfd, 0xca, 0x88, 0x56, 0x80, 0x75,
+ 0x22, 0x68, 0x1d, 0x26, 0xe8, 0x5f, 0x3f, 0xf6, 0x14, 0xdd, 0x22, 0xa3, 0xfb, 0x64, 0x1e, 0x5d,
+ 0x0d, 0x75, 0xfe, 0xd4, 0xc1, 0x7e, 0x65, 0x22, 0x55, 0x88, 0xd3, 0x04, 0xed, 0xf7, 0x61, 0x7c,
+ 0x2e, 0x8e, 0x9d, 0xc6, 0x16, 0x71, 0xf9, 0x0c, 0xa2, 0x17, 0x61, 0xc0, 0x77, 0x76, 0x88, 0x98,
+ 0xdf, 0x4b, 0x62, 0x60, 0x07, 0x56, 0x9d, 0x1d, 0x72, 0xb8, 0x5f, 0x99, 0xbc, 0xe3, 0x7b, 0xef,
+ 0xb5, 0xc5, 0xaa, 0xa0, 0x65, 0x98, 0x61, 0xa3, 0x6b, 0x00, 0x2e, 0xd9, 0xf5, 0x1a, 0xa4, 0xe6,
+ 0xc4, 0x5b, 0x62, 0xbe, 0x91, 0xa8, 0x0b, 0x55, 0x05, 0xc1, 0x1a, 0x96, 0xfd, 0x00, 0xca, 0x73,
+ 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x37, 0x42, 0xdb, 0x30, 0xd1, 0x0a, 0xc9, 0x06, 0x09, 0x55, 0xd1,
+ 0xb4, 0x75, 0xa9, 0x78, 0x65, 0xe4, 0xda, 0x95, 0xcc, 0x8f, 0x35, 0x51, 0x17, 0xfd, 0x38, 0xdc,
+ 0x9b, 0x7f, 0x4c, 0xb4, 0x37, 0x91, 0x82, 0xe2, 0x34, 0x65, 0xfb, 0xdf, 0x16, 0xe0, 0xcc, 0xdc,
+ 0xfb, 0xed, 0x90, 0x54, 0xbd, 0x68, 0x3b, 0xbd, 0xc2, 0x5d, 0x2f, 0xda, 0x5e, 0x4d, 0x46, 0x40,
+ 0x2d, 0xad, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xc3, 0xf4, 0xf7, 0x1d, 0xbc, 0x2c, 0x3e,
+ 0xf9, 0x94, 0x40, 0x1e, 0xa9, 0x3a, 0xb1, 0x53, 0xe5, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x8c, 0x34,
+ 0xd8, 0x86, 0xdc, 0x5c, 0x09, 0x5c, 0xc2, 0x26, 0xb3, 0x3c, 0xff, 0x0c, 0x45, 0x5f, 0x48, 0x8a,
+ 0x0f, 0xf7, 0x2b, 0xd3, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, 0xab, 0xfd, 0x35,
+ 0xc0, 0x28, 0x41, 0xc6, 0xde, 0xba, 0xa2, 0x6d, 0x95, 0x41, 0xb6, 0x55, 0x46, 0xb3, 0xb7, 0x09,
+ 0x7a, 0x1e, 0x06, 0xb6, 0x3d, 0xdf, 0x9d, 0x1e, 0x62, 0xb4, 0x2e, 0xd0, 0x39, 0xbf, 0xe9, 0xf9,
+ 0xee, 0xe1, 0x7e, 0x65, 0xca, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x13, 0x0b, 0x2a, 0x0c,
+ 0xb6, 0xe4, 0x35, 0x49, 0x8d, 0x84, 0x91, 0x17, 0xc5, 0xc4, 0x8f, 0x8d, 0x01, 0xbd, 0x06, 0x10,
+ 0x91, 0x46, 0x48, 0x62, 0x6d, 0x48, 0xd5, 0xc2, 0xa8, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0x81, 0x10,
+ 0x6d, 0x39, 0x21, 0x5b, 0x5f, 0x62, 0x60, 0xd5, 0x81, 0x50, 0x97, 0x00, 0x9c, 0xe0, 0x18, 0x07,
+ 0x42, 0xb1, 0xd7, 0x81, 0x80, 0x3e, 0x0b, 0x13, 0x49, 0x63, 0x51, 0xcb, 0x69, 0xc8, 0x01, 0x64,
+ 0x5b, 0xa6, 0x6e, 0x82, 0x70, 0x1a, 0xd7, 0xfe, 0x47, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0x3f, 0xe2,
+ 0xdf, 0x6a, 0xff, 0xb2, 0x05, 0xc3, 0xf3, 0x9e, 0xef, 0x7a, 0xfe, 0x26, 0x7a, 0x07, 0x4a, 0xf4,
+ 0x6e, 0x72, 0x9d, 0xd8, 0x11, 0xe7, 0xde, 0xa7, 0xb4, 0xbd, 0xa5, 0xae, 0x8a, 0xd9, 0xd6, 0xf6,
+ 0x26, 0x2d, 0x88, 0x66, 0x29, 0x36, 0xdd, 0x6d, 0xb7, 0xd7, 0xdf, 0x25, 0x8d, 0x78, 0x85, 0xc4,
+ 0x4e, 0xf2, 0x39, 0x49, 0x19, 0x56, 0x54, 0xd1, 0x4d, 0x18, 0x8a, 0x9d, 0x70, 0x93, 0xc4, 0xe2,
+ 0x00, 0xcc, 0x3c, 0xa8, 0x78, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1b, 0x24, 0xb9, 0x16, 0xd6, 0x58,
+ 0x55, 0x2c, 0x48, 0xd8, 0x7f, 0x7d, 0x18, 0xce, 0x2d, 0xd4, 0x97, 0x73, 0xd6, 0xd5, 0x65, 0x18,
+ 0x72, 0x43, 0x6f, 0x97, 0x84, 0x62, 0x9c, 0x15, 0x95, 0x2a, 0x2b, 0xc5, 0x02, 0x8a, 0x5e, 0x81,
+ 0x51, 0x7e, 0x21, 0xdd, 0x70, 0x7c, 0xb7, 0x29, 0x87, 0xf8, 0xb4, 0xc0, 0x1e, 0xbd, 0xab, 0xc1,
+ 0xb0, 0x81, 0x79, 0xc4, 0x45, 0x75, 0x39, 0xb5, 0x19, 0xf3, 0x2e, 0xbb, 0x2f, 0x5b, 0x30, 0xc9,
+ 0x9b, 0x99, 0x8b, 0xe3, 0xd0, 0x5b, 0x6f, 0xc7, 0x24, 0x9a, 0x1e, 0x64, 0x27, 0xdd, 0x42, 0xd6,
+ 0x68, 0xe5, 0x8e, 0xc0, 0xec, 0xdd, 0x14, 0x15, 0x7e, 0x08, 0x4e, 0x8b, 0x76, 0x27, 0xd3, 0x60,
+ 0xdc, 0xd1, 0x2c, 0xfa, 0x1e, 0x0b, 0x66, 0x1a, 0x81, 0x1f, 0x87, 0x41, 0xb3, 0x49, 0xc2, 0x5a,
+ 0x7b, 0xbd, 0xe9, 0x45, 0x5b, 0x7c, 0x9d, 0x62, 0xb2, 0xc1, 0x4e, 0x82, 0x9c, 0x39, 0x54, 0x48,
+ 0x62, 0x0e, 0x2f, 0x1e, 0xec, 0x57, 0x66, 0x16, 0x72, 0x49, 0xe1, 0x2e, 0xcd, 0xa0, 0x6d, 0x40,
+ 0xf4, 0x2a, 0xad, 0xc7, 0xce, 0x26, 0x49, 0x1a, 0x1f, 0xee, 0xbf, 0xf1, 0xb3, 0x07, 0xfb, 0x15,
+ 0xb4, 0xda, 0x41, 0x02, 0x67, 0x90, 0x45, 0xef, 0xc1, 0x69, 0x5a, 0xda, 0xf1, 0xad, 0xa5, 0xfe,
+ 0x9b, 0x9b, 0x3e, 0xd8, 0xaf, 0x9c, 0x5e, 0xcd, 0x20, 0x82, 0x33, 0x49, 0xa3, 0xef, 0xb6, 0xe0,
+ 0x5c, 0xf2, 0xf9, 0x8b, 0x0f, 0x5a, 0x8e, 0xef, 0x26, 0x0d, 0x97, 0xfb, 0x6f, 0x98, 0x9e, 0xc9,
+ 0xe7, 0x16, 0xf2, 0x28, 0xe1, 0xfc, 0x46, 0x66, 0x16, 0xe0, 0x4c, 0xe6, 0x6a, 0x41, 0x93, 0x50,
+ 0xdc, 0x26, 0x9c, 0x0b, 0x2a, 0x63, 0xfa, 0x13, 0x9d, 0x86, 0xc1, 0x5d, 0xa7, 0xd9, 0x16, 0x1b,
+ 0x05, 0xf3, 0x3f, 0x9f, 0x29, 0xbc, 0x62, 0xd9, 0xff, 0xae, 0x08, 0x13, 0x0b, 0xf5, 0xe5, 0x87,
+ 0xda, 0x85, 0xfa, 0x35, 0x54, 0xe8, 0x7a, 0x0d, 0x25, 0x97, 0x5a, 0x31, 0xf7, 0x52, 0xfb, 0xcb,
+ 0x19, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0x76, 0x73, 0x56,
+ 0xd1, 0x20, 0x9b, 0xcc, 0x4c, 0x8e, 0xe5, 0x56, 0xd0, 0x70, 0x9a, 0xe9, 0xa3, 0xef, 0x88, 0x4b,
+ 0xe9, 0x78, 0xe6, 0xb1, 0x01, 0xa3, 0x0b, 0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0x08,
+ 0x3d, 0x05, 0x45, 0xc7, 0x75, 0x19, 0xb7, 0x55, 0x9e, 0x3f, 0x73, 0xb0, 0x5f, 0x29, 0xce, 0xb9,
+ 0xf4, 0xda, 0x07, 0x85, 0xb5, 0x87, 0x29, 0x06, 0xfa, 0x24, 0x0c, 0xb8, 0x61, 0xd0, 0x9a, 0x2e,
+ 0x30, 0x4c, 0xba, 0xeb, 0x06, 0xaa, 0x61, 0xd0, 0x4a, 0xa1, 0x32, 0x1c, 0xfb, 0xd7, 0x0a, 0x70,
+ 0x7e, 0x81, 0xb4, 0xb6, 0x96, 0xea, 0x39, 0xe7, 0xf7, 0x15, 0x28, 0xed, 0x04, 0xbe, 0x17, 0x07,
+ 0x61, 0x24, 0x9a, 0x66, 0x2b, 0x62, 0x45, 0x94, 0x61, 0x05, 0x45, 0x97, 0x60, 0xa0, 0x95, 0x30,
+ 0x95, 0xa3, 0x92, 0x21, 0x65, 0xec, 0x24, 0x83, 0x50, 0x8c, 0x76, 0x44, 0x42, 0xb1, 0x62, 0x14,
+ 0xc6, 0x9d, 0x88, 0x84, 0x98, 0x41, 0x92, 0x9b, 0x99, 0xde, 0xd9, 0xe2, 0x84, 0x4e, 0xdd, 0xcc,
+ 0x14, 0x82, 0x35, 0x2c, 0x54, 0x83, 0x72, 0x94, 0x9a, 0xd9, 0xbe, 0xb6, 0xe9, 0x18, 0xbb, 0xba,
+ 0xd5, 0x4c, 0x26, 0x44, 0x8c, 0x1b, 0x65, 0xa8, 0xe7, 0xd5, 0xfd, 0xd5, 0x02, 0x20, 0x3e, 0x84,
+ 0x7f, 0xc1, 0x06, 0xee, 0x4e, 0xe7, 0xc0, 0xf5, 0xbf, 0x25, 0x8e, 0x6b, 0xf4, 0xfe, 0x8f, 0x05,
+ 0xe7, 0x17, 0x3c, 0xdf, 0x25, 0x61, 0xce, 0x02, 0x7c, 0x34, 0xb2, 0xec, 0xd1, 0x98, 0x06, 0x63,
+ 0x89, 0x0d, 0x1c, 0xc3, 0x12, 0xb3, 0xff, 0xd8, 0x02, 0xc4, 0x3f, 0xfb, 0x23, 0xf7, 0xb1, 0x77,
+ 0x3a, 0x3f, 0xf6, 0x18, 0x96, 0x85, 0x7d, 0x0b, 0xc6, 0x17, 0x9a, 0x1e, 0xf1, 0xe3, 0xe5, 0xda,
+ 0x42, 0xe0, 0x6f, 0x78, 0x9b, 0xe8, 0x33, 0x30, 0x1e, 0x7b, 0x3b, 0x24, 0x68, 0xc7, 0x75, 0xd2,
+ 0x08, 0x7c, 0x26, 0x49, 0x5a, 0x57, 0x06, 0xe7, 0xd1, 0xc1, 0x7e, 0x65, 0x7c, 0xcd, 0x80, 0xe0,
+ 0x14, 0xa6, 0xfd, 0x3b, 0x74, 0xfc, 0x82, 0x9d, 0x56, 0xe0, 0x13, 0x3f, 0x5e, 0x08, 0x7c, 0x97,
+ 0x6b, 0x1c, 0x3e, 0x03, 0x03, 0x31, 0x1d, 0x0f, 0x3e, 0x76, 0x97, 0xe5, 0x46, 0xa1, 0xa3, 0x70,
+ 0xb8, 0x5f, 0x39, 0xdb, 0x59, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x05, 0x86, 0xa2, 0xd8, 0x89,
+ 0xdb, 0x91, 0x18, 0xcd, 0x27, 0xe4, 0x68, 0xd6, 0x59, 0xe9, 0xe1, 0x7e, 0x65, 0x42, 0x55, 0xe3,
+ 0x45, 0x58, 0x54, 0x40, 0x4f, 0xc3, 0xf0, 0x0e, 0x89, 0x22, 0x67, 0x53, 0xde, 0x86, 0x13, 0xa2,
+ 0xee, 0xf0, 0x0a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x41, 0x12, 0x86, 0x41, 0x28, 0xf6, 0xe8,
+ 0x98, 0x40, 0x1c, 0x5c, 0xa4, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x68, 0xc1, 0x84, 0xea, 0x2b, 0x6f,
+ 0xeb, 0x04, 0xa4, 0x82, 0xb7, 0x00, 0x1a, 0xf2, 0x03, 0x23, 0x76, 0x7b, 0x8c, 0x5c, 0xbb, 0x9c,
+ 0x79, 0x51, 0x77, 0x0c, 0x63, 0x42, 0x59, 0x15, 0x45, 0x58, 0xa3, 0x66, 0xff, 0x4b, 0x0b, 0x4e,
+ 0xa5, 0xbe, 0xe8, 0x96, 0x17, 0xc5, 0xe8, 0xed, 0x8e, 0xaf, 0x9a, 0xed, 0xef, 0xab, 0x68, 0x6d,
+ 0xf6, 0x4d, 0x6a, 0x29, 0xcb, 0x12, 0xed, 0x8b, 0x6e, 0xc0, 0xa0, 0x17, 0x93, 0x1d, 0xf9, 0x31,
+ 0x4f, 0x76, 0xfd, 0x18, 0xde, 0xab, 0x64, 0x46, 0x96, 0x69, 0x4d, 0xcc, 0x09, 0xd8, 0x7f, 0xab,
+ 0x08, 0x65, 0xbe, 0x6c, 0x57, 0x9c, 0xd6, 0x09, 0xcc, 0xc5, 0x32, 0x0c, 0x30, 0xea, 0xbc, 0xe3,
+ 0x4f, 0x65, 0x77, 0x5c, 0x74, 0x67, 0x96, 0x8a, 0xfc, 0x9c, 0x39, 0x52, 0x57, 0x03, 0x2d, 0xc2,
+ 0x8c, 0x04, 0x72, 0x00, 0xd6, 0x3d, 0xdf, 0x09, 0xf7, 0x68, 0xd9, 0x74, 0x91, 0x11, 0x7c, 0xae,
+ 0x3b, 0xc1, 0x79, 0x85, 0xcf, 0xc9, 0xaa, 0xbe, 0x26, 0x00, 0xac, 0x11, 0x9d, 0x79, 0x19, 0xca,
+ 0x0a, 0xf9, 0x28, 0x3c, 0xce, 0xcc, 0x67, 0x61, 0x22, 0xd5, 0x56, 0xaf, 0xea, 0xa3, 0x3a, 0x8b,
+ 0xf4, 0x2b, 0xec, 0x14, 0x10, 0xbd, 0x5e, 0xf4, 0x77, 0xc5, 0x29, 0xfa, 0x3e, 0x9c, 0x6e, 0x66,
+ 0x1c, 0x4e, 0x62, 0xaa, 0xfa, 0x3f, 0xcc, 0xce, 0x8b, 0xcf, 0x3e, 0x9d, 0x05, 0xc5, 0x99, 0x6d,
+ 0xd0, 0x6b, 0x3f, 0x68, 0xd1, 0x35, 0xef, 0x34, 0x75, 0x0e, 0xfa, 0xb6, 0x28, 0xc3, 0x0a, 0x4a,
+ 0x8f, 0xb0, 0xd3, 0xaa, 0xf3, 0x37, 0xc9, 0x5e, 0x9d, 0x34, 0x49, 0x23, 0x0e, 0xc2, 0x0f, 0xb5,
+ 0xfb, 0x17, 0xf8, 0xe8, 0xf3, 0x13, 0x70, 0x44, 0x10, 0x28, 0xde, 0x24, 0x7b, 0x7c, 0x2a, 0xf4,
+ 0xaf, 0x2b, 0x76, 0xfd, 0xba, 0x9f, 0xb3, 0x60, 0x4c, 0x7d, 0xdd, 0x09, 0x6c, 0xf5, 0x79, 0x73,
+ 0xab, 0x5f, 0xe8, 0xba, 0xc0, 0x73, 0x36, 0xf9, 0x57, 0x0b, 0x70, 0x4e, 0xe1, 0x50, 0x76, 0x9f,
+ 0xff, 0x11, 0xab, 0xea, 0x2a, 0x94, 0x7d, 0xa5, 0x88, 0xb2, 0x4c, 0x0d, 0x50, 0xa2, 0x86, 0x4a,
+ 0x70, 0x28, 0xd7, 0xe6, 0x27, 0xda, 0xa2, 0x51, 0x5d, 0x43, 0x2b, 0xb4, 0xb1, 0xf3, 0x50, 0x6c,
+ 0x7b, 0xae, 0xb8, 0x33, 0x3e, 0x25, 0x47, 0xfb, 0xce, 0x72, 0xf5, 0x70, 0xbf, 0xf2, 0x44, 0xde,
+ 0xeb, 0x00, 0xbd, 0xac, 0xa2, 0xd9, 0x3b, 0xcb, 0x55, 0x4c, 0x2b, 0xa3, 0x39, 0x98, 0x90, 0x0f,
+ 0x20, 0x77, 0x29, 0x07, 0x15, 0xf8, 0xe2, 0x6a, 0x51, 0x6a, 0x56, 0x6c, 0x82, 0x71, 0x1a, 0x1f,
+ 0x55, 0x61, 0x72, 0xbb, 0xbd, 0x4e, 0x9a, 0x24, 0xe6, 0x1f, 0x7c, 0x93, 0x70, 0x25, 0x64, 0x39,
+ 0x11, 0xb6, 0x6e, 0xa6, 0xe0, 0xb8, 0xa3, 0x86, 0xfd, 0xe7, 0xec, 0x88, 0x17, 0xa3, 0x57, 0x0b,
+ 0x03, 0xba, 0xb0, 0x28, 0xf5, 0x0f, 0x73, 0x39, 0xf7, 0xb3, 0x2a, 0x6e, 0x92, 0xbd, 0xb5, 0x80,
+ 0x32, 0xdb, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x03, 0x5d, 0xd7, 0xfc, 0x2f, 0x14, 0xe0, 0x8c, 0x1a,
+ 0x01, 0x83, 0xaf, 0xfb, 0x8b, 0x3e, 0x06, 0xcf, 0xc3, 0x88, 0x4b, 0x36, 0x9c, 0x76, 0x33, 0x56,
+ 0x1a, 0xf1, 0x41, 0xfe, 0x2a, 0x52, 0x4d, 0x8a, 0xb1, 0x8e, 0x73, 0x84, 0x61, 0xfb, 0x89, 0x11,
+ 0x76, 0xb7, 0xc6, 0x0e, 0x5d, 0xe3, 0x6a, 0xd7, 0x58, 0xb9, 0xbb, 0xe6, 0x49, 0x18, 0xf4, 0x76,
+ 0x28, 0xaf, 0x55, 0x30, 0x59, 0xa8, 0x65, 0x5a, 0x88, 0x39, 0x0c, 0x7d, 0x02, 0x86, 0x1b, 0xc1,
+ 0xce, 0x8e, 0xe3, 0xbb, 0xec, 0xca, 0x2b, 0xcf, 0x8f, 0x50, 0x76, 0x6c, 0x81, 0x17, 0x61, 0x09,
+ 0x43, 0xe7, 0x61, 0xc0, 0x09, 0x37, 0xb9, 0x5a, 0xa2, 0x3c, 0x5f, 0xa2, 0x2d, 0xcd, 0x85, 0x9b,
+ 0x11, 0x66, 0xa5, 0x54, 0xaa, 0xba, 0x1f, 0x84, 0xdb, 0x9e, 0xbf, 0x59, 0xf5, 0x42, 0xb1, 0x25,
+ 0xd4, 0x5d, 0x78, 0x4f, 0x41, 0xb0, 0x86, 0x85, 0x96, 0x60, 0xb0, 0x15, 0x84, 0x71, 0x34, 0x3d,
+ 0xc4, 0x86, 0xfb, 0x89, 0x9c, 0x83, 0x88, 0x7f, 0x6d, 0x2d, 0x08, 0xe3, 0xe4, 0x03, 0xe8, 0xbf,
+ 0x08, 0xf3, 0xea, 0xe8, 0x5b, 0xa0, 0x48, 0xfc, 0xdd, 0xe9, 0x61, 0x46, 0x65, 0x26, 0x8b, 0xca,
+ 0xa2, 0xbf, 0x7b, 0xd7, 0x09, 0x93, 0x53, 0x7a, 0xd1, 0xdf, 0xc5, 0xb4, 0x0e, 0xfa, 0x3c, 0x94,
+ 0xe5, 0x16, 0x8f, 0x84, 0xc6, 0x2c, 0x73, 0x89, 0xc9, 0x83, 0x01, 0x93, 0xf7, 0xda, 0x5e, 0x48,
+ 0x76, 0x88, 0x1f, 0x47, 0xc9, 0x99, 0x26, 0xa1, 0x11, 0x4e, 0xa8, 0xa1, 0xcf, 0x4b, 0x35, 0xed,
+ 0x4a, 0xd0, 0xf6, 0xe3, 0x68, 0xba, 0xcc, 0xba, 0x97, 0xf9, 0x80, 0x76, 0x37, 0xc1, 0x4b, 0xeb,
+ 0x71, 0x79, 0x65, 0x6c, 0x90, 0x42, 0x18, 0xc6, 0x9a, 0xde, 0x2e, 0xf1, 0x49, 0x14, 0xd5, 0xc2,
+ 0x60, 0x9d, 0x4c, 0x03, 0xeb, 0xf9, 0xb9, 0xec, 0x77, 0xa5, 0x60, 0x9d, 0xcc, 0x4f, 0x1d, 0xec,
+ 0x57, 0xc6, 0x6e, 0xe9, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x81, 0x71, 0x2a, 0xd7, 0x78, 0x09, 0xd1,
+ 0x91, 0x5e, 0x44, 0x99, 0xf4, 0x81, 0x8d, 0x4a, 0x38, 0x45, 0x04, 0xbd, 0x01, 0xe5, 0xa6, 0xb7,
+ 0x41, 0x1a, 0x7b, 0x8d, 0x26, 0x99, 0x1e, 0x65, 0x14, 0x33, 0xb7, 0xd5, 0x2d, 0x89, 0xc4, 0xe5,
+ 0x22, 0xf5, 0x17, 0x27, 0xd5, 0xd1, 0x5d, 0x38, 0x1b, 0x93, 0x70, 0xc7, 0xf3, 0x1d, 0xba, 0x1d,
+ 0x84, 0xbc, 0xc0, 0x5e, 0xe7, 0xc6, 0xd8, 0x7a, 0xbb, 0x28, 0x86, 0xee, 0xec, 0x5a, 0x26, 0x16,
+ 0xce, 0xa9, 0x8d, 0x6e, 0xc3, 0x04, 0xdb, 0x09, 0xb5, 0x76, 0xb3, 0x59, 0x0b, 0x9a, 0x5e, 0x63,
+ 0x6f, 0x7a, 0x9c, 0x11, 0xfc, 0x84, 0xbc, 0x17, 0x96, 0x4d, 0xf0, 0xe1, 0x7e, 0x05, 0x92, 0x7f,
+ 0x38, 0x5d, 0x1b, 0xad, 0xb3, 0xe7, 0x98, 0x76, 0xe8, 0xc5, 0x7b, 0x74, 0xfd, 0x92, 0x07, 0xf1,
+ 0xf4, 0x44, 0x57, 0x51, 0x58, 0x47, 0x55, 0x6f, 0x36, 0x7a, 0x21, 0x4e, 0x13, 0xa4, 0x5b, 0x3b,
+ 0x8a, 0x5d, 0xcf, 0x9f, 0x9e, 0x64, 0x27, 0x86, 0xda, 0x19, 0x75, 0x5a, 0x88, 0x39, 0x8c, 0x3d,
+ 0xc5, 0xd0, 0x1f, 0xb7, 0xe9, 0x09, 0x3a, 0xc5, 0x10, 0x93, 0xa7, 0x18, 0x09, 0xc0, 0x09, 0x0e,
+ 0x65, 0x6a, 0xe2, 0x78, 0x6f, 0x1a, 0x31, 0x54, 0xb5, 0x5d, 0xd6, 0xd6, 0x3e, 0x8f, 0x69, 0x39,
+ 0xba, 0x05, 0xc3, 0xc4, 0xdf, 0x5d, 0x0a, 0x83, 0x9d, 0xe9, 0x53, 0xf9, 0x7b, 0x76, 0x91, 0xa3,
+ 0xf0, 0x03, 0x3d, 0x11, 0xf0, 0x44, 0x31, 0x96, 0x24, 0xd0, 0x03, 0x98, 0xce, 0x98, 0x11, 0x3e,
+ 0x01, 0xa7, 0xd9, 0x04, 0xbc, 0x26, 0xea, 0x4e, 0xaf, 0xe5, 0xe0, 0x1d, 0x76, 0x81, 0xe1, 0x5c,
+ 0xea, 0xe8, 0x0b, 0x30, 0xc6, 0x37, 0x14, 0x7f, 0xc7, 0x8d, 0xa6, 0xcf, 0xb0, 0xaf, 0xb9, 0x94,
+ 0xbf, 0x39, 0x39, 0xe2, 0xfc, 0x19, 0xd1, 0xa1, 0x31, 0xbd, 0x34, 0xc2, 0x26, 0x35, 0x7b, 0x1d,
+ 0xc6, 0xd5, 0xb9, 0xc5, 0x96, 0x0e, 0xaa, 0xc0, 0x20, 0xe3, 0x76, 0x84, 0x7e, 0xab, 0x4c, 0x67,
+ 0x8a, 0x71, 0x42, 0x98, 0x97, 0xb3, 0x99, 0xf2, 0xde, 0x27, 0xf3, 0x7b, 0x31, 0xe1, 0x52, 0x75,
+ 0x51, 0x9b, 0x29, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0xff, 0x38, 0xd7, 0x98, 0x1c, 0x8e, 0x7d, 0x5c,
+ 0x07, 0xcf, 0x42, 0x69, 0x2b, 0x88, 0x62, 0x8a, 0xcd, 0xda, 0x18, 0x4c, 0xf8, 0xc4, 0x1b, 0xa2,
+ 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x35, 0xf4, 0x06, 0xc4, 0x5d, 0xa6, 0x86, 0xc0, 0x68, 0x1d,
+ 0x9b, 0xb8, 0xe8, 0x15, 0x28, 0x31, 0x2b, 0x8c, 0x46, 0xd0, 0x14, 0x4c, 0x96, 0xbc, 0x90, 0x4b,
+ 0x35, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x86, 0x21, 0xda, 0x85, 0xe5, 0x9a, 0xb8,
+ 0x45, 0x94, 0xaa, 0xe6, 0x06, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0xcd, 0x82, 0x36, 0xca, 0x54, 0x22,
+ 0x25, 0xa8, 0x06, 0xc3, 0xf7, 0x1d, 0x2f, 0xf6, 0xfc, 0x4d, 0xc1, 0x2e, 0x3c, 0xdd, 0xf5, 0x4a,
+ 0x61, 0x95, 0xee, 0xf1, 0x0a, 0xfc, 0xd2, 0x13, 0x7f, 0xb0, 0x24, 0x43, 0x29, 0x86, 0x6d, 0xdf,
+ 0xa7, 0x14, 0x0b, 0xfd, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, 0x06, 0xbd, 0x0d,
+ 0x20, 0x97, 0x25, 0x71, 0x85, 0xf5, 0xc3, 0xb3, 0xbd, 0x89, 0xae, 0xa9, 0x3a, 0xf3, 0xe3, 0xf4,
+ 0x4a, 0x4d, 0xfe, 0x63, 0x8d, 0x9e, 0x1d, 0x33, 0xb6, 0xaa, 0xb3, 0x33, 0xe8, 0xdb, 0xe9, 0x49,
+ 0xe0, 0x84, 0x31, 0x71, 0xe7, 0x62, 0x31, 0x38, 0x9f, 0xec, 0x4f, 0xa6, 0x58, 0xf3, 0x76, 0x88,
+ 0x7e, 0x6a, 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x52, 0x11, 0xa6, 0xf3, 0xba, 0x4b, 0x17, 0x1d,
+ 0x79, 0xe0, 0xc5, 0x0b, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x28, 0xca, 0xb1, 0xc2, 0xa0, 0xb3,
+ 0x1f, 0x79, 0x9b, 0x52, 0x24, 0x1c, 0x4c, 0x66, 0xbf, 0xce, 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x90,
+ 0x38, 0x91, 0x30, 0xaf, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x7d, 0xd3, 0x40, 0x0f,
+ 0x7d, 0x93, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x8b, 0x00, 0x1b, 0x9e, 0xef, 0x45, 0x5b,
+ 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0x5e, 0x6a, 0x49, 0x51, 0xc1, 0x1a, 0x45, 0xf4, 0x12, 0x8c,
+ 0xa8, 0x0d, 0xb8, 0x5c, 0x65, 0x6f, 0x8d, 0x9a, 0xed, 0x46, 0x72, 0x1a, 0x55, 0xb1, 0x8e, 0x67,
+ 0xbf, 0x9b, 0x5e, 0x2f, 0x62, 0x07, 0x68, 0xe3, 0x6b, 0xf5, 0x3b, 0xbe, 0x85, 0xee, 0xe3, 0x6b,
+ 0xff, 0x7a, 0x11, 0x26, 0x8c, 0xc6, 0xda, 0x51, 0x1f, 0x67, 0xd6, 0x75, 0x7a, 0xcf, 0x39, 0x31,
+ 0x11, 0xfb, 0xcf, 0xee, 0xbd, 0x55, 0xf4, 0xbb, 0x90, 0xee, 0x00, 0x5e, 0x1f, 0x7d, 0x11, 0xca,
+ 0x4d, 0x27, 0x62, 0xba, 0x2b, 0x22, 0xf6, 0x5d, 0x3f, 0xc4, 0x12, 0x39, 0xc2, 0x89, 0x62, 0xed,
+ 0xaa, 0xe1, 0xb4, 0x13, 0x92, 0xf4, 0x42, 0xa6, 0xbc, 0x8f, 0xb4, 0xdf, 0x52, 0x9d, 0xa0, 0x0c,
+ 0xd2, 0x1e, 0xe6, 0x30, 0xf4, 0x0a, 0x8c, 0x86, 0x84, 0xad, 0x8a, 0x05, 0xca, 0xca, 0xb1, 0x65,
+ 0x36, 0x98, 0xf0, 0x7c, 0x58, 0x83, 0x61, 0x03, 0x33, 0x61, 0xe5, 0x87, 0xba, 0xb0, 0xf2, 0x4f,
+ 0xc3, 0x30, 0xfb, 0xa1, 0x56, 0x80, 0x9a, 0x8d, 0x65, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0x4a,
+ 0x7d, 0x2e, 0x98, 0x4f, 0xc2, 0x78, 0xd5, 0x21, 0x3b, 0x81, 0xbf, 0xe8, 0xbb, 0xad, 0xc0, 0xf3,
+ 0x63, 0x34, 0x0d, 0x03, 0xec, 0x76, 0xe0, 0x7b, 0x7b, 0x80, 0x52, 0xc0, 0x03, 0x94, 0x31, 0xb7,
+ 0x37, 0xe1, 0x4c, 0x35, 0xb8, 0xef, 0xdf, 0x77, 0x42, 0x77, 0xae, 0xb6, 0xac, 0xc9, 0xb9, 0xab,
+ 0x52, 0xce, 0xe2, 0xf6, 0x50, 0x99, 0x67, 0xaa, 0x56, 0x93, 0xdf, 0xb5, 0x4b, 0x5e, 0x93, 0xe4,
+ 0x68, 0x23, 0xfe, 0x4e, 0xc1, 0x68, 0x29, 0xc1, 0x57, 0x0f, 0x46, 0x56, 0xee, 0x83, 0xd1, 0x9b,
+ 0x50, 0xda, 0xf0, 0x48, 0xd3, 0xc5, 0x64, 0x43, 0x2c, 0xb1, 0xa7, 0xf2, 0x4d, 0x3c, 0x96, 0x28,
+ 0xa6, 0xd4, 0x3e, 0x71, 0x29, 0x6d, 0x49, 0x54, 0xc6, 0x8a, 0x0c, 0xda, 0x86, 0x49, 0x29, 0x06,
+ 0x48, 0xa8, 0x58, 0x70, 0x4f, 0x77, 0x93, 0x2d, 0x4c, 0xe2, 0xa7, 0x0f, 0xf6, 0x2b, 0x93, 0x38,
+ 0x45, 0x06, 0x77, 0x10, 0xa6, 0x62, 0xd9, 0x0e, 0x3d, 0x5a, 0x07, 0xd8, 0xf0, 0x33, 0xb1, 0x8c,
+ 0x49, 0x98, 0xac, 0xd4, 0xfe, 0x51, 0x0b, 0x1e, 0xeb, 0x18, 0x19, 0x21, 0x69, 0x1f, 0xf3, 0x2c,
+ 0xa4, 0x25, 0xdf, 0x42, 0x6f, 0xc9, 0xd7, 0xfe, 0x59, 0x0b, 0x4e, 0x2f, 0xee, 0xb4, 0xe2, 0xbd,
+ 0xaa, 0x67, 0xbe, 0xee, 0xbc, 0x0c, 0x43, 0x3b, 0xc4, 0xf5, 0xda, 0x3b, 0x62, 0xe6, 0x2a, 0xf2,
+ 0xf8, 0x59, 0x61, 0xa5, 0x87, 0xfb, 0x95, 0xb1, 0x7a, 0x1c, 0x84, 0xce, 0x26, 0xe1, 0x05, 0x58,
+ 0xa0, 0xb3, 0x43, 0xdc, 0x7b, 0x9f, 0xdc, 0xf2, 0x76, 0x3c, 0x69, 0xb2, 0xd3, 0x55, 0x77, 0x36,
+ 0x2b, 0x07, 0x74, 0xf6, 0xcd, 0xb6, 0xe3, 0xc7, 0x5e, 0xbc, 0x27, 0x1e, 0x66, 0x24, 0x11, 0x9c,
+ 0xd0, 0xb3, 0xbf, 0x6e, 0xc1, 0x84, 0x5c, 0xf7, 0x73, 0xae, 0x1b, 0x92, 0x28, 0x42, 0x33, 0x50,
+ 0xf0, 0x5a, 0xa2, 0x97, 0x20, 0x7a, 0x59, 0x58, 0xae, 0xe1, 0x82, 0xd7, 0x42, 0x35, 0x28, 0x73,
+ 0xcb, 0x9f, 0x64, 0x71, 0xf5, 0x65, 0x3f, 0xc4, 0x7a, 0xb0, 0x26, 0x6b, 0xe2, 0x84, 0x88, 0xe4,
+ 0xe0, 0xd8, 0x99, 0x59, 0x34, 0x5f, 0xbd, 0x6e, 0x88, 0x72, 0xac, 0x30, 0xd0, 0x15, 0x28, 0xf9,
+ 0x81, 0xcb, 0x0d, 0xb1, 0xf8, 0xed, 0xc7, 0x96, 0xec, 0xaa, 0x28, 0xc3, 0x0a, 0x6a, 0xff, 0xa0,
+ 0x05, 0xa3, 0xf2, 0xcb, 0xfa, 0x64, 0x26, 0xe9, 0xd6, 0x4a, 0x18, 0xc9, 0x64, 0x6b, 0x51, 0x66,
+ 0x90, 0x41, 0x0c, 0x1e, 0xb0, 0x78, 0x14, 0x1e, 0xd0, 0xfe, 0x91, 0x02, 0x8c, 0xcb, 0xee, 0xd4,
+ 0xdb, 0xeb, 0x11, 0x89, 0xd1, 0x1a, 0x94, 0x1d, 0x3e, 0xe4, 0x44, 0xae, 0xd8, 0x27, 0xb3, 0x85,
+ 0x0f, 0x63, 0x7e, 0x92, 0x6b, 0x79, 0x4e, 0xd6, 0xc6, 0x09, 0x21, 0xd4, 0x84, 0x29, 0x3f, 0x88,
+ 0xd9, 0x11, 0xad, 0xe0, 0xdd, 0x9e, 0x40, 0xd2, 0xd4, 0xcf, 0x09, 0xea, 0x53, 0xab, 0x69, 0x2a,
+ 0xb8, 0x93, 0x30, 0x5a, 0x94, 0x0a, 0x8f, 0x62, 0xbe, 0xb8, 0xa1, 0xcf, 0x42, 0xb6, 0xbe, 0xc3,
+ 0xfe, 0x55, 0x0b, 0xca, 0x12, 0xed, 0x24, 0x5e, 0xbb, 0x56, 0x60, 0x38, 0x62, 0x93, 0x20, 0x87,
+ 0xc6, 0xee, 0xd6, 0x71, 0x3e, 0x5f, 0xc9, 0xcd, 0xc3, 0xff, 0x47, 0x58, 0xd2, 0x60, 0xfa, 0x6e,
+ 0xd5, 0xfd, 0x8f, 0x88, 0xbe, 0x5b, 0xf5, 0x27, 0xe7, 0x86, 0xf9, 0x03, 0xd6, 0x67, 0x4d, 0xac,
+ 0xa5, 0x0c, 0x52, 0x2b, 0x24, 0x1b, 0xde, 0x83, 0x34, 0x83, 0x54, 0x63, 0xa5, 0x58, 0x40, 0xd1,
+ 0xdb, 0x30, 0xda, 0x90, 0x8a, 0xce, 0xe4, 0x18, 0xb8, 0xdc, 0x55, 0xe9, 0xae, 0xde, 0x67, 0xb8,
+ 0x91, 0xf6, 0x82, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0xe7, 0xf6, 0x62, 0xaf, 0xe7, 0xf6, 0x84, 0x6e,
+ 0xfe, 0xe3, 0xf3, 0x8f, 0x59, 0x30, 0xc4, 0xd5, 0x65, 0xfd, 0xe9, 0x17, 0xb5, 0xe7, 0xaa, 0x64,
+ 0xec, 0xee, 0xd2, 0x42, 0xf1, 0xfc, 0x84, 0x56, 0xa0, 0xcc, 0x7e, 0x30, 0xb5, 0x41, 0x31, 0xdf,
+ 0x3a, 0x9d, 0xb7, 0xaa, 0x77, 0xf0, 0xae, 0xac, 0x86, 0x13, 0x0a, 0xf6, 0x0f, 0x15, 0xe9, 0x51,
+ 0x95, 0xa0, 0x1a, 0x37, 0xb8, 0xf5, 0xe8, 0x6e, 0xf0, 0xc2, 0xa3, 0xba, 0xc1, 0x37, 0x61, 0xa2,
+ 0xa1, 0x3d, 0x6e, 0x25, 0x33, 0x79, 0xa5, 0xeb, 0x22, 0xd1, 0xde, 0xc1, 0xb8, 0xca, 0x68, 0xc1,
+ 0x24, 0x82, 0xd3, 0x54, 0xd1, 0xb7, 0xc3, 0x28, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0x58, 0xf8, 0x44,
+ 0xfe, 0x7a, 0xd1, 0x9b, 0x60, 0x2b, 0xb1, 0xae, 0x55, 0xc7, 0x06, 0x31, 0xfb, 0x97, 0x4a, 0x30,
+ 0xb8, 0xb8, 0x4b, 0xfc, 0xf8, 0x04, 0x0e, 0xa4, 0x06, 0x8c, 0x7b, 0xfe, 0x6e, 0xd0, 0xdc, 0x25,
+ 0x2e, 0x87, 0x1f, 0xe5, 0x72, 0x3d, 0x2b, 0x48, 0x8f, 0x2f, 0x1b, 0x24, 0x70, 0x8a, 0xe4, 0xa3,
+ 0x90, 0x30, 0xaf, 0xc3, 0x10, 0x9f, 0x7b, 0x21, 0x5e, 0x66, 0x2a, 0x83, 0xd9, 0x20, 0x8a, 0x5d,
+ 0x90, 0x48, 0xbf, 0x5c, 0xfb, 0x2c, 0xaa, 0xa3, 0x77, 0x61, 0x7c, 0xc3, 0x0b, 0xa3, 0x98, 0x8a,
+ 0x86, 0x51, 0xec, 0xec, 0xb4, 0x1e, 0x42, 0xa2, 0x54, 0xe3, 0xb0, 0x64, 0x50, 0xc2, 0x29, 0xca,
+ 0x68, 0x13, 0xc6, 0xa8, 0x90, 0x93, 0x34, 0x35, 0x7c, 0xe4, 0xa6, 0x94, 0xca, 0xe8, 0x96, 0x4e,
+ 0x08, 0x9b, 0x74, 0xe9, 0x61, 0xd2, 0x60, 0x42, 0x51, 0x89, 0x71, 0x14, 0xea, 0x30, 0xe1, 0xd2,
+ 0x10, 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcd, 0x33, 0x49, 0x33, 0x4e, 0x79, 0x07, 0xca,
+ 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x14, 0xe3, 0x57, 0xfb, 0xeb, 0xeb, 0x8a, 0xd7, 0x08, 0x03, 0x53,
+ 0x96, 0x5f, 0x94, 0x94, 0x70, 0x42, 0x14, 0x2d, 0xc0, 0x50, 0x44, 0x42, 0x8f, 0x44, 0x42, 0x45,
+ 0xde, 0x65, 0x1a, 0x19, 0x1a, 0xb7, 0xf8, 0xe4, 0xbf, 0xb1, 0xa8, 0x4a, 0x97, 0x97, 0xc3, 0xa4,
+ 0x21, 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x1c, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x80, 0xe1, 0x90, 0x34,
+ 0x99, 0xb2, 0x68, 0xac, 0xff, 0x45, 0xce, 0x75, 0x4f, 0xbc, 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40,
+ 0x21, 0xa1, 0x3c, 0x84, 0xe7, 0x6f, 0x2a, 0x63, 0x0e, 0xa1, 0xeb, 0x7e, 0x5c, 0xb4, 0x7f, 0x0a,
+ 0x27, 0x18, 0xd2, 0xf8, 0x16, 0x67, 0x54, 0x43, 0xd7, 0x61, 0x4a, 0x95, 0x2e, 0xfb, 0x51, 0xec,
+ 0xf8, 0x0d, 0xc2, 0xd4, 0xdc, 0xe5, 0x84, 0x2b, 0xc2, 0x69, 0x04, 0xdc, 0x59, 0xc7, 0xfe, 0x69,
+ 0xca, 0xce, 0xd0, 0xd1, 0x3a, 0x01, 0x5e, 0xe0, 0x75, 0x93, 0x17, 0x38, 0x97, 0x3b, 0x73, 0x39,
+ 0x7c, 0xc0, 0x81, 0x05, 0x23, 0xda, 0xcc, 0x26, 0x6b, 0xd6, 0xea, 0xb2, 0x66, 0xdb, 0x30, 0x49,
+ 0x57, 0xfa, 0xed, 0xf5, 0x88, 0x84, 0xbb, 0xc4, 0x65, 0x0b, 0xb3, 0xf0, 0x70, 0x0b, 0x53, 0xbd,
+ 0x32, 0xdf, 0x4a, 0x11, 0xc4, 0x1d, 0x4d, 0xa0, 0x97, 0xa5, 0xe6, 0xa4, 0x68, 0x18, 0x69, 0x71,
+ 0xad, 0xc8, 0xe1, 0x7e, 0x65, 0x52, 0xfb, 0x10, 0x5d, 0x53, 0x62, 0xbf, 0x23, 0xbf, 0x51, 0xbd,
+ 0xe6, 0x37, 0xd4, 0x62, 0x49, 0xbd, 0xe6, 0xab, 0xe5, 0x80, 0x13, 0x1c, 0xba, 0x47, 0xa9, 0x08,
+ 0x92, 0x7e, 0xcd, 0xa7, 0x02, 0x0a, 0x66, 0x10, 0xfb, 0x05, 0x80, 0xc5, 0x07, 0xa4, 0xc1, 0x97,
+ 0xba, 0xfe, 0x00, 0x69, 0xe5, 0x3f, 0x40, 0xda, 0xff, 0xd9, 0x82, 0xf1, 0xa5, 0x05, 0x43, 0x4c,
+ 0x9c, 0x05, 0xe0, 0xb2, 0xd1, 0xbd, 0x7b, 0xab, 0x52, 0xb7, 0xce, 0xd5, 0xa3, 0xaa, 0x14, 0x6b,
+ 0x18, 0xe8, 0x1c, 0x14, 0x9b, 0x6d, 0x5f, 0x88, 0x2c, 0xc3, 0x07, 0xfb, 0x95, 0xe2, 0xad, 0xb6,
+ 0x8f, 0x69, 0x99, 0x66, 0x21, 0x58, 0xec, 0xdb, 0x42, 0xb0, 0xa7, 0xa7, 0x1e, 0xaa, 0xc0, 0xe0,
+ 0xfd, 0xfb, 0x9e, 0xcb, 0xfd, 0x21, 0x84, 0xde, 0xff, 0xde, 0xbd, 0xe5, 0x6a, 0x84, 0x79, 0xb9,
+ 0xfd, 0x95, 0x22, 0xcc, 0x2c, 0x35, 0xc9, 0x83, 0x0f, 0xe8, 0x13, 0xd2, 0xaf, 0x7d, 0xe3, 0xd1,
+ 0xf8, 0xc5, 0xa3, 0xda, 0xb0, 0xf6, 0x1e, 0x8f, 0x0d, 0x18, 0xe6, 0x8f, 0xd9, 0xd2, 0x43, 0xe4,
+ 0xd5, 0xac, 0xd6, 0xf3, 0x07, 0x64, 0x96, 0x3f, 0x8a, 0x0b, 0x03, 0x77, 0x75, 0xd3, 0x8a, 0x52,
+ 0x2c, 0x89, 0xcf, 0x7c, 0x06, 0x46, 0x75, 0xcc, 0x23, 0x59, 0x93, 0xff, 0x95, 0x22, 0x4c, 0xd2,
+ 0x1e, 0x3c, 0xd2, 0x89, 0xb8, 0xd3, 0x39, 0x11, 0xc7, 0x6d, 0x51, 0xdc, 0x7b, 0x36, 0xde, 0x4e,
+ 0xcf, 0xc6, 0xf3, 0x79, 0xb3, 0x71, 0xd2, 0x73, 0xf0, 0x3d, 0x16, 0x9c, 0x5a, 0x6a, 0x06, 0x8d,
+ 0xed, 0x94, 0xd5, 0xef, 0x4b, 0x30, 0x42, 0xcf, 0xf1, 0xc8, 0x70, 0x48, 0x33, 0x5c, 0x14, 0x05,
+ 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0x3b, 0x77, 0x96, 0xab, 0x59, 0x9e, 0x8d, 0x02, 0x84, 0x75, 0x3c,
+ 0xfb, 0x6b, 0x16, 0x5c, 0xb8, 0xbe, 0xb0, 0x98, 0x2c, 0xc5, 0x0e, 0xe7, 0x4a, 0x2a, 0x05, 0xba,
+ 0x5a, 0x57, 0x12, 0x29, 0xb0, 0xca, 0x7a, 0x21, 0xa0, 0x1f, 0x15, 0xc7, 0xe1, 0x9f, 0xb2, 0xe0,
+ 0xd4, 0x75, 0x2f, 0xa6, 0xd7, 0x72, 0xda, 0xcd, 0x8f, 0xde, 0xcb, 0x91, 0x17, 0x07, 0xe1, 0x5e,
+ 0xda, 0xcd, 0x0f, 0x2b, 0x08, 0xd6, 0xb0, 0x78, 0xcb, 0xbb, 0x1e, 0x33, 0xa3, 0x2a, 0x98, 0xaa,
+ 0x28, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0x1f, 0xe6, 0x7a, 0x21, 0x13, 0x25, 0xf6, 0xc4, 0x09, 0xab,
+ 0x3e, 0xac, 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0xff, 0xc8, 0x82, 0xca, 0xf5, 0x66, 0x3b, 0x8a, 0x49,
+ 0xb8, 0x11, 0xe5, 0x9c, 0x8e, 0x2f, 0x40, 0x99, 0x48, 0xc1, 0x5d, 0xf4, 0x5a, 0xb1, 0x9a, 0x4a,
+ 0xa2, 0xe7, 0xde, 0x86, 0x0a, 0xaf, 0x0f, 0x1f, 0x82, 0xa3, 0x19, 0x81, 0x2f, 0x01, 0x22, 0x7a,
+ 0x5b, 0xba, 0xfb, 0x25, 0xf3, 0xe3, 0x5a, 0xec, 0x80, 0xe2, 0x8c, 0x1a, 0xf6, 0x8f, 0x5a, 0x70,
+ 0x46, 0x7d, 0xf0, 0x47, 0xee, 0x33, 0xed, 0x9f, 0x2f, 0xc0, 0xd8, 0x8d, 0xb5, 0xb5, 0xda, 0x75,
+ 0x12, 0x8b, 0x6b, 0xbb, 0xb7, 0x6e, 0x1d, 0x6b, 0x2a, 0xc2, 0x6e, 0x52, 0x60, 0x3b, 0xf6, 0x9a,
+ 0xb3, 0xdc, 0x8b, 0x7f, 0x76, 0xd9, 0x8f, 0x6f, 0x87, 0xf5, 0x38, 0xf4, 0xfc, 0xcd, 0x4c, 0xa5,
+ 0xa2, 0x64, 0x2e, 0x8a, 0x79, 0xcc, 0x05, 0x7a, 0x01, 0x86, 0x58, 0x18, 0x01, 0x39, 0x09, 0x8f,
+ 0x2b, 0x21, 0x8a, 0x95, 0x1e, 0xee, 0x57, 0xca, 0x77, 0xf0, 0x32, 0xff, 0x83, 0x05, 0x2a, 0xba,
+ 0x03, 0x23, 0x5b, 0x71, 0xdc, 0xba, 0x41, 0x1c, 0x97, 0x84, 0xf2, 0x38, 0xbc, 0x98, 0x75, 0x1c,
+ 0xd2, 0x41, 0xe0, 0x68, 0xc9, 0x09, 0x92, 0x94, 0x45, 0x58, 0xa7, 0x63, 0xd7, 0x01, 0x12, 0xd8,
+ 0x31, 0x29, 0x54, 0xec, 0xdf, 0xb7, 0x60, 0x98, 0x7b, 0x74, 0x86, 0xe8, 0x35, 0x18, 0x20, 0x0f,
+ 0x48, 0x43, 0xb0, 0xca, 0x99, 0x1d, 0x4e, 0x38, 0x2d, 0xfe, 0x3c, 0x40, 0xff, 0x63, 0x56, 0x0b,
+ 0xdd, 0x80, 0x61, 0xda, 0xdb, 0xeb, 0xca, 0xbd, 0xf5, 0x89, 0xbc, 0x2f, 0x56, 0xd3, 0xce, 0x99,
+ 0x33, 0x51, 0x84, 0x65, 0x75, 0xa6, 0xea, 0x6e, 0xb4, 0xea, 0xf4, 0xc4, 0x8e, 0xbb, 0x31, 0x16,
+ 0x6b, 0x0b, 0x35, 0x8e, 0x24, 0xa8, 0x71, 0x55, 0xb7, 0x2c, 0xc4, 0x09, 0x11, 0x7b, 0x0d, 0xca,
+ 0x74, 0x52, 0xe7, 0x9a, 0x9e, 0xd3, 0x5d, 0xcb, 0xfe, 0x0c, 0x94, 0xa5, 0xc6, 0x3b, 0x12, 0x9e,
+ 0x5c, 0x8c, 0xaa, 0x54, 0x88, 0x47, 0x38, 0x81, 0xdb, 0x1b, 0x70, 0x9a, 0x99, 0x3a, 0x38, 0xf1,
+ 0x96, 0xb1, 0xc7, 0x7a, 0x2f, 0xe6, 0x67, 0x85, 0xe4, 0xc9, 0x67, 0x66, 0x5a, 0x73, 0x96, 0x18,
+ 0x95, 0x14, 0x13, 0x29, 0xd4, 0xfe, 0xc3, 0x01, 0x78, 0x7c, 0xb9, 0x9e, 0xef, 0xec, 0xfb, 0x0a,
+ 0x8c, 0x72, 0xbe, 0x94, 0x2e, 0x6d, 0xa7, 0x29, 0xda, 0x55, 0x0f, 0x81, 0x6b, 0x1a, 0x0c, 0x1b,
+ 0x98, 0xe8, 0x02, 0x14, 0xbd, 0xf7, 0xfc, 0xb4, 0xdd, 0xf1, 0xf2, 0x9b, 0xab, 0x98, 0x96, 0x53,
+ 0x30, 0x65, 0x71, 0xf9, 0xdd, 0xa1, 0xc0, 0x8a, 0xcd, 0x7d, 0x1d, 0xc6, 0xbd, 0xa8, 0x11, 0x79,
+ 0xcb, 0x3e, 0x3d, 0x67, 0xb4, 0x93, 0x4a, 0x69, 0x45, 0x68, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0xb5,
+ 0x8b, 0x6c, 0xb0, 0x6f, 0x36, 0xb9, 0xa7, 0x6b, 0x13, 0x95, 0x00, 0x5a, 0xec, 0xeb, 0x22, 0x66,
+ 0xc5, 0x27, 0x24, 0x00, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0x2a, 0x72, 0x36, 0xb6, 0x9c, 0xd6, 0x5c,
+ 0x3b, 0xde, 0xaa, 0x7a, 0x51, 0x23, 0xd8, 0x25, 0xe1, 0x1e, 0xd3, 0x16, 0x94, 0x12, 0x91, 0x53,
+ 0x01, 0x16, 0x6e, 0xcc, 0xd5, 0x28, 0x26, 0xee, 0xac, 0x63, 0xb2, 0xc1, 0x70, 0x1c, 0x6c, 0xf0,
+ 0x1c, 0x4c, 0xc8, 0x66, 0xea, 0x24, 0x62, 0x97, 0xe2, 0x08, 0xeb, 0x98, 0xb2, 0x2d, 0x16, 0xc5,
+ 0xaa, 0x5b, 0x69, 0x7c, 0xf4, 0x32, 0x8c, 0x79, 0xbe, 0x17, 0x7b, 0x4e, 0x1c, 0x84, 0x8c, 0xa5,
+ 0xe0, 0x8a, 0x01, 0x66, 0xba, 0xb7, 0xac, 0x03, 0xb0, 0x89, 0x67, 0xff, 0xf7, 0x01, 0x98, 0x62,
+ 0xd3, 0xf6, 0xcd, 0x15, 0xf6, 0x91, 0x59, 0x61, 0x77, 0x3a, 0x57, 0xd8, 0x71, 0xf0, 0xf7, 0x1f,
+ 0xe6, 0x32, 0x7b, 0x17, 0xca, 0xca, 0xf8, 0x59, 0x7a, 0x3f, 0x58, 0x39, 0xde, 0x0f, 0xbd, 0xb9,
+ 0x0f, 0xf9, 0x6e, 0x5d, 0xcc, 0x7c, 0xb7, 0xfe, 0x7b, 0x16, 0x24, 0x36, 0xa0, 0xe8, 0x06, 0x94,
+ 0x5b, 0x01, 0xb3, 0xb3, 0x08, 0xa5, 0xf1, 0xd2, 0xe3, 0x99, 0x17, 0x15, 0xbf, 0x14, 0xf9, 0xf8,
+ 0xd5, 0x64, 0x0d, 0x9c, 0x54, 0x46, 0xf3, 0x30, 0xdc, 0x0a, 0x49, 0x3d, 0x66, 0x3e, 0xbf, 0x3d,
+ 0xe9, 0xf0, 0x35, 0xc2, 0xf1, 0xb1, 0xac, 0x68, 0xff, 0x82, 0x05, 0xc0, 0x9f, 0x86, 0x1d, 0x7f,
+ 0x93, 0x9c, 0x80, 0xba, 0xbb, 0x0a, 0x03, 0x51, 0x8b, 0x34, 0xba, 0x59, 0xc0, 0x24, 0xfd, 0xa9,
+ 0xb7, 0x48, 0x23, 0x19, 0x70, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0xf7, 0x02, 0x8c, 0x27, 0x68, 0xcb,
+ 0x31, 0xd9, 0x41, 0xcf, 0x19, 0x3e, 0x80, 0xe7, 0x52, 0x3e, 0x80, 0x65, 0x86, 0xad, 0x69, 0x56,
+ 0xdf, 0x85, 0xe2, 0x8e, 0xf3, 0x40, 0xa8, 0xce, 0x9e, 0xe9, 0xde, 0x0d, 0x4a, 0x7f, 0x76, 0xc5,
+ 0x79, 0xc0, 0x85, 0xc4, 0x67, 0xe4, 0x02, 0x59, 0x71, 0x1e, 0x1c, 0x72, 0x3b, 0x17, 0x76, 0x48,
+ 0xdd, 0xf2, 0xa2, 0xf8, 0x4b, 0xff, 0x2d, 0xf9, 0xcf, 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0xf9,
+ 0xe2, 0xa1, 0xb4, 0xaf, 0xb6, 0x3c, 0x3f, 0xdd, 0x96, 0xe7, 0xf7, 0xd1, 0x96, 0xe7, 0xa3, 0xf7,
+ 0x61, 0x58, 0x18, 0x25, 0x08, 0x9f, 0xfb, 0xab, 0x7d, 0xb4, 0x27, 0x6c, 0x1a, 0x78, 0x9b, 0x57,
+ 0xa5, 0x10, 0x2c, 0x4a, 0x7b, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xdb, 0x82, 0x71, 0xf1, 0x1b, 0x93,
+ 0xf7, 0xda, 0x24, 0x8a, 0x05, 0xef, 0xf9, 0xe9, 0xfe, 0xfb, 0x20, 0x2a, 0xf2, 0xae, 0x7c, 0x5a,
+ 0x1e, 0xb3, 0x26, 0xb0, 0x67, 0x8f, 0x52, 0xbd, 0x40, 0xff, 0xc4, 0x82, 0xd3, 0x3b, 0xce, 0x03,
+ 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xec, 0x05, 0xc2, 0x58, 0xff, 0xb5, 0xfe, 0xa6, 0xbf, 0xa3, 0x3a,
+ 0xef, 0xa4, 0xb4, 0xeb, 0x3d, 0x9d, 0x85, 0xd2, 0xb3, 0xab, 0x99, 0xfd, 0x9a, 0xd9, 0x80, 0x92,
+ 0x5c, 0x6f, 0x19, 0xaa, 0x86, 0xaa, 0xce, 0x58, 0x1f, 0xd9, 0x26, 0x44, 0x77, 0xc4, 0xa3, 0xed,
+ 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0x46, 0xf5, 0x35, 0xf6, 0x48, 0xdb, 0x7a, 0x0f, 0x4e,
+ 0x65, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0x38, 0x97, 0xbb, 0x3e, 0x1e, 0x65, 0xc3, 0xf6, 0xcf,
+ 0x5b, 0xfa, 0x39, 0x78, 0x02, 0x6f, 0x0e, 0x0b, 0xe6, 0x9b, 0xc3, 0xc5, 0xee, 0x3b, 0x27, 0xe7,
+ 0xe1, 0xe1, 0x6d, 0xbd, 0xd3, 0xf4, 0x54, 0x47, 0x6f, 0xc0, 0x50, 0x93, 0x96, 0x48, 0x6b, 0x18,
+ 0xbb, 0xf7, 0x8e, 0x4c, 0x78, 0x29, 0x56, 0x1e, 0x61, 0x41, 0xc1, 0xfe, 0x65, 0x0b, 0x06, 0x4e,
+ 0x60, 0x24, 0xb0, 0x39, 0x12, 0xcf, 0xe5, 0x92, 0x16, 0xe1, 0x00, 0x67, 0xb1, 0x73, 0x7f, 0xf1,
+ 0x41, 0x4c, 0xfc, 0x88, 0x89, 0x8a, 0x99, 0x03, 0xf3, 0x1d, 0x70, 0xea, 0x56, 0xe0, 0xb8, 0xf3,
+ 0x4e, 0xd3, 0xf1, 0x1b, 0x24, 0x5c, 0xf6, 0x37, 0x7b, 0x9a, 0x65, 0xe9, 0x46, 0x54, 0x85, 0x5e,
+ 0x46, 0x54, 0xf6, 0x16, 0x20, 0xbd, 0x01, 0x61, 0xb8, 0x8a, 0x61, 0xd8, 0xe3, 0x4d, 0x89, 0xe1,
+ 0x7f, 0x2a, 0x9b, 0xbb, 0xeb, 0xe8, 0x99, 0x66, 0x92, 0xc9, 0x0b, 0xb0, 0x24, 0x64, 0xbf, 0x02,
+ 0x99, 0xce, 0x6a, 0xbd, 0xd5, 0x06, 0xf6, 0xe7, 0x61, 0x8a, 0xd5, 0x3c, 0xa2, 0x48, 0x6b, 0xa7,
+ 0xb4, 0x92, 0x19, 0x91, 0x69, 0xec, 0x2f, 0x5b, 0x30, 0xb1, 0x9a, 0x0a, 0xd8, 0x71, 0x99, 0x3d,
+ 0x80, 0x66, 0x28, 0xc3, 0xeb, 0xac, 0x14, 0x0b, 0xe8, 0xb1, 0xeb, 0xa0, 0xfe, 0xdc, 0x82, 0xc4,
+ 0x7f, 0xf4, 0x04, 0x18, 0xaf, 0x05, 0x83, 0xf1, 0xca, 0xd4, 0x8d, 0xa8, 0xee, 0xe4, 0xf1, 0x5d,
+ 0xe8, 0xa6, 0x0a, 0x96, 0xd0, 0x45, 0x2d, 0x92, 0x90, 0xe1, 0xae, 0xf5, 0xe3, 0x66, 0x44, 0x05,
+ 0x19, 0x3e, 0x81, 0xd9, 0x4e, 0x29, 0xdc, 0x8f, 0x88, 0xed, 0x94, 0xea, 0x4f, 0xce, 0x0e, 0xad,
+ 0x69, 0x5d, 0x66, 0x27, 0xd7, 0xb7, 0x32, 0x5b, 0x78, 0xa7, 0xe9, 0xbd, 0x4f, 0x54, 0xc4, 0x97,
+ 0x8a, 0xb0, 0x6d, 0x17, 0xa5, 0x87, 0xfb, 0x95, 0x31, 0xf5, 0x8f, 0x47, 0x98, 0x4b, 0xaa, 0xd8,
+ 0x37, 0x60, 0x22, 0x35, 0x60, 0xe8, 0x25, 0x18, 0x6c, 0x6d, 0x39, 0x11, 0x49, 0xd9, 0x8b, 0x0e,
+ 0xd6, 0x68, 0xe1, 0xe1, 0x7e, 0x65, 0x5c, 0x55, 0x60, 0x25, 0x98, 0x63, 0xdb, 0xff, 0xcb, 0x82,
+ 0x81, 0xd5, 0xc0, 0x3d, 0x89, 0xc5, 0xf4, 0xba, 0xb1, 0x98, 0xce, 0xe7, 0xc5, 0xe7, 0xcc, 0x5d,
+ 0x47, 0x4b, 0xa9, 0x75, 0x74, 0x31, 0x97, 0x42, 0xf7, 0x25, 0xb4, 0x03, 0x23, 0x2c, 0xea, 0xa7,
+ 0xb0, 0x5f, 0x7d, 0xc1, 0x90, 0x01, 0x2a, 0x29, 0x19, 0x60, 0x42, 0x43, 0xd5, 0x24, 0x81, 0xa7,
+ 0x61, 0x58, 0xd8, 0x50, 0xa6, 0xad, 0xfe, 0x05, 0x2e, 0x96, 0x70, 0xfb, 0xc7, 0x8a, 0x60, 0x44,
+ 0x19, 0x45, 0xbf, 0x6a, 0xc1, 0x6c, 0xc8, 0xdd, 0x28, 0xdd, 0x6a, 0x3b, 0xf4, 0xfc, 0xcd, 0x7a,
+ 0x63, 0x8b, 0xb8, 0xed, 0xa6, 0xe7, 0x6f, 0x2e, 0x6f, 0xfa, 0x81, 0x2a, 0x5e, 0x7c, 0x40, 0x1a,
+ 0x6d, 0xf6, 0x10, 0xd2, 0x23, 0xa4, 0xa9, 0xb2, 0x51, 0xba, 0x76, 0xb0, 0x5f, 0x99, 0xc5, 0x47,
+ 0xa2, 0x8d, 0x8f, 0xd8, 0x17, 0xf4, 0x35, 0x0b, 0xae, 0xf2, 0xe0, 0x9b, 0xfd, 0xf7, 0xbf, 0x8b,
+ 0xc4, 0x54, 0x93, 0xa4, 0x12, 0x22, 0x6b, 0x24, 0xdc, 0x99, 0x7f, 0x59, 0x0c, 0xe8, 0xd5, 0xda,
+ 0xd1, 0xda, 0xc2, 0x47, 0xed, 0x9c, 0xfd, 0x6f, 0x8a, 0x30, 0x26, 0x3c, 0xf8, 0x45, 0x68, 0x98,
+ 0x97, 0x8c, 0x25, 0xf1, 0x44, 0x6a, 0x49, 0x4c, 0x19, 0xc8, 0xc7, 0x13, 0x15, 0x26, 0x82, 0xa9,
+ 0xa6, 0x13, 0xc5, 0x37, 0x88, 0x13, 0xc6, 0xeb, 0xc4, 0xe1, 0xb6, 0x3b, 0xc5, 0x23, 0xdb, 0x19,
+ 0x29, 0x15, 0xcd, 0xad, 0x34, 0x31, 0xdc, 0x49, 0x1f, 0xed, 0x02, 0x62, 0x06, 0x48, 0xa1, 0xe3,
+ 0x47, 0xfc, 0x5b, 0x3c, 0xf1, 0x66, 0x70, 0xb4, 0x56, 0x67, 0x44, 0xab, 0xe8, 0x56, 0x07, 0x35,
+ 0x9c, 0xd1, 0x82, 0x66, 0x58, 0x36, 0xd8, 0xaf, 0x61, 0xd9, 0x50, 0x0f, 0xd7, 0x1a, 0x1f, 0x26,
+ 0x3b, 0x82, 0x30, 0xbc, 0x05, 0x65, 0x65, 0x00, 0x28, 0x0e, 0x9d, 0xee, 0xb1, 0x4c, 0xd2, 0x14,
+ 0xb8, 0x1a, 0x25, 0x31, 0x3e, 0x4d, 0xc8, 0xd9, 0xff, 0xb4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15,
+ 0x4a, 0x4e, 0x14, 0x79, 0x9b, 0x3e, 0x71, 0xc5, 0x8e, 0xfd, 0x78, 0xde, 0x8e, 0x35, 0x9a, 0x61,
+ 0x46, 0x98, 0x73, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x0d, 0x6e, 0x21, 0xb5, 0x2b, 0x79, 0xfe, 0xfe,
+ 0xa8, 0x81, 0xb4, 0xa1, 0xda, 0x25, 0x58, 0xd4, 0x47, 0x5f, 0xe0, 0x26, 0x6c, 0x37, 0xfd, 0xe0,
+ 0xbe, 0x7f, 0x3d, 0x08, 0xa4, 0xdb, 0x5d, 0x7f, 0x04, 0xa7, 0xa4, 0xe1, 0x9a, 0xaa, 0x8e, 0x4d,
+ 0x6a, 0xfd, 0x05, 0x2a, 0xfa, 0x4e, 0x38, 0x45, 0x49, 0x9b, 0xce, 0x33, 0x11, 0x22, 0x30, 0x21,
+ 0xc2, 0x43, 0xc8, 0x32, 0x31, 0x76, 0x99, 0xec, 0xbc, 0x59, 0x3b, 0x51, 0xfa, 0xdd, 0x34, 0x49,
+ 0xe0, 0x34, 0x4d, 0xfb, 0x27, 0x2d, 0x60, 0x66, 0xff, 0x27, 0xc0, 0x32, 0x7c, 0xd6, 0x64, 0x19,
+ 0xa6, 0xf3, 0x06, 0x39, 0x87, 0x5b, 0x78, 0x91, 0xaf, 0xac, 0x5a, 0x18, 0x3c, 0xd8, 0x13, 0xe6,
+ 0x03, 0xbd, 0x39, 0x59, 0xfb, 0xff, 0x5a, 0xfc, 0x10, 0x53, 0x9e, 0xf8, 0xe8, 0xbb, 0xa0, 0xd4,
+ 0x70, 0x5a, 0x4e, 0x83, 0x87, 0xc4, 0xce, 0xd5, 0xea, 0x18, 0x95, 0x66, 0x17, 0x44, 0x0d, 0xae,
+ 0xa5, 0x90, 0x61, 0x46, 0x4a, 0xb2, 0xb8, 0xa7, 0x66, 0x42, 0x35, 0x39, 0xb3, 0x0d, 0x63, 0x06,
+ 0xb1, 0x47, 0x2a, 0xd2, 0x7e, 0x17, 0xbf, 0x62, 0x55, 0x58, 0x9c, 0x1d, 0x98, 0xf2, 0xb5, 0xff,
+ 0xf4, 0x42, 0x91, 0x62, 0xca, 0xc7, 0x7b, 0x5d, 0xa2, 0xec, 0xf6, 0xd1, 0xdc, 0x1a, 0x52, 0x64,
+ 0x70, 0x27, 0x65, 0xfb, 0xc7, 0x2d, 0x78, 0x4c, 0x47, 0xd4, 0x82, 0x24, 0xf4, 0xd2, 0x13, 0x57,
+ 0xa1, 0x14, 0xb4, 0x48, 0xe8, 0xc4, 0x41, 0x28, 0x6e, 0x8d, 0x2b, 0x72, 0xd0, 0x6f, 0x8b, 0xf2,
+ 0x43, 0x11, 0x50, 0x52, 0x52, 0x97, 0xe5, 0x58, 0xd5, 0xa4, 0x72, 0x0c, 0x1b, 0x8c, 0x48, 0x04,
+ 0xb0, 0x60, 0x67, 0x00, 0x7b, 0x32, 0x8d, 0xb0, 0x80, 0xd8, 0x7f, 0x68, 0xf1, 0x85, 0xa5, 0x77,
+ 0x1d, 0xbd, 0x07, 0x93, 0x3b, 0x4e, 0xdc, 0xd8, 0x5a, 0x7c, 0xd0, 0x0a, 0xb9, 0x7a, 0x5c, 0x8e,
+ 0xd3, 0x33, 0xbd, 0xc6, 0x49, 0xfb, 0xc8, 0xc4, 0x2a, 0x6f, 0x25, 0x45, 0x0c, 0x77, 0x90, 0x47,
+ 0xeb, 0x30, 0xc2, 0xca, 0x98, 0xf9, 0x77, 0xd4, 0x8d, 0x35, 0xc8, 0x6b, 0x4d, 0xbd, 0x3a, 0xaf,
+ 0x24, 0x74, 0xb0, 0x4e, 0xd4, 0xfe, 0x52, 0x91, 0xef, 0x76, 0xc6, 0x6d, 0x3f, 0x0d, 0xc3, 0xad,
+ 0xc0, 0x5d, 0x58, 0xae, 0x62, 0x31, 0x0b, 0xea, 0x1a, 0xa9, 0xf1, 0x62, 0x2c, 0xe1, 0xe8, 0x55,
+ 0x00, 0xf2, 0x20, 0x26, 0xa1, 0xef, 0x34, 0x95, 0x95, 0x8c, 0xb2, 0x0b, 0xad, 0x06, 0xab, 0x41,
+ 0x7c, 0x27, 0x22, 0xdf, 0xb1, 0xa8, 0x50, 0xb0, 0x86, 0x8e, 0xae, 0x01, 0xb4, 0xc2, 0x60, 0xd7,
+ 0x73, 0x99, 0x3f, 0x61, 0xd1, 0xb4, 0x21, 0xa9, 0x29, 0x08, 0xd6, 0xb0, 0xd0, 0xab, 0x30, 0xd6,
+ 0xf6, 0x23, 0xce, 0xa1, 0x38, 0xeb, 0x22, 0x1c, 0x63, 0x29, 0xb1, 0x6e, 0xb8, 0xa3, 0x03, 0xb1,
+ 0x89, 0x8b, 0xe6, 0x60, 0x28, 0x76, 0x98, 0x4d, 0xc4, 0x60, 0xbe, 0x31, 0xe7, 0x1a, 0xc5, 0xd0,
+ 0x03, 0x32, 0xd3, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x49, 0xe7, 0x0c, 0x7e, 0xd6, 0x0b, 0x2b, 0xea,
+ 0xfe, 0xee, 0x05, 0xcd, 0x35, 0x43, 0x58, 0x67, 0x1b, 0xb4, 0xec, 0xaf, 0x95, 0x01, 0x12, 0x76,
+ 0x1c, 0xbd, 0xdf, 0x71, 0x1e, 0x3d, 0xdb, 0x9d, 0x81, 0x3f, 0xbe, 0xc3, 0x08, 0x7d, 0x9f, 0x05,
+ 0x23, 0x4e, 0xb3, 0x19, 0x34, 0x9c, 0x98, 0x8d, 0x72, 0xa1, 0xfb, 0x79, 0x28, 0xda, 0x9f, 0x4b,
+ 0x6a, 0xf0, 0x2e, 0xbc, 0x20, 0x17, 0x9e, 0x06, 0xe9, 0xd9, 0x0b, 0xbd, 0x61, 0xf4, 0x29, 0x29,
+ 0xa5, 0xf1, 0xe5, 0x31, 0x93, 0x96, 0xd2, 0xca, 0xec, 0xe8, 0xd7, 0x04, 0x34, 0x74, 0xc7, 0x88,
+ 0xb4, 0x37, 0x90, 0x1f, 0x74, 0xc2, 0xe0, 0x4a, 0x7b, 0x05, 0xd9, 0x43, 0x35, 0xdd, 0x9b, 0x6c,
+ 0x30, 0x3f, 0x32, 0x8b, 0x26, 0xfe, 0xf4, 0xf0, 0x24, 0x7b, 0x17, 0x26, 0x5c, 0xf3, 0x6e, 0x17,
+ 0xab, 0xe9, 0xa9, 0x3c, 0xba, 0x29, 0x56, 0x20, 0xb9, 0xcd, 0x53, 0x00, 0x9c, 0x26, 0x8c, 0x6a,
+ 0xdc, 0xaf, 0x6f, 0xd9, 0xdf, 0x08, 0x84, 0x35, 0xbe, 0x9d, 0x3b, 0x97, 0x7b, 0x51, 0x4c, 0x76,
+ 0x28, 0x66, 0x72, 0x69, 0xaf, 0x8a, 0xba, 0x58, 0x51, 0x41, 0x6f, 0xc0, 0x10, 0x73, 0x0c, 0x8e,
+ 0xa6, 0x4b, 0xf9, 0xca, 0x44, 0x33, 0xa6, 0x45, 0xb2, 0xa9, 0xd8, 0xdf, 0x08, 0x0b, 0x0a, 0xe8,
+ 0x86, 0x0c, 0x7c, 0x13, 0x2d, 0xfb, 0x77, 0x22, 0xc2, 0x02, 0xdf, 0x94, 0xe7, 0x3f, 0x9e, 0xc4,
+ 0xb4, 0xe1, 0xe5, 0x99, 0xa9, 0x17, 0x8c, 0x9a, 0x94, 0x39, 0x12, 0xff, 0x65, 0x46, 0x87, 0x69,
+ 0xc8, 0xef, 0x9e, 0x99, 0xf5, 0x21, 0x19, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x19, 0x4d,
+ 0xbe, 0x73, 0x85, 0x3d, 0x7f, 0xaf, 0xfd, 0xcf, 0xe5, 0x6b, 0x76, 0xc9, 0xf0, 0x12, 0x2c, 0xea,
+ 0x9f, 0xe8, 0xad, 0x3f, 0xe3, 0xc3, 0x64, 0x7a, 0x8b, 0x3e, 0x52, 0x2e, 0xe3, 0xf7, 0x07, 0x60,
+ 0xdc, 0x5c, 0x52, 0xe8, 0x2a, 0x94, 0x05, 0x11, 0x15, 0x85, 0x55, 0xed, 0x92, 0x15, 0x09, 0xc0,
+ 0x09, 0x0e, 0x0b, 0xbe, 0xcb, 0xaa, 0x6b, 0x76, 0x98, 0x49, 0xf0, 0x5d, 0x05, 0xc1, 0x1a, 0x16,
+ 0x95, 0x97, 0xd6, 0x83, 0x20, 0x56, 0x97, 0x8a, 0x5a, 0x77, 0xf3, 0xac, 0x14, 0x0b, 0x28, 0xbd,
+ 0x4c, 0xb6, 0x49, 0xe8, 0x93, 0xa6, 0x19, 0xdc, 0x4d, 0x5d, 0x26, 0x37, 0x75, 0x20, 0x36, 0x71,
+ 0xe9, 0x2d, 0x19, 0x44, 0x6c, 0x21, 0x0b, 0xa9, 0x2c, 0xb1, 0x6b, 0xad, 0x73, 0x17, 0x7b, 0x09,
+ 0x47, 0x9f, 0x87, 0xc7, 0x94, 0x47, 0x3c, 0xe6, 0x8a, 0x6a, 0xd9, 0xe2, 0x90, 0xa1, 0x44, 0x79,
+ 0x6c, 0x21, 0x1b, 0x0d, 0xe7, 0xd5, 0x47, 0xaf, 0xc3, 0xb8, 0xe0, 0xdc, 0x25, 0xc5, 0x61, 0xd3,
+ 0x76, 0xe2, 0xa6, 0x01, 0xc5, 0x29, 0x6c, 0x19, 0x9e, 0x8e, 0x31, 0xcf, 0x92, 0x42, 0xa9, 0x33,
+ 0x3c, 0x9d, 0x0e, 0xc7, 0x1d, 0x35, 0xd0, 0x1c, 0x4c, 0x70, 0xd6, 0xca, 0xf3, 0x37, 0xf9, 0x9c,
+ 0x08, 0x77, 0x1b, 0xb5, 0xa5, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0xaf, 0xc0, 0xa8, 0x13, 0x36,
+ 0xb6, 0xbc, 0x98, 0x34, 0xe2, 0x76, 0xc8, 0xfd, 0x70, 0x34, 0xe3, 0x93, 0x39, 0x0d, 0x86, 0x0d,
+ 0x4c, 0xfb, 0x7d, 0x38, 0x95, 0xe1, 0xa9, 0x47, 0x17, 0x8e, 0xd3, 0xf2, 0xe4, 0x37, 0xa5, 0x2c,
+ 0x54, 0xe7, 0x6a, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba, 0x3a, 0x99, 0x47, 0x9f, 0x96, 0xc0, 0x45,
+ 0xad, 0xce, 0x25, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0xbf, 0x0b, 0x30, 0x91, 0xa1, 0x7c, 0x67, 0x49,
+ 0x44, 0x52, 0xb2, 0x47, 0x92, 0x33, 0xc4, 0x8c, 0x76, 0x58, 0x38, 0x42, 0xb4, 0xc3, 0x62, 0xaf,
+ 0x68, 0x87, 0x03, 0x1f, 0x24, 0xda, 0xa1, 0x39, 0x62, 0x83, 0x7d, 0x8d, 0x58, 0x46, 0x84, 0xc4,
+ 0xa1, 0x23, 0x46, 0x48, 0x34, 0x06, 0x7d, 0xb8, 0x8f, 0x41, 0xff, 0xa1, 0x02, 0x4c, 0xa6, 0x8d,
+ 0xe4, 0x4e, 0x40, 0x1d, 0xfb, 0x86, 0xa1, 0x8e, 0xcd, 0x4e, 0xc9, 0x93, 0x36, 0xdd, 0xcb, 0x53,
+ 0xcd, 0xe2, 0x94, 0x6a, 0xf6, 0x93, 0x7d, 0x51, 0xeb, 0xae, 0xa6, 0xfd, 0x07, 0x05, 0x38, 0x93,
+ 0xae, 0xb2, 0xd0, 0x74, 0xbc, 0x9d, 0x13, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, 0xd7, 0xcf, 0xd7,
+ 0xb0, 0xae, 0xe5, 0x0e, 0xd0, 0xbd, 0xd4, 0x00, 0x5d, 0xed, 0x9f, 0x64, 0xf7, 0x51, 0xfa, 0x7a,
+ 0x11, 0x2e, 0x66, 0xd6, 0x4b, 0xb4, 0x99, 0x4b, 0x86, 0x36, 0xf3, 0x5a, 0x4a, 0x9b, 0x69, 0x77,
+ 0xaf, 0x7d, 0x3c, 0xea, 0x4d, 0xe1, 0x42, 0xc9, 0x22, 0xe2, 0x3d, 0xa4, 0x6a, 0xd3, 0x70, 0xa1,
+ 0x54, 0x84, 0xb0, 0x49, 0xf7, 0x1b, 0x49, 0xa5, 0xf9, 0xef, 0x2d, 0x38, 0x97, 0x39, 0x37, 0x27,
+ 0xa0, 0xc2, 0x5a, 0x35, 0x55, 0x58, 0x4f, 0xf7, 0xbd, 0x5a, 0x73, 0x74, 0x5a, 0xbf, 0x31, 0x90,
+ 0xf3, 0x2d, 0x4c, 0x40, 0xbf, 0x0d, 0x23, 0x4e, 0xa3, 0x41, 0xa2, 0x68, 0x25, 0x70, 0x55, 0x84,
+ 0xb8, 0xe7, 0x98, 0x9c, 0x95, 0x14, 0x1f, 0xee, 0x57, 0x66, 0xd2, 0x24, 0x12, 0x30, 0xd6, 0x29,
+ 0x98, 0x41, 0x2d, 0x0b, 0xc7, 0x1a, 0xd4, 0xf2, 0x1a, 0xc0, 0xae, 0xe2, 0xd6, 0xd3, 0x42, 0xbe,
+ 0xc6, 0xc7, 0x6b, 0x58, 0xe8, 0x0b, 0x50, 0x8a, 0xc4, 0x35, 0x2e, 0x96, 0xe2, 0x0b, 0x7d, 0xce,
+ 0x95, 0xb3, 0x4e, 0x9a, 0xa6, 0xaf, 0xbe, 0xd2, 0x87, 0x28, 0x92, 0xe8, 0xdb, 0x60, 0x32, 0xe2,
+ 0xa1, 0x60, 0x16, 0x9a, 0x4e, 0xc4, 0xfc, 0x20, 0xc4, 0x2a, 0x64, 0x0e, 0xf8, 0xf5, 0x14, 0x0c,
+ 0x77, 0x60, 0xa3, 0x25, 0xf9, 0x51, 0x2c, 0x6e, 0x0d, 0x5f, 0x98, 0x97, 0x93, 0x0f, 0x12, 0x29,
+ 0xcc, 0x4e, 0xa7, 0x87, 0x9f, 0x0d, 0xbc, 0x56, 0x13, 0x7d, 0x01, 0x80, 0x2e, 0x1f, 0xa1, 0x4b,
+ 0x18, 0xce, 0x3f, 0x3c, 0xe9, 0xa9, 0xe2, 0x66, 0x5a, 0x7e, 0x32, 0xe7, 0xc5, 0xaa, 0x22, 0x82,
+ 0x35, 0x82, 0xf6, 0x0f, 0x0d, 0xc0, 0xe3, 0x5d, 0xce, 0x48, 0x34, 0x67, 0x3e, 0x81, 0x3e, 0x93,
+ 0x16, 0xae, 0x67, 0x32, 0x2b, 0x1b, 0xd2, 0x76, 0x6a, 0x29, 0x16, 0x3e, 0xf0, 0x52, 0xfc, 0x01,
+ 0x4b, 0x53, 0x7b, 0x70, 0x63, 0xbe, 0xcf, 0x1e, 0xf1, 0xec, 0x3f, 0x46, 0x3d, 0xc8, 0x46, 0x86,
+ 0x32, 0xe1, 0x5a, 0xdf, 0xdd, 0xe9, 0x5b, 0xbb, 0x70, 0xb2, 0xca, 0xdf, 0x2f, 0x59, 0xf0, 0x44,
+ 0x66, 0x7f, 0x0d, 0x93, 0x8d, 0xab, 0x50, 0x6e, 0xd0, 0x42, 0xcd, 0x57, 0x2d, 0x71, 0xe2, 0x95,
+ 0x00, 0x9c, 0xe0, 0x18, 0x96, 0x19, 0x85, 0x9e, 0x96, 0x19, 0xff, 0xda, 0x82, 0x8e, 0xfd, 0x71,
+ 0x02, 0x07, 0xf5, 0xb2, 0x79, 0x50, 0x7f, 0xbc, 0x9f, 0xb9, 0xcc, 0x39, 0xa3, 0xff, 0x78, 0x02,
+ 0xce, 0xe6, 0xf8, 0x6a, 0xec, 0xc2, 0xd4, 0x66, 0x83, 0x98, 0x5e, 0x80, 0xe2, 0x63, 0x32, 0x1d,
+ 0x26, 0xbb, 0xba, 0x0c, 0xb2, 0x7c, 0x44, 0x53, 0x1d, 0x28, 0xb8, 0xb3, 0x09, 0xf4, 0x25, 0x0b,
+ 0x4e, 0x3b, 0xf7, 0xa3, 0x8e, 0x04, 0xa6, 0x62, 0xcd, 0xbc, 0x98, 0xa9, 0x04, 0xe9, 0x91, 0xf0,
+ 0x94, 0x27, 0x68, 0xca, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x62, 0x86, 0x52, 0x76, 0xbe, 0x8b,
+ 0x9f, 0x6a, 0x96, 0x53, 0x0d, 0x3f, 0xb2, 0x25, 0x04, 0x2b, 0x3a, 0xe8, 0x1d, 0x28, 0x6f, 0x4a,
+ 0x4f, 0xb7, 0x8c, 0x2b, 0x21, 0x19, 0xc8, 0xee, 0xfe, 0x7f, 0xfc, 0x81, 0x52, 0x21, 0xe1, 0x84,
+ 0x28, 0x7a, 0x1d, 0x8a, 0xfe, 0x46, 0xd4, 0x2d, 0xc7, 0x51, 0xca, 0xa6, 0x89, 0x7b, 0x83, 0xaf,
+ 0x2e, 0xd5, 0x31, 0xad, 0x88, 0x6e, 0x40, 0x31, 0x5c, 0x77, 0x85, 0x06, 0x2f, 0xf3, 0x0c, 0xc7,
+ 0xf3, 0xd5, 0x9c, 0x5e, 0x31, 0x4a, 0x78, 0xbe, 0x8a, 0x29, 0x09, 0x54, 0x83, 0x41, 0xe6, 0xe0,
+ 0x20, 0xee, 0x83, 0x4c, 0xce, 0xb7, 0x8b, 0xa3, 0x10, 0x77, 0x19, 0x67, 0x08, 0x98, 0x13, 0x42,
+ 0x6b, 0x30, 0xd4, 0x60, 0xf9, 0x70, 0x44, 0xc0, 0xea, 0x4f, 0x65, 0xea, 0xea, 0xba, 0x24, 0x0a,
+ 0x12, 0xaa, 0x2b, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0xb4, 0xb6, 0x36, 0x22, 0x91, 0xbf, 0x2d,
+ 0x9b, 0x6a, 0x97, 0xfc, 0x57, 0x82, 0x2a, 0xc3, 0xc0, 0x82, 0x16, 0xfa, 0x0c, 0x14, 0x36, 0x1a,
+ 0xc2, 0xff, 0x21, 0x53, 0x69, 0x67, 0x3a, 0xf4, 0xcf, 0x0f, 0x1d, 0xec, 0x57, 0x0a, 0x4b, 0x0b,
+ 0xb8, 0xb0, 0xd1, 0x40, 0xab, 0x30, 0xbc, 0xc1, 0x5d, 0x80, 0x85, 0x5e, 0xee, 0xa9, 0x6c, 0xef,
+ 0xe4, 0x0e, 0x2f, 0x61, 0x6e, 0xb7, 0x2f, 0x00, 0x58, 0x12, 0x61, 0x21, 0x38, 0x95, 0x2b, 0xb3,
+ 0x88, 0x45, 0x3d, 0x7b, 0x34, 0xf7, 0x73, 0x7e, 0x3f, 0x27, 0x0e, 0xd1, 0x58, 0xa3, 0x48, 0x57,
+ 0xb5, 0x23, 0x93, 0x68, 0x8a, 0x58, 0x1d, 0x99, 0xab, 0xba, 0x47, 0x7e, 0x51, 0xbe, 0xaa, 0x15,
+ 0x12, 0x4e, 0x88, 0xa2, 0x6d, 0x18, 0xdb, 0x8d, 0x5a, 0x5b, 0x44, 0x6e, 0x69, 0x16, 0xba, 0x23,
+ 0xe7, 0x0a, 0xbb, 0x2b, 0x10, 0xbd, 0x30, 0x6e, 0x3b, 0xcd, 0x8e, 0x53, 0x88, 0xbd, 0x6a, 0xdf,
+ 0xd5, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, 0xf7, 0xda, 0xc1, 0xfa, 0x5e, 0x4c, 0x44, 0xf0, 0xea,
+ 0xcc, 0xe1, 0x7f, 0x93, 0xa3, 0x74, 0x0e, 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x15, 0xc3, 0xc3,
+ 0x4e, 0xcf, 0xc9, 0xfc, 0x60, 0x4a, 0x99, 0x59, 0x6c, 0xb5, 0x41, 0x61, 0xa7, 0x65, 0x42, 0x8a,
+ 0x9d, 0x92, 0xad, 0xad, 0x20, 0x0e, 0xfc, 0xd4, 0x09, 0x3d, 0x95, 0x7f, 0x4a, 0xd6, 0x32, 0xf0,
+ 0x3b, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x72, 0x61, 0xbc, 0x15, 0x84, 0xf1, 0xfd, 0x20,
+ 0x94, 0xeb, 0x0b, 0x75, 0xd1, 0x2b, 0x18, 0x98, 0xa2, 0x45, 0x16, 0x4c, 0xdd, 0x84, 0xe0, 0x14,
+ 0x4d, 0xf4, 0x39, 0x18, 0x8e, 0x1a, 0x4e, 0x93, 0x2c, 0xdf, 0x9e, 0x3e, 0x95, 0x7f, 0xfd, 0xd4,
+ 0x39, 0x4a, 0xce, 0xea, 0x62, 0x93, 0x23, 0x50, 0xb0, 0x24, 0x87, 0x96, 0x60, 0x90, 0x65, 0x44,
+ 0x60, 0x71, 0xb7, 0x73, 0x62, 0x42, 0x75, 0x58, 0x98, 0xf2, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d,
+ 0xee, 0x01, 0xc1, 0x5e, 0x07, 0xd1, 0xf4, 0x99, 0xfc, 0x3d, 0x20, 0xb8, 0xf2, 0xdb, 0xf5, 0x6e,
+ 0x7b, 0x40, 0x21, 0xe1, 0x84, 0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xb6, 0x8b, 0x41, 0x4b, 0xee,
+ 0x59, 0xca, 0x4e, 0x66, 0x7a, 0x92, 0x52, 0x12, 0xf6, 0xef, 0x0e, 0x77, 0xf2, 0x2c, 0x4c, 0x20,
+ 0xfb, 0xab, 0x56, 0xc7, 0x5b, 0xdd, 0xa7, 0xfb, 0xd5, 0x0f, 0x1d, 0x23, 0xb7, 0xfa, 0x25, 0x0b,
+ 0xce, 0xb6, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xfd, 0xa9, 0x99, 0xf8, 0xa7, 0xab, 0xd8, 0xf8, 0xd9,
+ 0x70, 0x9c, 0xd3, 0x52, 0x5a, 0x22, 0x28, 0x7e, 0x60, 0x89, 0x60, 0x05, 0x4a, 0x8c, 0xc9, 0xec,
+ 0x91, 0x1f, 0x2e, 0x2d, 0x18, 0x31, 0x56, 0x62, 0x41, 0x54, 0xc4, 0x8a, 0x04, 0xfa, 0x41, 0x0b,
+ 0x2e, 0xa4, 0xbb, 0x8e, 0x09, 0x03, 0x8b, 0x48, 0xf2, 0x5c, 0x16, 0x5c, 0x12, 0xdf, 0x7f, 0xa1,
+ 0xd6, 0x0d, 0xf9, 0xb0, 0x17, 0x02, 0xee, 0xde, 0x18, 0xaa, 0x66, 0x08, 0xa3, 0x43, 0xa6, 0x02,
+ 0xbe, 0x0f, 0x81, 0xf4, 0x45, 0x18, 0xdd, 0x09, 0xda, 0x7e, 0x2c, 0xec, 0x5f, 0x84, 0xc7, 0x22,
+ 0x7b, 0x70, 0x5e, 0xd1, 0xca, 0xb1, 0x81, 0x95, 0x12, 0x63, 0x4b, 0x0f, 0x2d, 0xc6, 0xbe, 0x9d,
+ 0x4a, 0x28, 0x5f, 0xce, 0x8f, 0x58, 0x28, 0x24, 0xfe, 0x23, 0xa4, 0x95, 0x3f, 0x59, 0xd9, 0xe8,
+ 0xa7, 0xad, 0x0c, 0xa6, 0x9e, 0x4b, 0xcb, 0xaf, 0x99, 0xd2, 0xf2, 0xe5, 0xb4, 0xb4, 0xdc, 0xa1,
+ 0x7c, 0x35, 0x04, 0xe5, 0xfe, 0xc3, 0x5e, 0xf7, 0x1b, 0x47, 0xce, 0x6e, 0xc2, 0xa5, 0x5e, 0xd7,
+ 0x12, 0x33, 0x84, 0x72, 0xd5, 0x53, 0x5b, 0x62, 0x08, 0xe5, 0x2e, 0x57, 0x31, 0x83, 0xf4, 0x1b,
+ 0x68, 0xc4, 0xfe, 0x9f, 0x16, 0x14, 0x6b, 0x81, 0x7b, 0x02, 0xca, 0xe4, 0xcf, 0x1a, 0xca, 0xe4,
+ 0xc7, 0x73, 0x12, 0xfd, 0xe7, 0xaa, 0x8e, 0x17, 0x53, 0xaa, 0xe3, 0x0b, 0x79, 0x04, 0xba, 0x2b,
+ 0x8a, 0x7f, 0xa2, 0x08, 0x23, 0xb5, 0xc0, 0x55, 0x56, 0xc8, 0xbf, 0xf1, 0x30, 0x56, 0xc8, 0xb9,
+ 0x61, 0x61, 0x35, 0xca, 0xcc, 0x7e, 0x4a, 0x3a, 0xe1, 0xfd, 0x05, 0x33, 0x46, 0xbe, 0x47, 0xbc,
+ 0xcd, 0xad, 0x98, 0xb8, 0xe9, 0xcf, 0x39, 0x39, 0x63, 0xe4, 0xff, 0x61, 0xc1, 0x44, 0xaa, 0x75,
+ 0xd4, 0x84, 0xb1, 0xa6, 0xae, 0x09, 0x14, 0xeb, 0xf4, 0xa1, 0x94, 0x88, 0xc2, 0x98, 0x53, 0x2b,
+ 0xc2, 0x26, 0x71, 0x34, 0x0b, 0xa0, 0x5e, 0xea, 0xa4, 0x06, 0x8c, 0x71, 0xfd, 0xea, 0x29, 0x2f,
+ 0xc2, 0x1a, 0x06, 0x7a, 0x09, 0x46, 0xe2, 0xa0, 0x15, 0x34, 0x83, 0xcd, 0xbd, 0x9b, 0x44, 0x86,
+ 0xb6, 0x51, 0x26, 0x5a, 0x6b, 0x09, 0x08, 0xeb, 0x78, 0xf6, 0x4f, 0x15, 0xf9, 0x87, 0xfa, 0xb1,
+ 0xf7, 0xcd, 0x35, 0xf9, 0xd1, 0x5e, 0x93, 0x5f, 0xb7, 0x60, 0x92, 0xb6, 0xce, 0xcc, 0x45, 0xe4,
+ 0x65, 0xab, 0xd2, 0xef, 0x58, 0x5d, 0xd2, 0xef, 0x5c, 0xa6, 0x67, 0x97, 0x1b, 0xb4, 0x63, 0xa1,
+ 0x41, 0xd3, 0x0e, 0x27, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x30, 0x14, 0x3e, 0x50, 0x3a, 0x1e,
+ 0x09, 0x43, 0x2c, 0xa0, 0x32, 0x3b, 0xcf, 0x40, 0x4e, 0x76, 0x1e, 0x16, 0xa8, 0x4f, 0x18, 0x16,
+ 0x08, 0xb6, 0x47, 0x0b, 0xd4, 0x27, 0x2d, 0x0e, 0x12, 0x1c, 0xfb, 0xe7, 0x8b, 0x30, 0x5a, 0x0b,
+ 0xdc, 0xe4, 0xad, 0xec, 0x45, 0xe3, 0xad, 0xec, 0x52, 0xea, 0xad, 0x6c, 0x52, 0xc7, 0xfd, 0xe6,
+ 0xcb, 0xd8, 0x87, 0xf5, 0x32, 0xf6, 0xaf, 0x2c, 0x36, 0x6b, 0xd5, 0xd5, 0xba, 0xc8, 0x0e, 0xfc,
+ 0x3c, 0x8c, 0xb0, 0x03, 0x89, 0x39, 0xdd, 0xc9, 0x07, 0x24, 0x16, 0x78, 0x7f, 0x35, 0x29, 0xc6,
+ 0x3a, 0x0e, 0xba, 0x02, 0xa5, 0x88, 0x38, 0x61, 0x63, 0x4b, 0x9d, 0x71, 0xe2, 0x79, 0x85, 0x97,
+ 0x61, 0x05, 0x45, 0x6f, 0x26, 0x31, 0xe2, 0x8a, 0xf9, 0x79, 0x6e, 0xf5, 0xfe, 0xf0, 0x2d, 0x92,
+ 0x1f, 0x18, 0xce, 0xbe, 0x07, 0xa8, 0x13, 0xbf, 0x8f, 0xe0, 0x48, 0x15, 0x33, 0x38, 0x52, 0xb9,
+ 0x23, 0x30, 0xd2, 0x9f, 0x59, 0x30, 0x5e, 0x0b, 0x5c, 0xba, 0x75, 0xbf, 0x91, 0xf6, 0xa9, 0x1e,
+ 0x20, 0x73, 0xa8, 0x4b, 0x80, 0xcc, 0x7f, 0x68, 0xc1, 0x70, 0x2d, 0x70, 0x4f, 0x40, 0xef, 0xfe,
+ 0x9a, 0xa9, 0x77, 0x7f, 0x2c, 0x67, 0x49, 0xe4, 0xa8, 0xda, 0x7f, 0xb1, 0x08, 0x63, 0xb4, 0x9f,
+ 0xc1, 0xa6, 0x9c, 0x25, 0x63, 0x44, 0xac, 0x3e, 0x46, 0x84, 0xb2, 0xb9, 0x41, 0xb3, 0x19, 0xdc,
+ 0x4f, 0xcf, 0xd8, 0x12, 0x2b, 0xc5, 0x02, 0x8a, 0x9e, 0x85, 0x52, 0x2b, 0x24, 0xbb, 0x5e, 0x20,
+ 0xf8, 0x47, 0xed, 0x15, 0xa3, 0x26, 0xca, 0xb1, 0xc2, 0xa0, 0x72, 0x57, 0xe4, 0xf9, 0x0d, 0x22,
+ 0x93, 0x6c, 0x0f, 0xb0, 0x3c, 0x5c, 0x3c, 0xf2, 0xb5, 0x56, 0x8e, 0x0d, 0x2c, 0x74, 0x0f, 0xca,
+ 0xec, 0x3f, 0x3b, 0x51, 0x8e, 0x9e, 0x37, 0x48, 0xa4, 0x9b, 0x10, 0x04, 0x70, 0x42, 0x0b, 0x5d,
+ 0x03, 0x88, 0x65, 0x74, 0xe4, 0x48, 0xc4, 0xb8, 0x51, 0xbc, 0xb6, 0x8a, 0x9b, 0x1c, 0x61, 0x0d,
+ 0x0b, 0x3d, 0x03, 0xe5, 0xd8, 0xf1, 0x9a, 0xb7, 0x3c, 0x9f, 0x44, 0x4c, 0xe5, 0x5c, 0x94, 0xd9,
+ 0x24, 0x44, 0x21, 0x4e, 0xe0, 0x94, 0xd7, 0x61, 0x0e, 0xe0, 0x3c, 0xeb, 0x58, 0x89, 0x61, 0x33,
+ 0x5e, 0xe7, 0x96, 0x2a, 0xc5, 0x1a, 0x86, 0xfd, 0x0a, 0x9c, 0xa9, 0x05, 0x6e, 0x2d, 0x08, 0xe3,
+ 0xa5, 0x20, 0xbc, 0xef, 0x84, 0xae, 0x9c, 0xbf, 0x8a, 0x4c, 0x6c, 0x40, 0xcf, 0x9e, 0x41, 0xbe,
+ 0x33, 0x8d, 0x94, 0x05, 0x2f, 0x30, 0x6e, 0xe7, 0x88, 0x4e, 0x1d, 0x0d, 0x76, 0xef, 0xaa, 0x04,
+ 0x83, 0xd7, 0x9d, 0x98, 0xa0, 0xdb, 0x2c, 0x29, 0x59, 0x72, 0x05, 0x89, 0xea, 0x4f, 0x6b, 0x49,
+ 0xc9, 0x12, 0x60, 0xe6, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x76, 0x80, 0x9d, 0x46, 0xa9, 0x7c, 0x7b,
+ 0xe8, 0x8b, 0x30, 0x1e, 0x91, 0x5b, 0x9e, 0xdf, 0x7e, 0x20, 0x85, 0xf0, 0x2e, 0x6e, 0x39, 0xf5,
+ 0x45, 0x1d, 0x93, 0xab, 0xf2, 0xcc, 0x32, 0x9c, 0xa2, 0x46, 0xe7, 0x29, 0x6c, 0xfb, 0x73, 0xd1,
+ 0x9d, 0x88, 0x84, 0x22, 0xdf, 0x1b, 0x9b, 0x27, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0xeb, 0x92, 0xfd,
+ 0x59, 0x0d, 0x7c, 0x1c, 0x04, 0xb1, 0x5c, 0xc9, 0x2c, 0x63, 0x90, 0x56, 0x8e, 0x0d, 0x2c, 0xb4,
+ 0x04, 0x28, 0x6a, 0xb7, 0x5a, 0x4d, 0xf6, 0xb0, 0xef, 0x34, 0xaf, 0x87, 0x41, 0xbb, 0xc5, 0x5f,
+ 0x3d, 0x8b, 0x3c, 0x30, 0x61, 0xbd, 0x03, 0x8a, 0x33, 0x6a, 0xd0, 0xd3, 0x67, 0x23, 0x62, 0xbf,
+ 0xd9, 0xea, 0x2e, 0x0a, 0xf5, 0x7a, 0x9d, 0x15, 0x61, 0x09, 0xa3, 0x8b, 0x89, 0x35, 0xcf, 0x31,
+ 0x87, 0x92, 0xc5, 0x84, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x22, 0x0c, 0x47, 0x7b, 0x51, 0x23, 0x16,
+ 0x11, 0x99, 0x72, 0x32, 0x77, 0xd6, 0x19, 0x8a, 0x96, 0x4d, 0x82, 0x57, 0xc1, 0xb2, 0x2e, 0xda,
+ 0x81, 0xf1, 0xfb, 0x9e, 0xef, 0x06, 0xf7, 0x23, 0x39, 0x51, 0xa5, 0x7c, 0xd5, 0xe8, 0x3d, 0x8e,
+ 0x99, 0x9a, 0x6c, 0x63, 0xde, 0xee, 0x19, 0xc4, 0x70, 0x8a, 0xb8, 0xfd, 0x5d, 0xec, 0xee, 0x65,
+ 0xc9, 0xc8, 0xe2, 0x76, 0x48, 0xd0, 0x0e, 0x8c, 0xb5, 0xd8, 0x0a, 0x13, 0xa1, 0xb2, 0xc5, 0x32,
+ 0x79, 0xb1, 0x4f, 0x21, 0xfa, 0x3e, 0x3d, 0xd7, 0x94, 0x92, 0x8b, 0x49, 0x27, 0x35, 0x9d, 0x1c,
+ 0x36, 0xa9, 0xdb, 0xbf, 0x75, 0x9a, 0x1d, 0xf1, 0x75, 0x2e, 0x19, 0x0f, 0x0b, 0x4b, 0x66, 0x21,
+ 0x06, 0xcc, 0xe4, 0xab, 0x68, 0x92, 0x01, 0x14, 0xd6, 0xd0, 0x58, 0xd6, 0x45, 0x6f, 0xb2, 0x47,
+ 0x71, 0x7e, 0xae, 0xf6, 0xca, 0x09, 0xcd, 0xb1, 0x8c, 0xf7, 0x6f, 0x51, 0x11, 0x6b, 0x44, 0xd0,
+ 0x2d, 0x18, 0x13, 0xb9, 0xab, 0x84, 0x0e, 0xae, 0x68, 0xe8, 0x58, 0xc6, 0xb0, 0x0e, 0x3c, 0x4c,
+ 0x17, 0x60, 0xb3, 0x32, 0xda, 0x84, 0x0b, 0x5a, 0x22, 0xc7, 0xeb, 0xa1, 0xc3, 0x1e, 0x4a, 0x3d,
+ 0xb6, 0x67, 0xb5, 0x63, 0xfa, 0x89, 0x83, 0xfd, 0xca, 0x85, 0xb5, 0x6e, 0x88, 0xb8, 0x3b, 0x1d,
+ 0x74, 0x1b, 0xce, 0x70, 0x87, 0xc1, 0x2a, 0x71, 0xdc, 0xa6, 0xe7, 0xab, 0x7b, 0x80, 0x2f, 0xfb,
+ 0x73, 0x07, 0xfb, 0x95, 0x33, 0x73, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x0d, 0xca, 0xae, 0x1f,
+ 0x89, 0x31, 0x18, 0x32, 0x72, 0x94, 0x96, 0xab, 0xab, 0x75, 0xf5, 0xfd, 0xc9, 0x1f, 0x9c, 0x54,
+ 0x40, 0x9b, 0x5c, 0x0f, 0xa7, 0xc4, 0xde, 0xe1, 0xfc, 0x7c, 0xf4, 0x62, 0x49, 0x18, 0x2e, 0x43,
+ 0x5c, 0x01, 0xad, 0x4c, 0x6e, 0x0d, 0x6f, 0x22, 0x83, 0x30, 0x7a, 0x03, 0x10, 0xe5, 0x0b, 0xbd,
+ 0x06, 0x99, 0x6b, 0xb0, 0x88, 0xe5, 0x4c, 0x6d, 0x59, 0x32, 0x5c, 0x34, 0x50, 0xbd, 0x03, 0x03,
+ 0x67, 0xd4, 0x42, 0x37, 0xe8, 0xb9, 0xa9, 0x97, 0x0a, 0xd3, 0x61, 0x29, 0x4b, 0x4c, 0x57, 0x49,
+ 0x2b, 0x24, 0x0d, 0x27, 0x26, 0xae, 0x49, 0x11, 0xa7, 0xea, 0xd1, 0xab, 0x5b, 0x25, 0x2f, 0x02,
+ 0x33, 0x4a, 0x47, 0x67, 0x02, 0x23, 0x2a, 0x86, 0x6f, 0x05, 0x51, 0xbc, 0x4a, 0xe2, 0xfb, 0x41,
+ 0xb8, 0x2d, 0x82, 0xa2, 0x25, 0xf1, 0x39, 0x13, 0x10, 0xd6, 0xf1, 0x28, 0xdb, 0xcd, 0x5e, 0xa5,
+ 0x97, 0xab, 0xec, 0x41, 0xb0, 0x94, 0xec, 0x93, 0x1b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0xae,
+ 0x2d, 0xb0, 0xc7, 0xbd, 0x14, 0xea, 0x72, 0x6d, 0x01, 0x4b, 0x38, 0x22, 0x9d, 0xf9, 0x5f, 0xc7,
+ 0xf3, 0x95, 0xa8, 0x9d, 0xb7, 0x4f, 0x9f, 0x29, 0x60, 0x7d, 0x98, 0x54, 0x99, 0x67, 0x79, 0xb4,
+ 0xb8, 0x68, 0x7a, 0x82, 0x2d, 0x92, 0xfe, 0x43, 0xcd, 0x29, 0xb5, 0xf4, 0x72, 0x8a, 0x12, 0xee,
+ 0xa0, 0x6d, 0xc4, 0x4d, 0x99, 0xec, 0x99, 0x7c, 0xea, 0x2a, 0x94, 0xa3, 0xf6, 0xba, 0x1b, 0xec,
+ 0x38, 0x9e, 0xcf, 0xde, 0xe2, 0x34, 0x9e, 0xae, 0x2e, 0x01, 0x38, 0xc1, 0x41, 0x4b, 0x50, 0x72,
+ 0xa4, 0xce, 0x19, 0xe5, 0x07, 0x49, 0x50, 0x9a, 0x66, 0xee, 0x37, 0x2c, 0xb5, 0xcc, 0xaa, 0x2e,
+ 0x7a, 0x15, 0xc6, 0x84, 0x9b, 0x18, 0x0f, 0x1d, 0xc1, 0xde, 0xca, 0x34, 0x3f, 0x80, 0xba, 0x0e,
+ 0xc4, 0x26, 0x2e, 0xfa, 0x02, 0x8c, 0x53, 0x2a, 0xc9, 0xc1, 0x36, 0x7d, 0xba, 0x9f, 0x13, 0x51,
+ 0x4b, 0x2a, 0xa2, 0x57, 0xc6, 0x29, 0x62, 0xc8, 0x85, 0xf3, 0x4e, 0x3b, 0x0e, 0x98, 0xde, 0xde,
+ 0x5c, 0xff, 0x6b, 0xc1, 0x36, 0xf1, 0xd9, 0x93, 0x59, 0x69, 0xfe, 0xd2, 0xc1, 0x7e, 0xe5, 0xfc,
+ 0x5c, 0x17, 0x3c, 0xdc, 0x95, 0x0a, 0xba, 0x03, 0x23, 0x71, 0xd0, 0x64, 0x16, 0xf9, 0xf4, 0x42,
+ 0x3c, 0x9b, 0x1f, 0x77, 0x68, 0x4d, 0xa1, 0xe9, 0x3a, 0x2b, 0x55, 0x15, 0xeb, 0x74, 0xd0, 0x1a,
+ 0xdf, 0x63, 0x2c, 0x22, 0x2b, 0x89, 0xa6, 0x1f, 0xcb, 0x1f, 0x18, 0x15, 0xb8, 0xd5, 0xdc, 0x82,
+ 0xa2, 0x26, 0xd6, 0xc9, 0xa0, 0xeb, 0x30, 0xd5, 0x0a, 0xbd, 0x80, 0x2d, 0x6c, 0xf5, 0x66, 0x32,
+ 0x6d, 0xe6, 0x91, 0xa8, 0xa5, 0x11, 0x70, 0x67, 0x1d, 0x2a, 0xd3, 0xca, 0xc2, 0xe9, 0x73, 0x3c,
+ 0x29, 0x19, 0xe7, 0xf3, 0x79, 0x19, 0x56, 0x50, 0xb4, 0xc2, 0xce, 0x65, 0x2e, 0x7d, 0x4e, 0xcf,
+ 0xe4, 0x07, 0x97, 0xd0, 0xa5, 0x54, 0xce, 0x9e, 0xa9, 0xbf, 0x38, 0xa1, 0x40, 0xef, 0x8d, 0x68,
+ 0xcb, 0x09, 0x49, 0x2d, 0x0c, 0x1a, 0x24, 0xd2, 0x82, 0x40, 0x3f, 0xce, 0x03, 0x47, 0xd2, 0x7b,
+ 0xa3, 0x9e, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x57, 0xcb, 0xc5, 0x4d, 0xb9, 0xde, 0x68, 0xfa, 0x7c,
+ 0x17, 0xfb, 0xa6, 0x14, 0x8b, 0x9c, 0xac, 0x45, 0xa3, 0x38, 0xc2, 0x29, 0x9a, 0xe8, 0xdb, 0x60,
+ 0x52, 0xc4, 0x59, 0x4a, 0xc6, 0xfd, 0x42, 0x62, 0x38, 0x89, 0x53, 0x30, 0xdc, 0x81, 0xcd, 0x43,
+ 0x5f, 0x3b, 0xeb, 0x4d, 0x22, 0x16, 0xe1, 0x2d, 0xcf, 0xdf, 0x8e, 0xa6, 0x2f, 0xb2, 0xaf, 0x16,
+ 0xa1, 0xaf, 0xd3, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x24, 0x64, 0x87, 0xf1, 0x58,
+ 0xe2, 0xba, 0xac, 0x70, 0x6f, 0x60, 0xda, 0x93, 0x5a, 0x0a, 0x76, 0x98, 0x51, 0x86, 0x3b, 0x28,
+ 0xa0, 0xfb, 0x50, 0x0a, 0x76, 0x49, 0xb8, 0x45, 0x1c, 0x77, 0xfa, 0x52, 0x17, 0x43, 0x5e, 0x71,
+ 0x77, 0xde, 0x16, 0xb8, 0xa9, 0x87, 0x5b, 0x59, 0xdc, 0xfb, 0xe1, 0x56, 0x36, 0x36, 0xf3, 0xad,
+ 0x30, 0xd5, 0x71, 0x11, 0x1f, 0x25, 0xfa, 0xfd, 0xcc, 0x36, 0x8c, 0x19, 0xbd, 0x79, 0xa4, 0x6f,
+ 0x64, 0x7f, 0x3a, 0x08, 0x65, 0xf5, 0x7e, 0x82, 0xae, 0x9a, 0xcf, 0x62, 0xe7, 0xd2, 0xcf, 0x62,
+ 0x25, 0x2a, 0xc6, 0xe9, 0x2f, 0x61, 0x6b, 0x86, 0x4d, 0x65, 0x21, 0x3f, 0xb1, 0x9d, 0x2e, 0x88,
+ 0xf5, 0xf4, 0xcf, 0xd4, 0xd4, 0x61, 0xc5, 0xbe, 0xdf, 0xd7, 0x06, 0xba, 0x6a, 0xd8, 0xfa, 0xcc,
+ 0x2b, 0x8d, 0x9e, 0xa4, 0xb2, 0xac, 0xbb, 0x5c, 0x4b, 0x27, 0x5a, 0xad, 0xd1, 0x42, 0xcc, 0x61,
+ 0x4c, 0xe6, 0xa7, 0x2c, 0x2a, 0x93, 0xf9, 0x87, 0x1f, 0x52, 0xe6, 0x97, 0x04, 0x70, 0x42, 0x0b,
+ 0x35, 0x61, 0xaa, 0x61, 0xe6, 0xc8, 0x55, 0x3e, 0x99, 0x4f, 0xf6, 0xcc, 0x56, 0xdb, 0xd6, 0x12,
+ 0x12, 0x2e, 0xa4, 0xa9, 0xe0, 0x4e, 0xc2, 0xe8, 0x55, 0x28, 0xbd, 0x17, 0x44, 0x6c, 0x43, 0x0b,
+ 0x3e, 0x4d, 0xfa, 0xae, 0x95, 0xde, 0xbc, 0x5d, 0x67, 0xe5, 0x87, 0xfb, 0x95, 0x91, 0x5a, 0xe0,
+ 0xca, 0xbf, 0x58, 0x55, 0x40, 0x0f, 0xe0, 0x8c, 0x71, 0xbb, 0xa9, 0xee, 0x42, 0xff, 0xdd, 0xbd,
+ 0x20, 0x9a, 0x3b, 0xb3, 0x9c, 0x45, 0x09, 0x67, 0x37, 0x40, 0xaf, 0x0c, 0x3f, 0x10, 0xf9, 0xa5,
+ 0x25, 0x2f, 0xc8, 0x58, 0xbe, 0xb2, 0x1e, 0xb9, 0x20, 0x85, 0x80, 0x3b, 0xeb, 0xd8, 0xbf, 0xc2,
+ 0x9f, 0x9b, 0x84, 0x52, 0x9a, 0x44, 0xed, 0xe6, 0x49, 0xa4, 0x2f, 0x5b, 0x34, 0xf4, 0xe5, 0x0f,
+ 0xfd, 0xa4, 0xf9, 0xeb, 0x16, 0x7b, 0xd2, 0x5c, 0x23, 0x3b, 0xad, 0xa6, 0x13, 0x9f, 0x84, 0xcf,
+ 0xd4, 0x9b, 0x50, 0x8a, 0x45, 0x6b, 0xdd, 0x32, 0xae, 0x69, 0x9d, 0x62, 0xcf, 0xba, 0x8a, 0x4b,
+ 0x94, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x39, 0x9f, 0x01, 0x09, 0x39, 0x01, 0xdd, 0x65, 0xd5, 0xd4,
+ 0x5d, 0x56, 0x7a, 0x7c, 0x41, 0x8e, 0x0e, 0xf3, 0x9f, 0x99, 0xfd, 0x66, 0x02, 0xf9, 0x47, 0xfd,
+ 0x2d, 0xdd, 0xfe, 0x61, 0x0b, 0x4e, 0x67, 0x19, 0x9f, 0x51, 0xce, 0x9e, 0xab, 0x03, 0x94, 0x6d,
+ 0x81, 0x1a, 0xc1, 0xbb, 0xa2, 0x1c, 0x2b, 0x8c, 0xbe, 0x93, 0x99, 0x1c, 0x2d, 0xb8, 0xdf, 0x6d,
+ 0x18, 0xab, 0x85, 0x44, 0xbb, 0x03, 0x5e, 0xe7, 0x4e, 0x90, 0xbc, 0x3f, 0xcf, 0x1e, 0xd9, 0x01,
+ 0xd2, 0xfe, 0x99, 0x02, 0x9c, 0xe6, 0x8f, 0x83, 0x73, 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x57, 0x24,
+ 0xa2, 0x79, 0x0b, 0x46, 0x5b, 0x9a, 0x0e, 0xa7, 0x5b, 0x78, 0x31, 0x5d, 0xd7, 0x93, 0xc8, 0xd2,
+ 0x7a, 0x29, 0x36, 0x68, 0x21, 0x17, 0x46, 0xc9, 0xae, 0xd7, 0x50, 0x2f, 0x4c, 0x85, 0x23, 0xdf,
+ 0x0d, 0xaa, 0x95, 0x45, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x82, 0xdc, 0x84, 0xf6, 0x8f, 0x58, 0xf0,
+ 0x58, 0x4e, 0x30, 0x32, 0xda, 0xdc, 0x7d, 0xf6, 0x0c, 0x2b, 0xd2, 0x9c, 0xa9, 0xe6, 0xf8, 0xe3,
+ 0x2c, 0x16, 0x50, 0xf4, 0x39, 0x00, 0xfe, 0xb8, 0x4a, 0x45, 0xcb, 0x5e, 0x51, 0x9b, 0x8c, 0x80,
+ 0x33, 0x5a, 0xa0, 0x10, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 0x2c, 0xc2, 0x20, 0x7b, 0xcc, 0x43,
+ 0x4b, 0x30, 0xbc, 0xc5, 0xc3, 0x73, 0xf7, 0x13, 0x09, 0x3c, 0x91, 0xd1, 0x79, 0x01, 0x96, 0x95,
+ 0xd1, 0x0a, 0x9c, 0xe2, 0xe1, 0xcd, 0x9b, 0x55, 0xd2, 0x74, 0xf6, 0xa4, 0xaa, 0x87, 0xa7, 0x06,
+ 0x53, 0x41, 0x4f, 0x96, 0x3b, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x3a, 0x8c, 0x53, 0xde, 0x38, 0x68,
+ 0xc7, 0x92, 0x12, 0x0f, 0x6c, 0xae, 0x98, 0xf1, 0x35, 0x03, 0x8a, 0x53, 0xd8, 0x54, 0x68, 0x6d,
+ 0x75, 0x28, 0xb5, 0x06, 0x13, 0xa1, 0xd5, 0x54, 0x64, 0x99, 0xb8, 0xcc, 0xea, 0xac, 0xcd, 0x6c,
+ 0xec, 0xd6, 0xb6, 0x42, 0x12, 0x6d, 0x05, 0x4d, 0x57, 0x64, 0x96, 0x4f, 0xac, 0xce, 0x52, 0x70,
+ 0xdc, 0x51, 0x83, 0x52, 0xd9, 0x70, 0xbc, 0x66, 0x3b, 0x24, 0x09, 0x95, 0x21, 0x93, 0xca, 0x52,
+ 0x0a, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0x46, 0xa4, 0x7a, 0x97, 0xa1, 0x18, 0x94, 0x29, 0xe1,
+ 0xb0, 0x74, 0x4a, 0xeb, 0x12, 0x8b, 0x48, 0x18, 0x5b, 0xa9, 0x64, 0xf1, 0x9a, 0xea, 0x57, 0xb8,
+ 0xa3, 0x49, 0x2a, 0x0f, 0x93, 0x70, 0xfc, 0xfb, 0x0b, 0x70, 0x2a, 0xc3, 0x64, 0x99, 0x1f, 0x55,
+ 0x9b, 0x5e, 0x14, 0xab, 0xf4, 0x47, 0xda, 0x51, 0xc5, 0xcb, 0xb1, 0xc2, 0xa0, 0xfb, 0x81, 0x1f,
+ 0x86, 0xe9, 0x03, 0x50, 0x98, 0x04, 0x0a, 0xe8, 0x11, 0x13, 0x09, 0x5d, 0x82, 0x81, 0x76, 0x44,
+ 0x64, 0x14, 0x31, 0x75, 0x7e, 0xb3, 0xc7, 0x00, 0x06, 0xa1, 0xac, 0xe9, 0xa6, 0xd2, 0xc3, 0x6b,
+ 0xac, 0x29, 0x57, 0xae, 0x73, 0x18, 0xed, 0x5c, 0x4c, 0x7c, 0xc7, 0x8f, 0x05, 0x03, 0x9b, 0xc4,
+ 0xbe, 0x61, 0xa5, 0x58, 0x40, 0xed, 0xaf, 0x14, 0xe1, 0x5c, 0xae, 0x13, 0x03, 0xed, 0xfa, 0x4e,
+ 0xe0, 0x7b, 0x71, 0xa0, 0x1e, 0x94, 0x79, 0xbc, 0x1b, 0xd2, 0xda, 0x5a, 0x11, 0xe5, 0x58, 0x61,
+ 0xa0, 0xcb, 0x30, 0xc8, 0xb4, 0x45, 0x1d, 0x89, 0xa0, 0xe6, 0xab, 0x3c, 0x78, 0x02, 0x07, 0xf7,
+ 0x9d, 0x64, 0xef, 0x49, 0x18, 0x68, 0x05, 0x41, 0x33, 0x7d, 0x68, 0xd1, 0xee, 0x06, 0x41, 0x13,
+ 0x33, 0x20, 0xfa, 0x84, 0x18, 0xaf, 0xd4, 0x0b, 0x2a, 0x76, 0xdc, 0x20, 0xd2, 0x06, 0xed, 0x69,
+ 0x18, 0xde, 0x26, 0x7b, 0xa1, 0xe7, 0x6f, 0xa6, 0x5f, 0xd6, 0x6f, 0xf2, 0x62, 0x2c, 0xe1, 0x66,
+ 0x5a, 0x90, 0xe1, 0xe3, 0xce, 0x8e, 0x57, 0xea, 0x79, 0x05, 0xfe, 0x40, 0x11, 0x26, 0xf0, 0x7c,
+ 0xf5, 0x9b, 0x13, 0x71, 0xa7, 0x73, 0x22, 0x8e, 0x3b, 0x3b, 0x5e, 0xef, 0xd9, 0xf8, 0x45, 0x0b,
+ 0x26, 0x58, 0xe8, 0x6c, 0x11, 0x65, 0xc5, 0x0b, 0xfc, 0x13, 0x60, 0xf1, 0x9e, 0x84, 0xc1, 0x90,
+ 0x36, 0x9a, 0xce, 0x00, 0xc5, 0x7a, 0x82, 0x39, 0x0c, 0x9d, 0x87, 0x01, 0xd6, 0x05, 0x3a, 0x79,
+ 0xa3, 0x3c, 0x79, 0x46, 0xd5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0xe8, 0x00, 0x4c, 0x5a, 0x4d, 0x8f,
+ 0x77, 0x3a, 0x79, 0x3e, 0xfa, 0x68, 0x84, 0x0e, 0xc8, 0xec, 0xda, 0x07, 0x0b, 0x1d, 0x90, 0x4d,
+ 0xb2, 0xbb, 0xf8, 0xf4, 0x47, 0x05, 0xb8, 0x98, 0x59, 0xaf, 0xef, 0xd0, 0x01, 0xdd, 0x6b, 0x1f,
+ 0x8f, 0x81, 0x54, 0xb6, 0xdd, 0x52, 0xf1, 0x04, 0xed, 0x96, 0x06, 0xfa, 0xe5, 0x30, 0x07, 0xfb,
+ 0xf0, 0xe8, 0xcf, 0x1c, 0xb2, 0x8f, 0x88, 0x47, 0x7f, 0x66, 0xdf, 0x72, 0xc4, 0xbf, 0x3f, 0x2f,
+ 0xe4, 0x7c, 0x0b, 0x13, 0x04, 0xaf, 0xd0, 0x73, 0x86, 0x01, 0x23, 0xc1, 0x31, 0x8f, 0xf2, 0x33,
+ 0x86, 0x97, 0x61, 0x05, 0x45, 0x9e, 0xe6, 0x1b, 0x5f, 0xc8, 0x4f, 0x88, 0x9a, 0xdb, 0xd4, 0xac,
+ 0xf9, 0xda, 0xa7, 0x86, 0x20, 0xc3, 0x4f, 0x7e, 0x45, 0x13, 0xde, 0x8b, 0xfd, 0x0b, 0xef, 0xa3,
+ 0xd9, 0x82, 0x3b, 0x9a, 0x83, 0x89, 0x1d, 0xcf, 0xa7, 0xc7, 0xe6, 0x9e, 0xc9, 0xb2, 0xaa, 0x50,
+ 0x31, 0x2b, 0x26, 0x18, 0xa7, 0xf1, 0x67, 0x5e, 0x85, 0xb1, 0x87, 0xd6, 0x91, 0xda, 0x5f, 0x2f,
+ 0xc2, 0xe3, 0x5d, 0xb6, 0x3d, 0x3f, 0xeb, 0x8d, 0x39, 0xd0, 0xce, 0xfa, 0x8e, 0x79, 0xa8, 0xc1,
+ 0xe9, 0x8d, 0x76, 0xb3, 0xb9, 0xc7, 0x4c, 0x83, 0x89, 0x2b, 0x31, 0x04, 0x4f, 0x79, 0x5e, 0xa6,
+ 0x2b, 0x59, 0xca, 0xc0, 0xc1, 0x99, 0x35, 0xd1, 0x1b, 0x80, 0x02, 0x91, 0x8d, 0xf9, 0x3a, 0xf1,
+ 0xc5, 0x1b, 0x0a, 0x1b, 0xf8, 0x62, 0xb2, 0x19, 0x6f, 0x77, 0x60, 0xe0, 0x8c, 0x5a, 0x54, 0x38,
+ 0xa0, 0xb7, 0xd2, 0x9e, 0xea, 0x56, 0x4a, 0x38, 0xc0, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x4c,
+ 0x39, 0xbb, 0x8e, 0xc7, 0x43, 0x28, 0x4a, 0x02, 0x5c, 0x3a, 0x50, 0xca, 0xb2, 0xb9, 0x34, 0x02,
+ 0xee, 0xac, 0x93, 0xf2, 0x9e, 0x1f, 0xca, 0xf7, 0x9e, 0xef, 0x7e, 0x2e, 0xf6, 0xd2, 0xfd, 0xda,
+ 0xff, 0xd5, 0xa2, 0xd7, 0x17, 0x67, 0xf2, 0xcd, 0x20, 0x50, 0xaf, 0x32, 0xe3, 0x1f, 0xae, 0x0c,
+ 0xd4, 0x1c, 0xd9, 0xcf, 0x68, 0xc6, 0x3f, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xff,
+ 0x94, 0xc1, 0xe2, 0x8b, 0x40, 0x18, 0x0a, 0x03, 0x7d, 0x1e, 0x86, 0x5d, 0x6f, 0xd7, 0x8b, 0x82,
+ 0x50, 0x6c, 0x96, 0x23, 0x6a, 0xd8, 0x93, 0x73, 0xb0, 0xca, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81,
+ 0x02, 0x8c, 0xc9, 0x16, 0xdf, 0x6c, 0x07, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x75, 0xe3, 0x5a, 0xfe,
+ 0x44, 0xb7, 0x68, 0x20, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xed, 0xd4, 0x75, 0xfc, 0x54, 0x6f, 0x52,
+ 0xdd, 0xaf, 0xe1, 0x7f, 0x61, 0xc1, 0x94, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0x92, 0x79, 0x1b, 0x3c,
+ 0xd1, 0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2d, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0xf7, 0x60, 0x60,
+ 0xcb, 0x09, 0xdd, 0x6e, 0x51, 0x87, 0x3b, 0x2a, 0xcd, 0xde, 0x70, 0x42, 0xf1, 0xf2, 0xf4, 0xac,
+ 0x4a, 0x69, 0xea, 0x84, 0xbd, 0x5f, 0x9d, 0x58, 0x53, 0xe8, 0x15, 0x18, 0x8a, 0x1a, 0x41, 0x4b,
+ 0x19, 0xf3, 0x5e, 0xe2, 0xe9, 0x4e, 0x69, 0xc9, 0xe1, 0x7e, 0x05, 0x99, 0xcd, 0xd1, 0x62, 0x2c,
+ 0xf0, 0xd1, 0x5b, 0x30, 0xc6, 0x7e, 0x29, 0x2b, 0x93, 0x62, 0x7e, 0xae, 0x8b, 0xba, 0x8e, 0xc8,
+ 0x8d, 0x95, 0x8c, 0x22, 0x6c, 0x92, 0x9a, 0xd9, 0x84, 0xb2, 0xfa, 0xac, 0x47, 0xfa, 0x84, 0xf5,
+ 0x9f, 0x8a, 0x70, 0x2a, 0x63, 0xcd, 0xa1, 0xc8, 0x98, 0x89, 0xe7, 0xfb, 0x5c, 0xaa, 0x1f, 0x70,
+ 0x2e, 0x22, 0x26, 0x0d, 0xb9, 0x62, 0x6d, 0xf5, 0xdd, 0xe8, 0x9d, 0x88, 0xa4, 0x1b, 0xa5, 0x45,
+ 0xbd, 0x1b, 0xa5, 0x8d, 0x9d, 0xd8, 0x50, 0xd3, 0x86, 0x54, 0x4f, 0x1f, 0xe9, 0x9c, 0xfe, 0x49,
+ 0x11, 0x4e, 0x67, 0x05, 0x28, 0x42, 0xdf, 0x99, 0xca, 0x7b, 0xf4, 0x62, 0xbf, 0xa1, 0x8d, 0x78,
+ 0x32, 0x24, 0x91, 0xb6, 0x7c, 0xd6, 0xcc, 0x84, 0xd4, 0x73, 0x98, 0x45, 0x9b, 0xcc, 0x37, 0x38,
+ 0xe4, 0xf9, 0xaa, 0xe4, 0xf1, 0xf1, 0xe9, 0xbe, 0x3b, 0x20, 0x12, 0x5d, 0x45, 0xa9, 0x27, 0x66,
+ 0x59, 0xdc, 0xfb, 0x89, 0x59, 0xb6, 0x3c, 0xe3, 0xc1, 0x88, 0xf6, 0x35, 0x8f, 0x74, 0xc6, 0xb7,
+ 0xe9, 0x6d, 0xa5, 0xf5, 0xfb, 0x91, 0xce, 0xfa, 0x8f, 0x58, 0x90, 0xb2, 0x9c, 0x55, 0x6a, 0x31,
+ 0x2b, 0x57, 0x2d, 0x76, 0x09, 0x06, 0xc2, 0xa0, 0x49, 0xd2, 0x69, 0x86, 0x70, 0xd0, 0x24, 0x98,
+ 0x41, 0x28, 0x46, 0x9c, 0x28, 0x3b, 0x46, 0x75, 0x41, 0x4e, 0x88, 0x68, 0x4f, 0xc2, 0x60, 0x93,
+ 0xec, 0x92, 0x66, 0x3a, 0x86, 0xff, 0x2d, 0x5a, 0x88, 0x39, 0xcc, 0xfe, 0xc5, 0x01, 0xb8, 0xd0,
+ 0xd5, 0xbb, 0x9e, 0x8a, 0x43, 0x9b, 0x4e, 0x4c, 0xee, 0x3b, 0x7b, 0xe9, 0x60, 0xdb, 0xd7, 0x79,
+ 0x31, 0x96, 0x70, 0xe6, 0x4c, 0xc0, 0x83, 0x6b, 0xa6, 0x94, 0x88, 0x22, 0xa6, 0xa6, 0x80, 0x9a,
+ 0x4a, 0xa9, 0xe2, 0x71, 0x28, 0xa5, 0xae, 0x01, 0x44, 0x51, 0x93, 0xdb, 0x66, 0xb8, 0xc2, 0x4b,
+ 0x21, 0x09, 0xc2, 0x5a, 0xbf, 0x25, 0x20, 0x58, 0xc3, 0x42, 0x55, 0x98, 0x6c, 0x85, 0x41, 0xcc,
+ 0x75, 0xb2, 0x55, 0x6e, 0xd4, 0x35, 0x68, 0x3a, 0x36, 0xd7, 0x52, 0x70, 0xdc, 0x51, 0x03, 0xbd,
+ 0x04, 0x23, 0xc2, 0xd9, 0xb9, 0x16, 0x04, 0x4d, 0xa1, 0x06, 0x52, 0x26, 0x42, 0xf5, 0x04, 0x84,
+ 0x75, 0x3c, 0xad, 0x1a, 0x53, 0xf4, 0x0e, 0x67, 0x56, 0xe3, 0xca, 0x5e, 0x0d, 0x2f, 0x15, 0xac,
+ 0xac, 0xd4, 0x57, 0xb0, 0xb2, 0x44, 0x31, 0x56, 0xee, 0xfb, 0x6d, 0x0b, 0x7a, 0xaa, 0x92, 0x7e,
+ 0x6e, 0x00, 0x4e, 0x89, 0x85, 0xf3, 0xa8, 0x97, 0xcb, 0x9d, 0xce, 0xe5, 0x72, 0x1c, 0xaa, 0xb3,
+ 0x6f, 0xae, 0x99, 0x93, 0x5e, 0x33, 0x3f, 0x68, 0x81, 0xc9, 0x5e, 0xa1, 0xbf, 0x94, 0x9b, 0xad,
+ 0xe0, 0xa5, 0x5c, 0x76, 0xcd, 0x95, 0x17, 0xc8, 0x07, 0xcc, 0x5b, 0x60, 0xff, 0x17, 0x0b, 0x9e,
+ 0xe8, 0x49, 0x11, 0x2d, 0x42, 0x99, 0xf1, 0x80, 0x9a, 0x74, 0xf6, 0x94, 0x32, 0xfa, 0x94, 0x80,
+ 0x1c, 0x96, 0x34, 0xa9, 0x89, 0x16, 0x3b, 0xd2, 0x42, 0x3c, 0x9d, 0x91, 0x16, 0xe2, 0x8c, 0x31,
+ 0x3c, 0x0f, 0x99, 0x17, 0xe2, 0x57, 0x8a, 0x30, 0xc4, 0x57, 0xfc, 0x09, 0x88, 0x61, 0x4b, 0x42,
+ 0x6f, 0xdb, 0x25, 0x5c, 0x19, 0xef, 0xcb, 0x6c, 0xd5, 0x89, 0x1d, 0xce, 0x26, 0xa8, 0xdb, 0x2a,
+ 0xd1, 0xf0, 0xa2, 0x59, 0xe3, 0x3e, 0x9b, 0x49, 0x29, 0x26, 0x81, 0xd3, 0xd0, 0x6e, 0xb7, 0x2f,
+ 0x02, 0x44, 0x71, 0xe8, 0xf9, 0x9b, 0x94, 0x86, 0x08, 0x7c, 0xf7, 0xc9, 0x2e, 0xad, 0xd7, 0x15,
+ 0x32, 0xef, 0x43, 0xb2, 0xd3, 0x15, 0x00, 0x6b, 0x14, 0x67, 0x5e, 0x86, 0xb2, 0x42, 0xee, 0xa5,
+ 0xc5, 0x19, 0xd5, 0x99, 0x8b, 0xcf, 0xc2, 0x44, 0xaa, 0xad, 0x23, 0x29, 0x81, 0x7e, 0xc9, 0x82,
+ 0x09, 0xde, 0xe5, 0x45, 0x7f, 0x57, 0x9c, 0xa9, 0xef, 0xc3, 0xe9, 0x66, 0xc6, 0xd9, 0x26, 0x66,
+ 0xb4, 0xff, 0xb3, 0x50, 0x29, 0x7d, 0xb2, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x85, 0xae, 0x5b, 0x7a,
+ 0x76, 0x39, 0x4d, 0xe1, 0x98, 0x36, 0xca, 0xd7, 0x2c, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05,
+ 0x53, 0xbc, 0xe7, 0x37, 0xc9, 0x9e, 0xda, 0xe1, 0x1f, 0x66, 0xdf, 0x45, 0xa6, 0x96, 0x42, 0x4e,
+ 0xa6, 0x16, 0xfd, 0xd3, 0x8a, 0x5d, 0x3f, 0xed, 0x67, 0x2c, 0x10, 0x2b, 0xf0, 0x04, 0x44, 0xf9,
+ 0x6f, 0x35, 0x45, 0xf9, 0x99, 0xfc, 0x45, 0x9d, 0x23, 0xc3, 0xff, 0x99, 0x05, 0x93, 0x1c, 0x21,
+ 0x79, 0x73, 0xfe, 0x50, 0xe7, 0xa1, 0x9f, 0x94, 0x8b, 0x2a, 0x0f, 0x7b, 0xf6, 0x47, 0x19, 0x93,
+ 0x35, 0xd0, 0x75, 0xb2, 0x5c, 0xb9, 0x81, 0x8e, 0x90, 0x6e, 0xf4, 0xc8, 0x11, 0xcf, 0xed, 0x3f,
+ 0xb4, 0x00, 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0x92, 0xa3, 0x46,
+ 0x41, 0xb0, 0x86, 0x75, 0x2c, 0xc3, 0x93, 0x32, 0x1c, 0x28, 0xf6, 0x36, 0x1c, 0x38, 0xc2, 0x88,
+ 0xfe, 0xc1, 0x20, 0xa4, 0x5d, 0x37, 0xd0, 0x5d, 0x18, 0x6d, 0x38, 0x2d, 0x67, 0xdd, 0x6b, 0x7a,
+ 0xb1, 0x47, 0xa2, 0x6e, 0x16, 0x47, 0x0b, 0x1a, 0x9e, 0x78, 0xea, 0xd5, 0x4a, 0xb0, 0x41, 0x07,
+ 0xcd, 0x02, 0xb4, 0x42, 0x6f, 0xd7, 0x6b, 0x92, 0x4d, 0xa6, 0x71, 0x60, 0xae, 0xb0, 0xdc, 0x8c,
+ 0x46, 0x96, 0x62, 0x0d, 0x23, 0xc3, 0xab, 0xb1, 0xf8, 0xe8, 0xbc, 0x1a, 0x07, 0x8e, 0xe8, 0xd5,
+ 0x38, 0xd8, 0x97, 0x57, 0x23, 0x86, 0xb3, 0x92, 0x45, 0xa2, 0xff, 0x97, 0xbc, 0x26, 0x11, 0x7c,
+ 0x31, 0x77, 0x90, 0x9d, 0x39, 0xd8, 0xaf, 0x9c, 0xc5, 0x99, 0x18, 0x38, 0xa7, 0x26, 0xfa, 0x1c,
+ 0x4c, 0x3b, 0xcd, 0x66, 0x70, 0x5f, 0x8d, 0xda, 0x62, 0xd4, 0x70, 0x9a, 0x5c, 0x63, 0x3f, 0xcc,
+ 0xa8, 0x9e, 0x3f, 0xd8, 0xaf, 0x4c, 0xcf, 0xe5, 0xe0, 0xe0, 0xdc, 0xda, 0x29, 0xa7, 0xc8, 0x52,
+ 0x4f, 0xa7, 0xc8, 0xd7, 0xa0, 0xdc, 0x0a, 0x83, 0xc6, 0x8a, 0xe6, 0x39, 0x75, 0x91, 0x0e, 0x60,
+ 0x4d, 0x16, 0x1e, 0xee, 0x57, 0xc6, 0xd4, 0x1f, 0x76, 0xc3, 0x27, 0x15, 0x32, 0x7c, 0x21, 0xe1,
+ 0x51, 0xfa, 0x42, 0x6e, 0xc3, 0xa9, 0x3a, 0x09, 0x3d, 0x96, 0x95, 0xd5, 0x4d, 0xce, 0x8f, 0x35,
+ 0x28, 0x87, 0xa9, 0x13, 0xb3, 0xaf, 0x10, 0x5f, 0x5a, 0xe4, 0x69, 0x79, 0x42, 0x26, 0x84, 0xec,
+ 0x3f, 0xb5, 0x60, 0x58, 0x38, 0x0d, 0x9c, 0x00, 0xa3, 0x36, 0x67, 0xe8, 0xcb, 0x2b, 0xd9, 0xb7,
+ 0x0a, 0xeb, 0x4c, 0xae, 0xa6, 0x7c, 0x39, 0xa5, 0x29, 0x7f, 0xa2, 0x1b, 0x91, 0xee, 0x3a, 0xf2,
+ 0xbf, 0x5b, 0x84, 0x71, 0xd3, 0xcf, 0xe7, 0x04, 0x86, 0x60, 0x15, 0x86, 0x23, 0xe1, 0x54, 0x56,
+ 0xc8, 0x37, 0xe8, 0x4e, 0x4f, 0x62, 0x62, 0xad, 0x25, 0xdc, 0xc8, 0x24, 0x91, 0x4c, 0x6f, 0xb5,
+ 0xe2, 0x23, 0xf4, 0x56, 0xeb, 0xe5, 0x6a, 0x35, 0x70, 0x1c, 0xae, 0x56, 0xf6, 0x57, 0xd9, 0xcd,
+ 0xa6, 0x97, 0x9f, 0x00, 0xd3, 0x73, 0xdd, 0xbc, 0x03, 0xed, 0x2e, 0x2b, 0x4b, 0x74, 0x2a, 0x87,
+ 0xf9, 0xf9, 0x05, 0x0b, 0x2e, 0x64, 0x7c, 0x95, 0xc6, 0x09, 0x3d, 0x0b, 0x25, 0xa7, 0xed, 0x7a,
+ 0x6a, 0x2f, 0x6b, 0xaf, 0x66, 0x73, 0xa2, 0x1c, 0x2b, 0x0c, 0xb4, 0x00, 0x53, 0xe4, 0x41, 0xcb,
+ 0xe3, 0xcf, 0x96, 0xba, 0x49, 0x65, 0x91, 0x87, 0x3d, 0x5e, 0x4c, 0x03, 0x71, 0x27, 0xbe, 0x0a,
+ 0x0c, 0x50, 0xcc, 0x0d, 0x0c, 0xf0, 0x8f, 0x2d, 0x18, 0x51, 0x0e, 0x44, 0x8f, 0x7c, 0xb4, 0xbf,
+ 0xcd, 0x1c, 0xed, 0xc7, 0xbb, 0x8c, 0x76, 0xce, 0x30, 0xff, 0xfd, 0x82, 0xea, 0x6f, 0x2d, 0x08,
+ 0xe3, 0x3e, 0x38, 0xac, 0x57, 0xa0, 0xd4, 0x0a, 0x83, 0x38, 0x68, 0x04, 0x4d, 0xc1, 0x60, 0x9d,
+ 0x4f, 0xe2, 0x56, 0xf0, 0xf2, 0x43, 0xed, 0x37, 0x56, 0xd8, 0x6c, 0xf4, 0x82, 0x30, 0x16, 0x4c,
+ 0x4d, 0x32, 0x7a, 0x41, 0x18, 0x63, 0x06, 0x41, 0x2e, 0x40, 0xec, 0x84, 0x9b, 0x24, 0xa6, 0x65,
+ 0x22, 0x04, 0x4e, 0xfe, 0xe1, 0xd1, 0x8e, 0xbd, 0xe6, 0xac, 0xe7, 0xc7, 0x51, 0x1c, 0xce, 0x2e,
+ 0xfb, 0xf1, 0xed, 0x90, 0xcb, 0x6b, 0x5a, 0x20, 0x0a, 0x45, 0x0b, 0x6b, 0x74, 0xa5, 0xfb, 0x2e,
+ 0x6b, 0x63, 0xd0, 0x7c, 0x7f, 0x5f, 0x15, 0xe5, 0x58, 0x61, 0xd8, 0x2f, 0xb3, 0xab, 0x84, 0x0d,
+ 0xd0, 0xd1, 0x62, 0x44, 0x7c, 0xad, 0xa4, 0x86, 0x96, 0x3d, 0xbe, 0x55, 0xf5, 0x48, 0x14, 0xdd,
+ 0x4f, 0x6e, 0xda, 0xb0, 0xee, 0xde, 0x93, 0x84, 0xab, 0x40, 0xdf, 0xde, 0x61, 0x96, 0xf1, 0x5c,
+ 0x8f, 0x2b, 0xe0, 0x08, 0x86, 0x18, 0x2c, 0x14, 0x3b, 0x0b, 0x54, 0xbd, 0x5c, 0x13, 0x8b, 0x5c,
+ 0x0b, 0xc5, 0x2e, 0x00, 0x38, 0xc1, 0x41, 0x57, 0x85, 0xb4, 0x3f, 0x60, 0x24, 0x64, 0x94, 0xd2,
+ 0xbe, 0xfc, 0x7c, 0x4d, 0xdc, 0x7f, 0x1e, 0x46, 0x54, 0x62, 0xc6, 0x1a, 0xcf, 0x6f, 0x27, 0x02,
+ 0x02, 0x2d, 0x26, 0xc5, 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x71, 0x55, 0x8f, 0x8a, 0xfb, 0xc8,
+ 0x55, 0x66, 0x9f, 0x94, 0xe6, 0x1c, 0x75, 0x13, 0x7c, 0xc8, 0x8a, 0xf8, 0xd1, 0x21, 0x7d, 0x70,
+ 0xd3, 0x24, 0xd0, 0xeb, 0x30, 0xde, 0x0c, 0x1c, 0x77, 0xde, 0x69, 0x3a, 0x7e, 0x83, 0x7d, 0x6f,
+ 0xc9, 0xcc, 0x67, 0x75, 0xcb, 0x80, 0xe2, 0x14, 0x36, 0x65, 0xcc, 0xf4, 0x12, 0x11, 0xab, 0xd4,
+ 0xf1, 0x37, 0x49, 0x24, 0xd2, 0xca, 0x31, 0xc6, 0xec, 0x56, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e,
+ 0x81, 0x51, 0xf9, 0xf9, 0x9a, 0x87, 0x79, 0x62, 0x7b, 0xaf, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc3,
+ 0x19, 0xf9, 0x7f, 0x2d, 0x74, 0x36, 0x36, 0xbc, 0x86, 0xf0, 0x58, 0xe4, 0x0e, 0x48, 0x73, 0xd2,
+ 0xa3, 0x69, 0x31, 0x0b, 0xe9, 0x70, 0xbf, 0x72, 0x49, 0x8c, 0x5a, 0x26, 0x9c, 0x4d, 0x62, 0x36,
+ 0x7d, 0xb4, 0x02, 0xa7, 0xb6, 0x88, 0xd3, 0x8c, 0xb7, 0x16, 0xb6, 0x48, 0x63, 0x5b, 0x6e, 0x22,
+ 0xe6, 0xb7, 0xae, 0x59, 0xac, 0xdf, 0xe8, 0x44, 0xc1, 0x59, 0xf5, 0xd0, 0xdb, 0x30, 0xdd, 0x6a,
+ 0xaf, 0x37, 0xbd, 0x68, 0x6b, 0x35, 0x88, 0x99, 0x05, 0x89, 0xca, 0x6b, 0x28, 0x1c, 0xdc, 0x95,
+ 0xcf, 0x7e, 0x2d, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xef, 0xc3, 0x99, 0xd4, 0x62, 0x10, 0xee, 0xb6,
+ 0xe3, 0xf9, 0x91, 0x9f, 0xeb, 0x59, 0x15, 0x84, 0xfb, 0x6c, 0x16, 0x08, 0x67, 0x37, 0xf1, 0xc1,
+ 0xec, 0x8a, 0xde, 0xa3, 0x95, 0x35, 0xa6, 0x0c, 0xbd, 0x03, 0xa3, 0xfa, 0x2a, 0x12, 0x17, 0xcc,
+ 0xe5, 0x6c, 0x9e, 0x45, 0x5b, 0x6d, 0x9c, 0xa5, 0x53, 0x2b, 0x4a, 0x87, 0x61, 0x83, 0xa2, 0x4d,
+ 0x20, 0xfb, 0xfb, 0xd0, 0x2d, 0x28, 0x35, 0x9a, 0x1e, 0xf1, 0xe3, 0xe5, 0x5a, 0xb7, 0xf0, 0x33,
+ 0x0b, 0x02, 0x47, 0x0c, 0x98, 0x08, 0x95, 0xcb, 0xcb, 0xb0, 0xa2, 0x60, 0xff, 0x5a, 0x01, 0x2a,
+ 0x3d, 0xe2, 0x2e, 0xa7, 0xd4, 0xdf, 0x56, 0x5f, 0xea, 0xef, 0x39, 0x99, 0xa5, 0x71, 0x35, 0xa5,
+ 0x13, 0x48, 0x65, 0x60, 0x4c, 0x34, 0x03, 0x69, 0xfc, 0xbe, 0xcd, 0x91, 0x75, 0x0d, 0xfa, 0x40,
+ 0x4f, 0x83, 0x7a, 0xe3, 0xe5, 0x6c, 0xb0, 0x7f, 0x41, 0x24, 0xf7, 0x15, 0xc4, 0xfe, 0x6a, 0x01,
+ 0xce, 0xa8, 0x21, 0xfc, 0xc6, 0x1d, 0xb8, 0x3b, 0x9d, 0x03, 0x77, 0x0c, 0x6f, 0x48, 0xf6, 0x6d,
+ 0x18, 0xe2, 0xe1, 0x7b, 0xfa, 0x60, 0x80, 0x9e, 0x34, 0x63, 0xbd, 0xa9, 0x6b, 0xda, 0x88, 0xf7,
+ 0xf6, 0xd7, 0x2c, 0x98, 0x58, 0x5b, 0xa8, 0xd5, 0x83, 0xc6, 0x36, 0x89, 0xe7, 0x38, 0xc3, 0x8a,
+ 0x05, 0xff, 0x63, 0x3d, 0x24, 0x5f, 0x93, 0xc5, 0x31, 0x5d, 0x82, 0x81, 0xad, 0x20, 0x8a, 0xd3,
+ 0x0f, 0xcc, 0x37, 0x82, 0x28, 0xc6, 0x0c, 0x62, 0xff, 0x8e, 0x05, 0x83, 0x2c, 0xb7, 0x70, 0xaf,
+ 0x84, 0xd7, 0xfd, 0x7c, 0x17, 0x7a, 0x09, 0x86, 0xc8, 0xc6, 0x06, 0x69, 0xc4, 0x62, 0x56, 0xa5,
+ 0x97, 0xec, 0xd0, 0x22, 0x2b, 0xa5, 0x97, 0x3e, 0x6b, 0x8c, 0xff, 0xc5, 0x02, 0x19, 0xdd, 0x83,
+ 0x72, 0xec, 0xed, 0x90, 0x39, 0xd7, 0x15, 0x4f, 0x74, 0x0f, 0xe1, 0x94, 0xbc, 0x26, 0x09, 0xe0,
+ 0x84, 0x96, 0xfd, 0x95, 0x02, 0x40, 0x12, 0x15, 0xa2, 0xd7, 0x27, 0xce, 0x77, 0x3c, 0xde, 0x5c,
+ 0xce, 0x78, 0xbc, 0x41, 0x09, 0xc1, 0x8c, 0x97, 0x1b, 0x35, 0x4c, 0xc5, 0xbe, 0x86, 0x69, 0xe0,
+ 0x28, 0xc3, 0xb4, 0x00, 0x53, 0x49, 0x54, 0x0b, 0x33, 0xc4, 0x0f, 0x13, 0x52, 0xd6, 0xd2, 0x40,
+ 0xdc, 0x89, 0x6f, 0x13, 0xb8, 0x24, 0x63, 0xbb, 0xca, 0xbb, 0x86, 0x59, 0x80, 0x1e, 0x21, 0xf7,
+ 0x79, 0xf2, 0x3a, 0x55, 0xc8, 0x7d, 0x9d, 0xfa, 0x71, 0x0b, 0x4e, 0xa7, 0xdb, 0x61, 0x2e, 0x79,
+ 0x5f, 0xb6, 0xe0, 0x0c, 0x7b, 0xa3, 0x63, 0xad, 0x76, 0xbe, 0x08, 0xbe, 0x98, 0x1d, 0xed, 0xa3,
+ 0x7b, 0x8f, 0x13, 0x77, 0xec, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xcb, 0x16, 0x9c, 0xcb,
+ 0x4d, 0x69, 0x85, 0xae, 0x40, 0xc9, 0x69, 0x79, 0x5c, 0x01, 0x26, 0xf6, 0x3b, 0x93, 0x1e, 0x6b,
+ 0xcb, 0x5c, 0xfd, 0xa5, 0xa0, 0x2a, 0xd5, 0x66, 0x21, 0x37, 0xd5, 0x66, 0xcf, 0xcc, 0x99, 0xf6,
+ 0xf7, 0x59, 0x20, 0xbc, 0xb0, 0xfa, 0x38, 0x64, 0xde, 0x92, 0x99, 0x8a, 0x8d, 0xb0, 0xfa, 0x97,
+ 0xf2, 0xdd, 0xd2, 0x44, 0x30, 0x7d, 0x75, 0xa9, 0x1b, 0x21, 0xf4, 0x0d, 0x5a, 0xb6, 0x0b, 0x02,
+ 0x5a, 0x25, 0x4c, 0x67, 0xd5, 0xbb, 0x37, 0xd7, 0x00, 0x5c, 0x86, 0xab, 0xe5, 0x2b, 0x55, 0x57,
+ 0x48, 0x55, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x87, 0x02, 0x8c, 0xc8, 0x30, 0xee, 0x6d, 0xbf, 0x1f,
+ 0xc9, 0xf2, 0x48, 0x79, 0x9d, 0x58, 0x82, 0x5f, 0x4a, 0xb8, 0x96, 0x08, 0xe4, 0x49, 0x82, 0x5f,
+ 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1a, 0x86, 0xa3, 0xf6, 0x3a, 0x43, 0x4f, 0xf9, 0x0c, 0xd5, 0x79,
+ 0x31, 0x96, 0x70, 0xf4, 0x39, 0x98, 0xe4, 0xf5, 0xc2, 0xa0, 0xe5, 0x6c, 0x72, 0x6d, 0xeb, 0xa0,
+ 0x72, 0xf6, 0x9d, 0x5c, 0x49, 0xc1, 0x0e, 0xf7, 0x2b, 0xa7, 0xd3, 0x65, 0x4c, 0x4f, 0xdf, 0x41,
+ 0x85, 0xbd, 0xfd, 0xf3, 0x46, 0xe8, 0x32, 0xed, 0x30, 0x19, 0x48, 0x40, 0x58, 0xc7, 0xb3, 0xdf,
+ 0x01, 0xd4, 0x19, 0xd0, 0x1e, 0xbd, 0xc1, 0x0d, 0xbe, 0xbc, 0x90, 0xb8, 0xdd, 0xf4, 0xf6, 0xba,
+ 0x4b, 0xab, 0x34, 0xf7, 0xe7, 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x8d, 0x22, 0x4c, 0xa6, 0x1d, 0x1c,
+ 0xd1, 0x0d, 0x18, 0xe2, 0x77, 0xa4, 0x20, 0xdf, 0xe5, 0x59, 0x58, 0x73, 0x8b, 0x64, 0xa7, 0x85,
+ 0xb8, 0x66, 0x45, 0x7d, 0xf4, 0x36, 0x8c, 0xb8, 0xc1, 0x7d, 0xff, 0xbe, 0x13, 0xba, 0x73, 0xb5,
+ 0x65, 0xb1, 0x9c, 0x33, 0x59, 0xed, 0x6a, 0x82, 0xa6, 0xbb, 0x5a, 0xb2, 0x27, 0x90, 0x04, 0x84,
+ 0x75, 0x72, 0x68, 0x8d, 0x05, 0xe9, 0xdc, 0xf0, 0x36, 0x57, 0x9c, 0x56, 0x37, 0xeb, 0xdf, 0x05,
+ 0x89, 0xa4, 0x51, 0x1e, 0x13, 0x91, 0x3c, 0x39, 0x00, 0x27, 0x84, 0xd0, 0x77, 0xc2, 0xa9, 0x28,
+ 0x47, 0x3b, 0x97, 0x97, 0xdf, 0xa4, 0x9b, 0xc2, 0x6a, 0xfe, 0x31, 0x2a, 0x04, 0x65, 0xe9, 0xf1,
+ 0xb2, 0x9a, 0xb1, 0x7f, 0xfd, 0x14, 0x18, 0x9b, 0xd8, 0x48, 0x77, 0x65, 0x1d, 0x53, 0xba, 0x2b,
+ 0x0c, 0x25, 0xb2, 0xd3, 0x8a, 0xf7, 0xaa, 0x5e, 0xd8, 0x2d, 0x1d, 0xe3, 0xa2, 0xc0, 0xe9, 0xa4,
+ 0x29, 0x21, 0x58, 0xd1, 0xc9, 0xce, 0x49, 0x56, 0xfc, 0x10, 0x73, 0x92, 0x0d, 0x9c, 0x60, 0x4e,
+ 0xb2, 0x55, 0x18, 0xde, 0xf4, 0x62, 0x4c, 0x5a, 0x81, 0xe0, 0x4e, 0x33, 0xd7, 0xe1, 0x75, 0x8e,
+ 0xd2, 0x99, 0xfd, 0x46, 0x00, 0xb0, 0x24, 0x82, 0xde, 0x50, 0x3b, 0x70, 0x28, 0x5f, 0xb8, 0xeb,
+ 0x7c, 0xbf, 0xcc, 0xdc, 0x83, 0x22, 0xf3, 0xd8, 0xf0, 0xc3, 0x66, 0x1e, 0x5b, 0x92, 0xf9, 0xc2,
+ 0x4a, 0xf9, 0xa6, 0xfa, 0x2c, 0x1d, 0x58, 0x8f, 0x2c, 0x61, 0x77, 0xf5, 0x1c, 0x6b, 0xe5, 0xfc,
+ 0x93, 0x40, 0xa5, 0x4f, 0xeb, 0x33, 0xb3, 0xda, 0xf7, 0x59, 0x70, 0xa6, 0x95, 0x95, 0x6e, 0x50,
+ 0xbc, 0x35, 0xbd, 0xd4, 0x77, 0x3e, 0x45, 0xa3, 0x41, 0x26, 0xe5, 0x67, 0xa2, 0xe1, 0xec, 0xe6,
+ 0xe8, 0x40, 0x87, 0xeb, 0xae, 0x48, 0x0d, 0xf6, 0x64, 0x4e, 0x8a, 0xb6, 0x2e, 0x89, 0xd9, 0xd6,
+ 0x32, 0xd2, 0x81, 0x7d, 0x3c, 0x2f, 0x1d, 0x58, 0xdf, 0x49, 0xc0, 0xde, 0x50, 0xc9, 0xd9, 0xc6,
+ 0xf2, 0x97, 0x12, 0x4f, 0xbd, 0xd6, 0x33, 0x25, 0xdb, 0x1b, 0x2a, 0x25, 0x5b, 0x97, 0xe8, 0x81,
+ 0x3c, 0xe1, 0x5a, 0xcf, 0x44, 0x6c, 0x5a, 0x32, 0xb5, 0x89, 0xe3, 0x49, 0xa6, 0x66, 0x5c, 0x35,
+ 0x3c, 0x9f, 0xd7, 0x33, 0x3d, 0xae, 0x1a, 0x83, 0x6e, 0xf7, 0xcb, 0x86, 0x27, 0x8e, 0x9b, 0x7a,
+ 0xa8, 0xc4, 0x71, 0x77, 0xf5, 0x44, 0x6c, 0xa8, 0x47, 0xa6, 0x31, 0x8a, 0xd4, 0x67, 0xfa, 0xb5,
+ 0xbb, 0xfa, 0x05, 0x78, 0x2a, 0x9f, 0xae, 0xba, 0xe7, 0x3a, 0xe9, 0x66, 0x5e, 0x81, 0x1d, 0x69,
+ 0xdd, 0x4e, 0x9f, 0x4c, 0x5a, 0xb7, 0x33, 0xc7, 0x9e, 0xd6, 0xed, 0xec, 0x09, 0xa4, 0x75, 0x7b,
+ 0xec, 0x43, 0x4d, 0xeb, 0x36, 0xfd, 0x08, 0xd2, 0xba, 0xad, 0x26, 0x69, 0xdd, 0xce, 0xe5, 0x4f,
+ 0x49, 0x86, 0xfd, 0x70, 0x4e, 0x32, 0xb7, 0xbb, 0xcc, 0x88, 0x80, 0x47, 0xe0, 0x10, 0xe1, 0x0d,
+ 0xb3, 0x23, 0xdf, 0x65, 0x85, 0xe9, 0xe0, 0x53, 0xa2, 0x40, 0x38, 0x21, 0x45, 0xe9, 0x26, 0xc9,
+ 0xdd, 0x1e, 0xef, 0xa2, 0xc7, 0xcd, 0xd2, 0x90, 0x75, 0x49, 0xe9, 0xf6, 0x3a, 0x4f, 0xe9, 0x76,
+ 0x3e, 0xff, 0x24, 0x4f, 0x5f, 0x77, 0x66, 0x22, 0xb7, 0xef, 0x2f, 0xc0, 0xc5, 0xee, 0xfb, 0x22,
+ 0x51, 0xcf, 0xd5, 0x92, 0xe7, 0xa4, 0x94, 0x7a, 0x8e, 0xcb, 0x56, 0x09, 0x56, 0xdf, 0x61, 0x8e,
+ 0xae, 0xc3, 0x94, 0x32, 0x3c, 0x6e, 0x7a, 0x8d, 0x3d, 0x2d, 0x35, 0xb6, 0x72, 0xb0, 0xac, 0xa7,
+ 0x11, 0x70, 0x67, 0x1d, 0x34, 0x07, 0x13, 0x46, 0xe1, 0x72, 0x55, 0xc8, 0x50, 0x4a, 0x1f, 0x58,
+ 0x37, 0xc1, 0x38, 0x8d, 0x6f, 0xff, 0xb4, 0x05, 0x8f, 0xe5, 0x64, 0x4c, 0xe9, 0x3b, 0x8a, 0xcf,
+ 0x06, 0x4c, 0xb4, 0xcc, 0xaa, 0x3d, 0x82, 0x7d, 0x19, 0x79, 0x59, 0x54, 0x5f, 0x53, 0x00, 0x9c,
+ 0x26, 0x6a, 0x7f, 0xd5, 0x82, 0x0b, 0x5d, 0x8d, 0x50, 0x10, 0x86, 0xb3, 0x9b, 0x3b, 0x91, 0xb3,
+ 0x10, 0x12, 0x97, 0xf8, 0xb1, 0xe7, 0x34, 0xeb, 0x2d, 0xd2, 0xd0, 0x14, 0xac, 0xcc, 0xd6, 0xe7,
+ 0xfa, 0x4a, 0x7d, 0xae, 0x13, 0x03, 0xe7, 0xd4, 0x44, 0x4b, 0x80, 0x3a, 0x21, 0x62, 0x86, 0x59,
+ 0xcc, 0xca, 0x4e, 0x7a, 0x38, 0xa3, 0xc6, 0xfc, 0x95, 0xdf, 0xfc, 0xbd, 0x8b, 0x1f, 0xfb, 0xad,
+ 0xdf, 0xbb, 0xf8, 0xb1, 0xdf, 0xfe, 0xbd, 0x8b, 0x1f, 0xfb, 0xee, 0x83, 0x8b, 0xd6, 0x6f, 0x1e,
+ 0x5c, 0xb4, 0x7e, 0xeb, 0xe0, 0xa2, 0xf5, 0xdb, 0x07, 0x17, 0xad, 0xdf, 0x3d, 0xb8, 0x68, 0x7d,
+ 0xe5, 0xf7, 0x2f, 0x7e, 0xec, 0xad, 0xc2, 0xee, 0xf3, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x3d,
+ 0xfc, 0x67, 0x65, 0x12, 0xee, 0x00, 0x00,
}
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index bfc1fe79b..742627b09 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ syntax = 'proto2';
package k8s.io.api.core.v1;
-import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto";
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
@@ -32,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
option go_package = "v1";
// Represents a Persistent Disk resource in AWS.
-//
+//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
@@ -199,7 +198,7 @@ message CSIPersistentVolumeSource {
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference controllerPublishSecretRef = 6;
@@ -207,7 +206,7 @@ message CSIPersistentVolumeSource {
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeStageVolume and NodeUnstageVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference nodeStageSecretRef = 7;
@@ -215,10 +214,50 @@ message CSIPersistentVolumeSource {
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
optional SecretReference nodePublishSecretRef = 8;
+
+ // ControllerExpandSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerExpandVolume call.
+ // This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ optional SecretReference controllerExpandSecretRef = 9;
+}
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+message CSIVolumeSource {
+ // Driver is the name of the CSI driver that handles this volume.
+ // Consult with your admin for the correct name as registered in the cluster.
+ optional string driver = 1;
+
+ // Specifies a read-only configuration for the volume.
+ // Defaults to false (read/write).
+ // +optional
+ optional bool readOnly = 2;
+
+ // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs".
+ // If not provided, the empty value is passed to the associated CSI driver
+ // which will determine the default filesystem to apply.
+ // +optional
+ optional string fsType = 3;
+
+ // VolumeAttributes stores driver-specific properties that are passed to the CSI
+ // driver. Consult your driver's documentation for supported values.
+ // +optional
+ map<string, string> volumeAttributes = 4;
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secret references are passed.
+ // +optional
+ optional LocalObjectReference nodePublishSecretRef = 5;
}
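
For orientation, a minimal Go sketch of populating the new CSIVolumeSource through the vendored k8s.io/api/core/v1 structs; the driver name, volume attribute, and secret name are hypothetical:

package example

import corev1 "k8s.io/api/core/v1"

var (
	csiReadOnly = true
	csiFSType   = "ext4"

	// Inline CSI volume source; VolumeAttributes are interpreted by the driver.
	inlineCSI = corev1.CSIVolumeSource{
		Driver:   "csi.example.com", // hypothetical driver name
		ReadOnly: &csiReadOnly,
		FSType:   &csiFSType,
		VolumeAttributes: map[string]string{
			"size": "1Gi", // hypothetical driver-specific attribute
		},
		NodePublishSecretRef: &corev1.LocalObjectReference{Name: "csi-publish-secret"},
	}
)
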
// Adds and removes POSIX capabilities from running containers.
@@ -302,6 +341,34 @@ message CephFSVolumeSource {
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
+message CinderPersistentVolumeSource {
+ // volume id used to identify the volume in cinder
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional string volumeID = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ // +optional
+ optional string fsType = 2;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ // +optional
+ optional bool readOnly = 3;
+
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ optional SecretReference secretRef = 4;
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
message CinderVolumeSource {
// volume id used to identify the volume in cinder
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
@@ -319,6 +386,11 @@ message CinderVolumeSource {
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
optional bool readOnly = 3;
+
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ optional LocalObjectReference secretRef = 4;
}
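
A rough sketch of the two Cinder variants as Go structs from the vendored k8s.io/api/core/v1 package; the volume ID and secret names are hypothetical. The persistent variant takes a namespaced SecretReference, while the pod-level variant is restricted to a LocalObjectReference in the pod's own namespace:

package example

import corev1 "k8s.io/api/core/v1"

var (
	// Cluster-scoped variant, used from a PersistentVolume spec.
	cinderPV = corev1.CinderPersistentVolumeSource{
		VolumeID: "a1b2c3d4-0000-0000-0000-000000000000", // hypothetical Cinder volume ID
		FSType:   "ext4",
		SecretRef: &corev1.SecretReference{
			Name:      "openstack-credentials",
			Namespace: "kube-system",
		},
	}

	// Pod-scoped variant; the secret must live in the pod's namespace.
	cinderInline = corev1.CinderVolumeSource{
		VolumeID:  "a1b2c3d4-0000-0000-0000-000000000000",
		ReadOnly:  true,
		SecretRef: &corev1.LocalObjectReference{Name: "openstack-credentials"},
	}
)
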
// ClientIPConfig represents the configurations of Client IP based session affinity.
@@ -404,7 +476,7 @@ message ConfigMap {
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
-//
+//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
message ConfigMapEnvSource {
@@ -424,7 +496,7 @@ message ConfigMapKeySelector {
// The key to select.
optional string key = 2;
- // Specify whether the ConfigMap or it's key must be defined
+ // Specify whether the ConfigMap or its key must be defined
// +optional
optional bool optional = 3;
}
@@ -439,8 +511,33 @@ message ConfigMapList {
repeated ConfigMap items = 2;
}
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+message ConfigMapNodeConfigSource {
+ // Namespace is the metadata.namespace of the referenced ConfigMap.
+ // This field is required in all cases.
+ optional string namespace = 1;
+
+ // Name is the metadata.name of the referenced ConfigMap.
+ // This field is required in all cases.
+ optional string name = 2;
+
+ // UID is the metadata.UID of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ optional string uid = 3;
+
+ // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ optional string resourceVersion = 4;
+
+ // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+ // This field is required in all cases.
+ optional string kubeletConfigKey = 5;
+}
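
A minimal sketch, using the vendored k8s.io/api/core/v1 types, of a Node.Spec.ConfigSource pointing at a kubelet configuration ConfigMap; the ConfigMap name and key are hypothetical:

package example

import corev1 "k8s.io/api/core/v1"

// UID and ResourceVersion are left unset because they are forbidden in
// Node.Spec; the node reports them back in Node.Status.
var nodeConfig = corev1.NodeConfigSource{
	ConfigMap: &corev1.ConfigMapNodeConfigSource{
		Namespace:        "kube-system",
		Name:             "node-config-pool-a", // hypothetical ConfigMap name
		KubeletConfigKey: "kubelet",            // key holding the KubeletConfiguration payload
	},
}
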
+
// Adapts a ConfigMap into a projected volume.
-//
+//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
@@ -459,13 +556,13 @@ message ConfigMapProjection {
// +optional
repeated KeyToPath items = 2;
- // Specify whether the ConfigMap or it's keys must be defined
+ // Specify whether the ConfigMap or its keys must be defined
// +optional
optional bool optional = 4;
}
// Adapts a ConfigMap into a volume.
-//
+//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
@@ -491,7 +588,7 @@ message ConfigMapVolumeSource {
// +optional
optional int32 defaultMode = 3;
- // Specify whether the ConfigMap or it's keys must be defined
+ // Specify whether the ConfigMap or its keys must be defined
// +optional
optional bool optional = 4;
}
@@ -549,6 +646,9 @@ message Container {
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=containerPort
+ // +listMapKey=protocol
repeated ContainerPort ports = 6;
// List of sources to populate environment variables in the container.
@@ -569,7 +669,7 @@ message Container {
// Compute Resources required by this container.
// Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
optional ResourceRequirements resources = 8;
@@ -581,7 +681,7 @@ message Container {
repeated VolumeMount volumeMounts = 9;
// volumeDevices is the list of block devices to be used by the container.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -692,7 +792,7 @@ message ContainerPort {
// This must be a valid port number, 0 < x < 65536.
optional int32 containerPort = 3;
- // Protocol for port. Must be UDP or TCP.
+ // Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP".
// +optional
optional string protocol = 4;
@@ -809,41 +909,6 @@ message DaemonEndpoint {
optional int32 Port = 1;
}
-// DeleteOptions may be provided when deleting an API object
-// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
-// +k8s:openapi-gen=false
-message DeleteOptions {
- // The duration in seconds before the object should be deleted. Value must be non-negative integer.
- // The value zero indicates delete immediately. If this value is nil, the default grace period for the
- // specified type will be used.
- // Defaults to a per object value if not specified. zero means delete immediately.
- // +optional
- optional int64 gracePeriodSeconds = 1;
-
- // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
- // returned.
- // +optional
- optional Preconditions preconditions = 2;
-
- // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
- // Should the dependent objects be orphaned. If true/false, the "orphan"
- // finalizer will be added to/removed from the object's finalizers list.
- // Either this field or PropagationPolicy may be set, but not both.
- // +optional
- optional bool orphanDependents = 3;
-
- // Whether and how garbage collection will be performed.
- // Either this field or OrphanDependents may be set, but not both.
- // The default policy is decided by the existing finalizer set in the
- // metadata.finalizers and the resource-specific default policy.
- // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
- // allow the garbage collector to delete the dependents in the background;
- // 'Foreground' - a cascading policy that deletes all dependents in the
- // foreground.
- // +optional
- optional string propagationPolicy = 4;
-}
-
// Represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
@@ -946,7 +1011,7 @@ message EndpointPort {
optional int32 port = 2;
// The IP protocol for this port.
- // Must be UDP or TCP.
+ // Must be UDP, TCP, or SCTP.
// Default is TCP.
// +optional
optional string protocol = 3;
@@ -1162,6 +1227,7 @@ message EventSeries {
optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
// State of this Series: Ongoing or Finished
+ // Deprecated. Planned removal for 1.18
optional string state = 3;
}
@@ -1292,7 +1358,7 @@ message FlockerVolumeSource {
}
// Represents a Persistent Disk resource in Google Compute Engine.
-//
+//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
@@ -1328,6 +1394,10 @@ message GCEPersistentDiskVolumeSource {
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
message GitRepoVolumeSource {
// Repository URL
optional string repository = 1;
@@ -1346,6 +1416,30 @@ message GitRepoVolumeSource {
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsPersistentVolumeSource {
+ // EndpointsName is the endpoint name that details Glusterfs topology.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ optional string endpoints = 1;
+
+ // Path is the Glusterfs volume path.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ optional string path = 2;
+
+ // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ optional bool readOnly = 3;
+
+ // EndpointsNamespace is the namespace that contains Glusterfs endpoint.
+  // If this field is empty, the EndpointsNamespace defaults to the same namespace as the bound PVC.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ optional string endpointsNamespace = 4;
+}
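
A short sketch of the new GlusterfsPersistentVolumeSource with its endpointsNamespace field, using the vendored k8s.io/api/core/v1 structs; the endpoints name, path, and namespace are hypothetical:

package example

import corev1 "k8s.io/api/core/v1"

var glusterEndpointsNamespace = "storage" // hypothetical namespace holding the endpoints object

// PersistentVolume-side Glusterfs source; EndpointsNamespace lets the
// endpoints live outside the namespace of the bound PVC.
var glusterfsPV = corev1.GlusterfsPersistentVolumeSource{
	EndpointsName:      "glusterfs-cluster",
	Path:               "kube_vol",
	ReadOnly:           false,
	EndpointsNamespace: &glusterEndpointsNamespace,
}
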
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
message GlusterfsVolumeSource {
// EndpointsName is the endpoint name that details Glusterfs topology.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
@@ -1583,11 +1677,15 @@ message Lifecycle {
// +optional
optional Handler postStart = 1;
- // PreStop is called immediately before a container is terminated.
- // The container is terminated after the handler completes.
- // The reason for termination is passed to the handler.
- // Regardless of the outcome of the handler, the container is eventually terminated.
- // Other management of the container blocks until the hook completes.
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The reason for termination is passed to the
+ // handler. The Pod's termination grace period countdown begins before the
+  // PreStop hook is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period. Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
optional Handler preStop = 2;
@@ -1662,43 +1760,6 @@ message List {
repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
}
-// ListOptions is the query options to a standard REST list call.
-// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
-// +k8s:openapi-gen=false
-message ListOptions {
- // A selector to restrict the list of returned objects by their labels.
- // Defaults to everything.
- // +optional
- optional string labelSelector = 1;
-
- // A selector to restrict the list of returned objects by their fields.
- // Defaults to everything.
- // +optional
- optional string fieldSelector = 2;
-
- // If true, partially initialized resources are included in the response.
- // +optional
- optional bool includeUninitialized = 6;
-
- // Watch for changes to the described resources and return them as a stream of
- // add, update, and remove notifications. Specify resourceVersion.
- // +optional
- optional bool watch = 3;
-
- // When specified with a watch call, shows changes that occur after that particular version of a resource.
- // Defaults to changes from the beginning of history.
- // When specified for list:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
- // +optional
- optional string resourceVersion = 4;
-
- // Timeout for the list/watch call.
- // +optional
- optional int64 timeoutSeconds = 5;
-}
-
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
message LoadBalancerIngress {
@@ -1731,12 +1792,18 @@ message LocalObjectReference {
optional string name = 1;
}
-// Local represents directly-attached storage with node affinity
+// Local represents directly-attached storage with node affinity (Beta feature)
message LocalVolumeSource {
- // The full path to the volume on the node
- // For alpha, this path must be a directory
- // Once block as a source is supported, then this path can point to a block device
+ // The full path to the volume on the node.
+ // It can be either a directory or block device (disk, partition, ...).
optional string path = 1;
+
+ // Filesystem type to mount.
+ // It applies only when the Path is a block device.
+ // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+ // +optional
+ optional string fsType = 2;
}
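
A minimal sketch of a LocalVolumeSource backed by a raw block device, where the new fsType field applies; the device path is hypothetical:

package example

import corev1 "k8s.io/api/core/v1"

var localFSType = "xfs"

// FSType is only honored when Path points at a block device rather than a directory.
var localDisk = corev1.LocalVolumeSource{
	Path:   "/dev/disk/by-id/nvme-example", // hypothetical device path
	FSType: &localFSType,
}
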
// Represents an NFS mount that lasts the lifetime of a pod.
@@ -1885,7 +1952,58 @@ message NodeCondition {
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
message NodeConfigSource {
- optional ObjectReference configMapRef = 1;
+ // ConfigMap is a reference to a Node's ConfigMap
+ optional ConfigMapNodeConfigSource configMap = 2;
+}
+
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+message NodeConfigStatus {
+ // Assigned reports the checkpointed config the node will try to use.
+ // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+ // config payload to local disk, along with a record indicating intended
+ // config. The node refers to this record to choose its config checkpoint, and
+ // reports this record in Assigned. Assigned only updates in the status after
+ // the record has been checkpointed to disk. When the Kubelet is restarted,
+ // it tries to make the Assigned config the Active config by loading and
+ // validating the checkpointed payload identified by Assigned.
+ // +optional
+ optional NodeConfigSource assigned = 1;
+
+ // Active reports the checkpointed config the node is actively using.
+ // Active will represent either the current version of the Assigned config,
+ // or the current LastKnownGood config, depending on whether attempting to use the
+ // Assigned config results in an error.
+ // +optional
+ optional NodeConfigSource active = 2;
+
+ // LastKnownGood reports the checkpointed config the node will fall back to
+ // when it encounters an error attempting to use the Assigned config.
+ // The Assigned config becomes the LastKnownGood config when the node determines
+ // that the Assigned config is stable and correct.
+ // This is currently implemented as a 10-minute soak period starting when the local
+ // record of Assigned config is updated. If the Assigned config is Active at the end
+ // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+ // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+ // because the local default config is always assumed good.
+ // You should not make assumptions about the node's method of determining config stability
+ // and correctness, as this may change or become configurable in the future.
+ // +optional
+ optional NodeConfigSource lastKnownGood = 3;
+
+ // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+ // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+ // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+ // to load or validate the Assigned config, etc.
+ // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+ // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+ // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+ // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+ // by fixing the config assigned in Spec.ConfigSource.
+ // You can find additional information for debugging by searching the error message in the Kubelet log.
+ // Error is a human-readable description of the error state; machines can check whether or not Error
+ // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+ // +optional
+ optional string error = 4;
}
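
A small Go sketch of reading the new NodeConfigStatus off a Node object, assuming the vendored k8s.io/api/core/v1 types; the helper function is illustrative only:

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportNodeConfig summarizes the dynamic kubelet config state carried in Node.Status.Config.
func reportNodeConfig(node *corev1.Node) {
	cfg := node.Status.Config
	if cfg == nil {
		fmt.Println("dynamic kubelet config not in use")
		return
	}
	if cfg.Error != "" {
		// Human-readable only; callers should not parse the error text.
		fmt.Println("config error:", cfg.Error)
	}
	if cfg.Active != nil && cfg.Active.ConfigMap != nil {
		fmt.Println("active config:", cfg.Active.ConfigMap.Namespace+"/"+cfg.Active.ConfigMap.Name)
	}
	if cfg.LastKnownGood != nil && cfg.LastKnownGood.ConfigMap != nil {
		fmt.Println("last known good:", cfg.LastKnownGood.ConfigMap.Namespace+"/"+cfg.LastKnownGood.ConfigMap.Name)
	}
}
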
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
@@ -1947,10 +2065,17 @@ message NodeSelectorRequirement {
repeated string values = 3;
}
-// A null or empty node selector term matches no objects.
+// A null or empty node selector term matches no objects. Its requirements are
+// ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
message NodeSelectorTerm {
- // Required. A list of node selector requirements. The requirements are ANDed.
+ // A list of node selector requirements by node's labels.
+ // +optional
repeated NodeSelectorRequirement matchExpressions = 1;
+
+ // A list of node selector requirements by node's fields.
+ // +optional
+ repeated NodeSelectorRequirement matchFields = 2;
}
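
A minimal sketch combining matchExpressions with the new matchFields in a single NodeSelectorTerm, using the vendored k8s.io/api/core/v1 types; the node name is hypothetical:

package example

import corev1 "k8s.io/api/core/v1"

// Within one term, the label requirement and the field requirement are ANDed.
var term = corev1.NodeSelectorTerm{
	MatchExpressions: []corev1.NodeSelectorRequirement{{
		Key:      "kubernetes.io/os",
		Operator: corev1.NodeSelectorOpIn,
		Values:   []string{"linux"},
	}},
	MatchFields: []corev1.NodeSelectorRequirement{{
		Key:      "metadata.name",
		Operator: corev1.NodeSelectorOpIn,
		Values:   []string{"node-a"}, // hypothetical node name
	}},
}
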
// NodeSpec describes the attributes that a node is created with.
@@ -1959,11 +2084,6 @@ message NodeSpec {
// +optional
optional string podCIDR = 1;
- // External ID of the node assigned by some machine database (e.g. a cloud provider).
- // Deprecated.
- // +optional
- optional string externalID = 2;
-
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
optional string providerID = 3;
@@ -1981,6 +2101,11 @@ message NodeSpec {
// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
// +optional
optional NodeConfigSource configSource = 6;
+
+ // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+ // see: https://issues.k8s.io/61966
+ // +optional
+ optional string externalID = 2;
}
// NodeStatus is information about the current status of a node.
@@ -2036,6 +2161,10 @@ message NodeStatus {
// List of volumes that are attached to the node.
// +optional
repeated AttachedVolume volumesAttached = 10;
+
+ // Status of the config assigned to the node via the dynamic Kubelet config feature.
+ // +optional
+ optional NodeConfigStatus config = 11;
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
@@ -2085,170 +2214,6 @@ message ObjectFieldSelector {
optional string fieldPath = 2;
}
-// ObjectMeta is metadata that all persisted resources must have, which includes all objects
-// users must create.
-// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
-// +k8s:openapi-gen=false
-message ObjectMeta {
- // Name must be unique within a namespace. Is required when creating resources, although
- // some resources may allow a client to request the generation of an appropriate name
- // automatically. Name is primarily intended for creation idempotence and configuration
- // definition.
- // Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- // +optional
- optional string name = 1;
-
- // GenerateName is an optional prefix, used by the server, to generate a unique
- // name ONLY IF the Name field has not been provided.
- // If this field is used, the name returned to the client will be different
- // than the name passed. This value will also be combined with a unique suffix.
- // The provided value has the same validation rules as the Name field,
- // and may be truncated by the length of the suffix required to make the value
- // unique on the server.
- //
- // If this field is specified and the generated name exists, the server will
- // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
- // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
- // should retry (optionally after the time indicated in the Retry-After header).
- //
- // Applied only if Name is not specified.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
- // +optional
- optional string generateName = 2;
-
- // Namespace defines the space within each name must be unique. An empty namespace is
- // equivalent to the "default" namespace, but "default" is the canonical representation.
- // Not all objects are required to be scoped to a namespace - the value of this field for
- // those objects will be empty.
- //
- // Must be a DNS_LABEL.
- // Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- // +optional
- optional string namespace = 3;
-
- // SelfLink is a URL representing this object.
- // Populated by the system.
- // Read-only.
- // +optional
- optional string selfLink = 4;
-
- // UID is the unique in time and space value for this object. It is typically generated by
- // the server on successful creation of a resource and is not allowed to change on PUT
- // operations.
- //
- // Populated by the system.
- // Read-only.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- // +optional
- optional string uid = 5;
-
- // An opaque value that represents the internal version of this object that can
- // be used by clients to determine when objects have changed. May be used for optimistic
- // concurrency, change detection, and the watch operation on a resource or set of resources.
- // Clients must treat these values as opaque and passed unmodified back to the server.
- // They may only be valid for a particular resource or set of resources.
- //
- // Populated by the system.
- // Read-only.
- // Value must be treated as opaque by clients and .
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
- // +optional
- optional string resourceVersion = 6;
-
- // A sequence number representing a specific generation of the desired state.
- // Populated by the system. Read-only.
- // +optional
- optional int64 generation = 7;
-
- // CreationTimestamp is a timestamp representing the server time when this object was
- // created. It is not guaranteed to be set in happens-before order across separate operations.
- // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
- //
- // Populated by the system.
- // Read-only.
- // Null for lists.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8;
-
- // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
- // field is set by the server when a graceful deletion is requested by the user, and is not
- // directly settable by a client. The resource is expected to be deleted (no longer visible
- // from resource lists, and not reachable by name) after the time in this field. Once set,
- // this value may not be unset or be set further into the future, although it may be shortened
- // or the resource may be deleted prior to this time. For example, a user may request that
- // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
- // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
- // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
- // API. In the presence of network partitions, this object may still exist after this
- // timestamp, until an administrator or automated process can determine the resource is
- // fully terminated.
- // If not set, graceful deletion of the object has not been requested.
- //
- // Populated by the system when a graceful deletion is requested.
- // Read-only.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9;
-
- // Number of seconds allowed for this object to gracefully terminate before
- // it will be removed from the system. Only set when deletionTimestamp is also set.
- // May only be shortened.
- // Read-only.
- // +optional
- optional int64 deletionGracePeriodSeconds = 10;
-
- // Map of string keys and values that can be used to organize and categorize
- // (scope and select) objects. May match selectors of replication controllers
- // and services.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- // +optional
- map<string, string> labels = 11;
-
- // Annotations is an unstructured key value map stored with a resource that may be
- // set by external tools to store and retrieve arbitrary metadata. They are not
- // queryable and should be preserved when modifying objects.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- // +optional
- map<string, string> annotations = 12;
-
- // List of objects depended by this object. If ALL objects in the list have
- // been deleted, this object will be garbage collected. If this object is managed by a controller,
- // then an entry in this list will point to this controller, with the controller field set to true.
- // There cannot be more than one managing controller.
- // +optional
- // +patchMergeKey=uid
- // +patchStrategy=merge
- repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13;
-
- // An initializer is a controller which enforces some system invariant at object creation time.
- // This field is a list of initializers that have not yet acted on this object. If nil or empty,
- // this object has been completely initialized. Otherwise, the object is considered uninitialized
- // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
- // observe uninitialized objects.
- //
- // When an object is created, the system will populate this list with the current set of initializers.
- // Only privileged users may set or modify this list. Once it is empty, it may not be modified further
- // by any user.
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Initializers initializers = 16;
-
- // Must be empty before the object is deleted from the registry. Each entry
- // is an identifier for the responsible component that will remove the entry
- // from the list. If the deletionTimestamp of the object is non-nil, entries
- // in this list can only be removed.
- // +optional
- // +patchStrategy=merge
- repeated string finalizers = 14;
-
- // The name of the cluster which the object belongs to.
- // This is used to distinguish resources with same name and namespace in different clusters.
- // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
- // +optional
- optional string clusterName = 15;
-}
-
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message ObjectReference {
@@ -2400,9 +2365,20 @@ message PersistentVolumeClaimSpec {
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +optional
optional string volumeMode = 6;
+
+ // This field requires the VolumeSnapshotDataSource alpha feature gate to be
+ // enabled and currently VolumeSnapshot is the only supported data source.
+ // If the provisioner can support VolumeSnapshot data source, it will create
+ // a new volume and data will be restored to the volume at the same time.
+ // If the provisioner does not support VolumeSnapshot data source, volume will
+ // not be created and the failure will be reported as an event.
+ // In the future, we plan to support more data source types and the behavior
+ // of the provisioner may change.
+ // +optional
+ optional TypedLocalObjectReference dataSource = 7;
}
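
A minimal sketch of a PersistentVolumeClaimSpec using the new dataSource field to restore from a VolumeSnapshot, assuming the vendored k8s.io/api/core/v1 types; the snapshot name is hypothetical, and snapshot.storage.k8s.io is the API group that typically serves VolumeSnapshot objects:

package example

import corev1 "k8s.io/api/core/v1"

var snapshotAPIGroup = "snapshot.storage.k8s.io"

// Requires the VolumeSnapshotDataSource feature gate and a provisioner that
// supports snapshot data sources.
var pvcSpec = corev1.PersistentVolumeClaimSpec{
	AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
	DataSource: &corev1.TypedLocalObjectReference{
		APIGroup: &snapshotAPIGroup,
		Kind:     "VolumeSnapshot",
		Name:     "db-snapshot-2019-06-01", // hypothetical snapshot name
	},
}
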
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
@@ -2482,7 +2458,7 @@ message PersistentVolumeSource {
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
- optional GlusterfsVolumeSource glusterfs = 4;
+ optional GlusterfsPersistentVolumeSource glusterfs = 4;
// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
@@ -2502,7 +2478,7 @@ message PersistentVolumeSource {
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
- optional CinderVolumeSource cinder = 8;
+ optional CinderPersistentVolumeSource cinder = 8;
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
@@ -2557,7 +2533,7 @@ message PersistentVolumeSource {
// +optional
optional StorageOSPersistentVolumeSource storageos = 21;
- // CSI represents storage that handled by an external CSI driver (Beta feature).
+ // CSI represents storage that is handled by an external CSI driver (Beta feature).
// +optional
optional CSIPersistentVolumeSource csi = 22;
}
@@ -2605,7 +2581,7 @@ message PersistentVolumeSpec {
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +optional
optional string volumeMode = 8;
@@ -2775,7 +2751,6 @@ message PodAttachOptions {
// PodCondition contains details for the current condition of this pod.
message PodCondition {
// Type is the type of the condition.
- // Currently only Ready.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
optional string type = 1;
@@ -2944,6 +2919,12 @@ message PodProxyOptions {
optional string path = 1;
}
+// PodReadinessGate contains the reference to a pod condition
+message PodReadinessGate {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
+ optional string conditionType = 1;
+}
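
A minimal sketch of a PodReadinessGate as a Go struct; the condition type is hypothetical and would normally be set to "True" in the pod's status by an external controller:

package example

import corev1 "k8s.io/api/core/v1"

// The pod is only reported Ready once a condition of this type is True.
var gate = corev1.PodReadinessGate{
	ConditionType: "example.com/load-balancer-attached", // hypothetical condition type
}
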
+
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
@@ -2956,6 +2937,10 @@ message PodSecurityContext {
// +optional
optional SELinuxOptions seLinuxOptions = 1;
+ // Windows security options.
+ // +optional
+ optional WindowsSecurityContextOptions windowsOptions = 8;
+
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in SecurityContext. If set in both SecurityContext and
@@ -2990,14 +2975,19 @@ message PodSecurityContext {
// A special supplemental group that applies to all containers in a pod.
// Some volume types allow the Kubelet to change the ownership of that volume
// to be owned by the pod:
- //
+ //
// 1. The owning GID will be the FSGroup
// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
// 3. The permission bits are OR'd with rw-rw----
- //
+ //
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
// +optional
optional int64 fsGroup = 5;
+
+ // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ // sysctls (by the container runtime) might fail to launch.
+ // +optional
+ repeated Sysctl sysctls = 7;
}
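
A minimal sketch of a PodSecurityContext using the new sysctls field alongside fsGroup, with the vendored k8s.io/api/core/v1 types:

package example

import corev1 "k8s.io/api/core/v1"

var fsGroup int64 = 2000

// Sysctls not supported by the container runtime cause the pod to fail to
// launch rather than being silently ignored.
var podSecurity = corev1.PodSecurityContext{
	FSGroup: &fsGroup,
	Sysctls: []corev1.Sysctl{
		{Name: "net.core.somaxconn", Value: "1024"},
	},
}
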
// Describes the class of pods that should avoid this node.
@@ -3125,7 +3115,7 @@ message PodSpec {
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
- // This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
+ // This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
optional bool shareProcessNamespace = 27;
@@ -3196,12 +3186,69 @@ message PodSpec {
// configuration based on DNSPolicy.
// +optional
optional PodDNSConfig dnsConfig = 26;
+
+ // If specified, all readiness gates will be evaluated for pod readiness.
+ // A pod is ready when all its containers are ready AND
+ // all conditions specified in the readiness gates have status equal to "True"
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
+ // +optional
+ repeated PodReadinessGate readinessGates = 28;
+
+ // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ // empty definition that uses the default runtime handler.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+ // This is a beta feature as of Kubernetes v1.14.
+ // +optional
+ optional string runtimeClassName = 29;
+
+ // EnableServiceLinks indicates whether information about services should be injected into pod's
+ // environment variables, matching the syntax of Docker links.
+ // Optional: Defaults to true.
+ // +optional
+ optional bool enableServiceLinks = 30;
+
+ // PreemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
+ // +optional
+ optional string preemptionPolicy = 31;
+
+ // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+ // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
}
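
A sketch of a PodSpec fragment exercising the fields added above (readinessGates, runtimeClassName, enableServiceLinks, overhead), assuming the vendored k8s.io/api/core/v1 and apimachinery resource packages; the image, RuntimeClass, and condition names are hypothetical, and overhead is normally populated by the RuntimeClass admission controller rather than by users:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

var (
	runtimeClass       = "gvisor" // hypothetical RuntimeClass name
	enableServiceLinks = true
)

var podSpec = corev1.PodSpec{
	Containers:         []corev1.Container{{Name: "app", Image: "example.com/app:1.0"}}, // hypothetical image
	RuntimeClassName:   &runtimeClass,
	EnableServiceLinks: &enableServiceLinks,
	ReadinessGates: []corev1.PodReadinessGate{
		{ConditionType: "example.com/load-balancer-attached"},
	},
	// Per-pod resource overhead attributed to the RuntimeClass.
	Overhead: corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("250m"),
		corev1.ResourceMemory: resource.MustParse("64Mi"),
	},
}
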
// PodStatus represents information about the status of a pod. Status may trail the actual
-// state of a system.
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
message PodStatus {
- // Current condition of the pod.
+ // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+ // The conditions array, the reason and message fields, and the individual container status
+ // arrays contain more detail about the pod's status.
+ // There are five possible phase values:
+ //
+ // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+ // container images has not been created. This includes time before being scheduled as
+ // well as time spent downloading images over the network, which could take a while.
+ // Running: The pod has been bound to a node, and all of the containers have been created.
+ // At least one container is still running, or is in the process of starting or restarting.
+ // Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+ // Failed: All containers in the pod have terminated, and at least one container has
+ // terminated in failure. The container either exited with non-zero status or was terminated
+ // by the system.
+ // Unknown: For some reason the state of the pod could not be obtained, typically due to an
+ // error in communicating with the host of the pod.
+ //
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
// +optional
optional string phase = 1;
@@ -3442,6 +3489,11 @@ message QuobyteVolumeSource {
// Default is no group
// +optional
optional string group = 5;
+
+ // Tenant owning the given Quobyte volume in the Backend
+ // Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ // +optional
+ optional string tenant = 6;
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
@@ -3721,7 +3773,7 @@ message ResourceQuotaList {
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
message ResourceQuotaSpec {
- // Hard is the set of desired hard limits for each named resource.
+ // hard is the set of desired hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
@@ -3730,6 +3782,12 @@ message ResourceQuotaSpec {
// If not specified, the quota matches all objects.
// +optional
repeated string scopes = 2;
+
+ // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+ // but expressed using ScopeSelectorOperator in combination with possible values.
+ // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
+ // +optional
+ optional ScopeSelector scopeSelector = 3;
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
@@ -3803,6 +3861,7 @@ message ScaleIOPersistentVolumeSource {
optional string storagePool = 6;
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
// +optional
optional string storageMode = 7;
@@ -3812,7 +3871,8 @@ message ScaleIOPersistentVolumeSource {
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // Ex. "ext4", "xfs", "ntfs".
+  // Default is "xfs".
// +optional
optional string fsType = 9;
@@ -3847,6 +3907,7 @@ message ScaleIOVolumeSource {
optional string storagePool = 6;
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
// +optional
optional string storageMode = 7;
@@ -3856,7 +3917,8 @@ message ScaleIOVolumeSource {
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
// +optional
optional string fsType = 9;
@@ -3866,6 +3928,32 @@ message ScaleIOVolumeSource {
optional bool readOnly = 10;
}
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+message ScopeSelector {
+ // A list of scope selector requirements by scope of the resources.
+ // +optional
+ repeated ScopedResourceSelectorRequirement matchExpressions = 1;
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+message ScopedResourceSelectorRequirement {
+ // The name of the scope that the selector applies to.
+ optional string scopeName = 1;
+
+ // Represents a scope's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ optional string operator = 2;
+
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ repeated string values = 3;
+}
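
A minimal sketch of a ResourceQuotaSpec using the new scopeSelector to count only pods of a given PriorityClass, with the vendored k8s.io/api/core/v1 types; the PriorityClass name is hypothetical:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// The quota tracks at most 10 pods whose priorityClassName is "high-priority".
var quotaSpec = corev1.ResourceQuotaSpec{
	Hard: corev1.ResourceList{
		corev1.ResourcePods: resource.MustParse("10"),
	},
	ScopeSelector: &corev1.ScopeSelector{
		MatchExpressions: []corev1.ScopedResourceSelectorRequirement{{
			ScopeName: corev1.ResourceQuotaScopePriorityClass,
			Operator:  corev1.ScopeSelectorOpIn,
			Values:    []string{"high-priority"}, // hypothetical PriorityClass name
		}},
	},
}
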
+
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
message Secret {
@@ -3896,7 +3984,7 @@ message Secret {
// SecretEnvSource selects a Secret to populate the environment
// variables with.
-//
+//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
message SecretEnvSource {
@@ -3916,7 +4004,7 @@ message SecretKeySelector {
// The key of the secret to select from. Must be a valid secret key.
optional string key = 2;
- // Specify whether the Secret or it's key must be defined
+ // Specify whether the Secret or its key must be defined
// +optional
optional bool optional = 3;
}
@@ -3934,7 +4022,7 @@ message SecretList {
}
// Adapts a secret into a projected volume.
-//
+//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
@@ -3970,7 +4058,7 @@ message SecretReference {
}
// Adapts a Secret into a volume.
-//
+//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
@@ -3998,7 +4086,7 @@ message SecretVolumeSource {
// +optional
optional int32 defaultMode = 3;
- // Specify whether the Secret or it's keys must be defined
+ // Specify whether the Secret or its keys must be defined
// +optional
optional bool optional = 4;
}
@@ -4025,6 +4113,10 @@ message SecurityContext {
// +optional
optional SELinuxOptions seLinuxOptions = 3;
+ // Windows security options.
+ // +optional
+ optional WindowsSecurityContextOptions windowsOptions = 10;
+
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
@@ -4061,6 +4153,13 @@ message SecurityContext {
// 2) has CAP_SYS_ADMIN
// +optional
optional bool allowPrivilegeEscalation = 7;
+
+ // procMount denotes the type of proc mount to use for the containers.
+ // The default is DefaultProcMount which uses the container runtime defaults for
+ // readonly paths and masked paths.
+ // This requires the ProcMountType feature flag to be enabled.
+ // +optional
+ optional string procMount = 9;
}
// SerializedReference is a reference to serialized object.
@@ -4134,6 +4233,32 @@ message ServiceAccountList {
repeated ServiceAccount items = 2;
}
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+message ServiceAccountTokenProjection {
+ // Audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ // +optional
+ optional string audience = 1;
+
+ // ExpirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+ // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ // +optional
+ optional int64 expirationSeconds = 2;
+
+ // Path is the path relative to the mount point of the file to project the
+ // token into.
+ optional string path = 3;
+}
+
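A minimal Go sketch of a projected volume using the new serviceAccountToken projection, assuming the vendored k8s.io/api/core/v1 package; the audience, expiry, and path values are illustrative.

// serviceaccounttoken_projection_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3600) // one hour; must be at least 600 seconds
	vol := corev1.Volume{
		Name: "api-token",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Audience:          "api", // illustrative intended recipient
						ExpirationSeconds: &expiry,
						Path:              "token", // file name under the mount point
					},
				}},
			},
		},
	}
	fmt.Println(vol.Name)
}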
// ServiceList holds a list of services.
message ServiceList {
// Standard list metadata.
@@ -4154,7 +4279,7 @@ message ServicePort {
// +optional
optional string name = 1;
- // The IP protocol for this port. Supports "TCP" and "UDP".
+ // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
// Default is TCP.
// +optional
optional string protocol = 2;
@@ -4199,6 +4324,9 @@ message ServiceSpec {
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +patchMergeKey=port
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=port
+ // +listMapKey=protocol
repeated ServicePort ports = 1;
// Route service traffic to pods with label keys and values matching this
@@ -4235,7 +4363,7 @@ message ServiceSpec {
// "LoadBalancer" builds on NodePort and creates an
// external load-balancer (if supported in the current cloud) which routes
// to the clusterIP.
- // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
// +optional
optional string type = 4;
@@ -4300,9 +4428,6 @@ message ServiceSpec {
// The primary use case for setting this field is to use a StatefulSet's Headless Service
// to propagate SRV records for its Pods without respect to their readiness for purpose
// of peer discovery.
- // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
- // when that annotation is deprecated and all clients have been converted to use this
- // field.
// +optional
optional bool publishNotReadyAddresses = 13;
@@ -4465,6 +4590,44 @@ message Toleration {
optional int64 tolerationSeconds = 5;
}
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+message TopologySelectorLabelRequirement {
+ // The label key that the selector applies to.
+ optional string key = 1;
+
+ // An array of string values. One value must match the label to be selected.
+ // Each entry in Values is ORed.
+ repeated string values = 2;
+}
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+message TopologySelectorTerm {
+ // A list of topology selector requirements by labels.
+ // +optional
+ repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+message TypedLocalObjectReference {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
+ // +optional
+ optional string apiGroup = 1;
+
+ // Kind is the type of resource being referenced
+ optional string kind = 2;
+
+ // Name is the name of resource being referenced
+ optional string name = 3;
+}
+
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
message Volume {
// Volume's name.
@@ -4512,6 +4675,14 @@ message VolumeMount {
// This field is beta in 1.10.
// +optional
optional string mountPropagation = 5;
+
+ // Expanded path within the volume from which the container's volume should be mounted.
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ // Defaults to "" (volume's root).
+ // SubPathExpr and SubPath are mutually exclusive.
+ // This field is beta in 1.15.
+ // +optional
+ optional string subPathExpr = 6;
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@@ -4523,13 +4694,20 @@ message VolumeNodeAffinity {
// Projection that may be projected along with other supported volume types
message VolumeProjection {
// information about the secret data to project
+ // +optional
optional SecretProjection secret = 1;
// information about the downwardAPI data to project
+ // +optional
optional DownwardAPIProjection downwardAPI = 2;
// information about the configMap data to project
+ // +optional
optional ConfigMapProjection configMap = 3;
+
+ // information about the serviceAccountToken data to project
+ // +optional
+ optional ServiceAccountTokenProjection serviceAccountToken = 4;
}
// Represents the source of a volume to mount.
@@ -4564,6 +4742,9 @@ message VolumeSource {
optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
// GitRepo represents a git repository at a particular revision.
+ // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ // into the Pod's container.
// +optional
optional GitRepoVolumeSource gitRepo = 5;
@@ -4662,6 +4843,10 @@ message VolumeSource {
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
optional StorageOSVolumeSource storageos = 27;
+
+ // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
+ // +optional
+ optional CSIVolumeSource csi = 28;
}
// Represents a vSphere volume resource.
@@ -4694,3 +4879,18 @@ message WeightedPodAffinityTerm {
optional PodAffinityTerm podAffinityTerm = 2;
}
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
+message WindowsSecurityContextOptions {
+ // GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ optional string gmsaCredentialSpecName = 1;
+
+ // GMSACredentialSpec is where the GMSA admission webhook
+ // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ // GMSA credential spec named by the GMSACredentialSpecName field.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ optional string gmsaCredentialSpec = 2;
+}
+
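A minimal Go sketch of attaching the new WindowsSecurityContextOptions to a container's SecurityContext, assuming the vendored k8s.io/api/core/v1 package and a cluster with the WindowsGMSA feature gate enabled; the container image and credential-spec name are illustrative.

// windows_gmsa_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	specName := "webapp-gmsa" // illustrative GMSA credential spec name
	container := corev1.Container{
		Name:  "web",
		Image: "mcr.microsoft.com/windows/servercore/iis", // illustrative Windows image
		SecurityContext: &corev1.SecurityContext{
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				// The GMSA admission webhook inlines the full credential spec
				// into GMSACredentialSpec based on this name.
				GMSACredentialSpecName: &specName,
			},
		},
	}
	fmt.Println(container.Name)
}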
diff --git a/vendor/k8s.io/api/core/v1/meta.go b/vendor/k8s.io/api/core/v1/meta.go
deleted file mode 100644
index 0e3f5d920..000000000
--- a/vendor/k8s.io/api/core/v1/meta.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
-)
-
-func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj }
-
-// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows
-// fast, direct access to metadata fields for API objects.
-func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }
-func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace }
-func (meta *ObjectMeta) GetName() string { return meta.Name }
-func (meta *ObjectMeta) SetName(name string) { meta.Name = name }
-func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName }
-func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
-func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
-func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
-func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
-func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
-func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation }
-func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation }
-func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink }
-func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
-func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp }
-func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) {
- meta.CreationTimestamp = creationTimestamp
-}
-func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp }
-func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) {
- meta.DeletionTimestamp = deletionTimestamp
-}
-func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds }
-func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
- meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds
-}
-func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
-func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
-func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
-func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
-func (meta *ObjectMeta) GetInitializers() *metav1.Initializers { return meta.Initializers }
-func (meta *ObjectMeta) SetInitializers(initializers *metav1.Initializers) {
- meta.Initializers = initializers
-}
-func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers }
-func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers }
-
-func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference {
- ret := make([]metav1.OwnerReference, len(meta.OwnerReferences))
- for i := 0; i < len(meta.OwnerReferences); i++ {
- ret[i].Kind = meta.OwnerReferences[i].Kind
- ret[i].Name = meta.OwnerReferences[i].Name
- ret[i].UID = meta.OwnerReferences[i].UID
- ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
- if meta.OwnerReferences[i].Controller != nil {
- value := *meta.OwnerReferences[i].Controller
- ret[i].Controller = &value
- }
- if meta.OwnerReferences[i].BlockOwnerDeletion != nil {
- value := *meta.OwnerReferences[i].BlockOwnerDeletion
- ret[i].BlockOwnerDeletion = &value
- }
- }
- return ret
-}
-
-func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) {
- newReferences := make([]metav1.OwnerReference, len(references))
- for i := 0; i < len(references); i++ {
- newReferences[i].Kind = references[i].Kind
- newReferences[i].Name = references[i].Name
- newReferences[i].UID = references[i].UID
- newReferences[i].APIVersion = references[i].APIVersion
- if references[i].Controller != nil {
- value := *references[i].Controller
- newReferences[i].Controller = &value
- }
- if references[i].BlockOwnerDeletion != nil {
- value := *references[i].BlockOwnerDeletion
- newReferences[i].BlockOwnerDeletion = &value
- }
- }
- meta.OwnerReferences = newReferences
-}
-
-func (meta *ObjectMeta) GetClusterName() string {
- return meta.ClusterName
-}
-func (meta *ObjectMeta) SetClusterName(clusterName string) {
- meta.ClusterName = clusterName
-}
diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go
index 526e1320a..1aac0cb41 100644
--- a/vendor/k8s.io/api/core/v1/register.go
+++ b/vendor/k8s.io/api/core/v1/register.go
@@ -57,7 +57,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&Endpoints{},
&EndpointsList{},
&Node{},
- &NodeConfigSource{},
&NodeList{},
&NodeProxyOptions{},
&Binding{},
diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go
index 3bd6fec62..bb8041254 100644
--- a/vendor/k8s.io/api/core/v1/resource.go
+++ b/vendor/k8s.io/api/core/v1/resource.go
@@ -48,13 +48,6 @@ func (self *ResourceList) Pods() *resource.Quantity {
return &resource.Quantity{}
}
-func (self *ResourceList) NvidiaGPU() *resource.Quantity {
- if val, ok := (*self)[ResourceNvidiaGPU]; ok {
- return &val
- }
- return &resource.Quantity{}
-}
-
func (self *ResourceList) StorageEphemeral() *resource.Quantity {
if val, ok := (*self)[ResourceEphemeralStorage]; ok {
return &val
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index 36f456702..2279a4b7a 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -23,214 +23,13 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
-// The comments for the structs and fields can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored and not exported to the SwaggerAPI.
-//
-// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
-
-// Common string formats
-// ---------------------
-// Many fields in this API have formatting requirements. The commonly used
-// formats are defined here.
-//
-// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
-// in the C language. This is captured by the following regex:
-// [A-Za-z_][A-Za-z0-9_]*
-// This defines the format, but not the length restriction, which should be
-// specified at the definition of any field of this type.
-//
-// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
-// to the definition of a "label" in RFCs 1035 and 1123. This is captured
-// by the following regex:
-// [a-z0-9]([-a-z0-9]*[a-z0-9])?
-//
-// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
-// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
-// by the following regex:
-// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
-// or more simply:
-// DNS_LABEL(\.DNS_LABEL)*
-//
-// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
-// conforms to the definition of IANA service name in RFC 6335.
-// It must contains at least one letter [a-z] and it must contains only [a-z0-9-].
-// Hypens ('-') cannot be leading or trailing character of the string
-// and cannot be adjacent to other hyphens.
-
-// ObjectMeta is metadata that all persisted resources must have, which includes all objects
-// users must create.
-// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
-// +k8s:openapi-gen=false
-type ObjectMeta struct {
- // Name must be unique within a namespace. Is required when creating resources, although
- // some resources may allow a client to request the generation of an appropriate name
- // automatically. Name is primarily intended for creation idempotence and configuration
- // definition.
- // Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- // +optional
- Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
-
- // GenerateName is an optional prefix, used by the server, to generate a unique
- // name ONLY IF the Name field has not been provided.
- // If this field is used, the name returned to the client will be different
- // than the name passed. This value will also be combined with a unique suffix.
- // The provided value has the same validation rules as the Name field,
- // and may be truncated by the length of the suffix required to make the value
- // unique on the server.
- //
- // If this field is specified and the generated name exists, the server will
- // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
- // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
- // should retry (optionally after the time indicated in the Retry-After header).
- //
- // Applied only if Name is not specified.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
- // +optional
- GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
-
- // Namespace defines the space within each name must be unique. An empty namespace is
- // equivalent to the "default" namespace, but "default" is the canonical representation.
- // Not all objects are required to be scoped to a namespace - the value of this field for
- // those objects will be empty.
- //
- // Must be a DNS_LABEL.
- // Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
- // +optional
- Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
-
- // SelfLink is a URL representing this object.
- // Populated by the system.
- // Read-only.
- // +optional
- SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
-
- // UID is the unique in time and space value for this object. It is typically generated by
- // the server on successful creation of a resource and is not allowed to change on PUT
- // operations.
- //
- // Populated by the system.
- // Read-only.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
- // +optional
- UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
-
- // An opaque value that represents the internal version of this object that can
- // be used by clients to determine when objects have changed. May be used for optimistic
- // concurrency, change detection, and the watch operation on a resource or set of resources.
- // Clients must treat these values as opaque and passed unmodified back to the server.
- // They may only be valid for a particular resource or set of resources.
- //
- // Populated by the system.
- // Read-only.
- // Value must be treated as opaque by clients and .
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
- // +optional
- ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
-
- // A sequence number representing a specific generation of the desired state.
- // Populated by the system. Read-only.
- // +optional
- Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
-
- // CreationTimestamp is a timestamp representing the server time when this object was
- // created. It is not guaranteed to be set in happens-before order across separate operations.
- // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
- //
- // Populated by the system.
- // Read-only.
- // Null for lists.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
-
- // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
- // field is set by the server when a graceful deletion is requested by the user, and is not
- // directly settable by a client. The resource is expected to be deleted (no longer visible
- // from resource lists, and not reachable by name) after the time in this field. Once set,
- // this value may not be unset or be set further into the future, although it may be shortened
- // or the resource may be deleted prior to this time. For example, a user may request that
- // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
- // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
- // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
- // API. In the presence of network partitions, this object may still exist after this
- // timestamp, until an administrator or automated process can determine the resource is
- // fully terminated.
- // If not set, graceful deletion of the object has not been requested.
- //
- // Populated by the system when a graceful deletion is requested.
- // Read-only.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
-
- // Number of seconds allowed for this object to gracefully terminate before
- // it will be removed from the system. Only set when deletionTimestamp is also set.
- // May only be shortened.
- // Read-only.
- // +optional
- DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
-
- // Map of string keys and values that can be used to organize and categorize
- // (scope and select) objects. May match selectors of replication controllers
- // and services.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- // +optional
- Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
-
- // Annotations is an unstructured key value map stored with a resource that may be
- // set by external tools to store and retrieve arbitrary metadata. They are not
- // queryable and should be preserved when modifying objects.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- // +optional
- Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
-
- // List of objects depended by this object. If ALL objects in the list have
- // been deleted, this object will be garbage collected. If this object is managed by a controller,
- // then an entry in this list will point to this controller, with the controller field set to true.
- // There cannot be more than one managing controller.
- // +optional
- // +patchMergeKey=uid
- // +patchStrategy=merge
- OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
-
- // An initializer is a controller which enforces some system invariant at object creation time.
- // This field is a list of initializers that have not yet acted on this object. If nil or empty,
- // this object has been completely initialized. Otherwise, the object is considered uninitialized
- // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
- // observe uninitialized objects.
- //
- // When an object is created, the system will populate this list with the current set of initializers.
- // Only privileged users may set or modify this list. Once it is empty, it may not be modified further
- // by any user.
- Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"`
-
- // Must be empty before the object is deleted from the registry. Each entry
- // is an identifier for the responsible component that will remove the entry
- // from the list. If the deletionTimestamp of the object is non-nil, entries
- // in this list can only be removed.
- // +optional
- // +patchStrategy=merge
- Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
-
- // The name of the cluster which the object belongs to.
- // This is used to distinguish resources with same name and namespace in different clusters.
- // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
- // +optional
- ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
-}
-
const (
// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
NamespaceDefault string = "default"
// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
NamespaceAll string = ""
+ // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
+ NamespaceNodeLease string = "kube-node-lease"
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -273,6 +72,9 @@ type VolumeSource struct {
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// GitRepo represents a git repository at a particular revision.
+ // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ // into the Pod's container.
// +optional
GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
// Secret represents a secret that should populate this volume.
@@ -349,6 +151,9 @@ type VolumeSource struct {
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
+ // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
+ // +optional
+ CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -389,7 +194,7 @@ type PersistentVolumeSource struct {
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
- Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
+ Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
@@ -405,7 +210,7 @@ type PersistentVolumeSource struct {
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
- Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
+ Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
@@ -446,7 +251,7 @@ type PersistentVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
- // CSI represents storage that handled by an external CSI driver (Beta feature).
+ // CSI represents storage that is handled by an external CSI driver (Beta feature).
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
@@ -458,10 +263,6 @@ const (
// MountOptionAnnotation defines mount option annotation used in PVs
MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
-
- // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume.
- // Value is a string of the json representation of type NodeAffinity
- AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
)
// +genclient
@@ -528,7 +329,7 @@ type PersistentVolumeSpec struct {
MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@@ -657,9 +458,19 @@ type PersistentVolumeClaimSpec struct {
StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
+ // This field requires the VolumeSnapshotDataSource alpha feature gate to be
+ // enabled and currently VolumeSnapshot is the only supported data source.
+ // If the provisioner can support VolumeSnapshot data source, it will create
+ // a new volume and data will be restored to the volume at the same time.
+ // If the provisioner does not support VolumeSnapshot data source, volume will
+ // not be created and the failure will be reported as an event.
+ // In the future, we plan to support more data source types and the behavior
+ // of the provisioner may change.
+ // +optional
+ DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
}
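A minimal Go sketch of the new dataSource field restoring a claim from a VolumeSnapshot, assuming the vendored k8s.io/api and k8s.io/apimachinery packages and the VolumeSnapshotDataSource feature gate; the API group, snapshot name, and size are illustrative.

// pvc_datasource_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "snapshot.storage.k8s.io" // illustrative snapshot API group
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored-data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
			// DataSource points at an existing snapshot in the same namespace.
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "nightly-backup", // illustrative snapshot name
			},
		},
	}
	fmt.Println(pvc.Name)
}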
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
@@ -715,7 +526,7 @@ type PersistentVolumeClaimStatus struct {
type PersistentVolumeAccessMode string
const (
- // can be mounted read/write mode to exactly 1 host
+ // can be mounted in read/write mode to exactly 1 host
ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
// can be mounted in read-only mode to many hosts
ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
@@ -828,6 +639,30 @@ type GlusterfsVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsPersistentVolumeSource struct {
+ // EndpointsName is the endpoint name that details Glusterfs topology.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+ // Path is the Glusterfs volume path.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+ // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+ // EndpointsNamespace is the namespace that contains Glusterfs endpoint.
+ // If this field is empty, the EndpointsNamespace defaults to the same namespace as the bound PVC.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
+}
+
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDVolumeSource struct {
@@ -935,6 +770,35 @@ type CinderVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderPersistentVolumeSource struct {
+ // volume id used to identify the volume in cinder
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ // +optional
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
@@ -1034,6 +898,8 @@ const (
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
+ // ProtocolSCTP is the SCTP protocol.
+ ProtocolSCTP Protocol = "SCTP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
@@ -1092,6 +958,11 @@ type QuobyteVolumeSource struct {
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
+
+ // Tenant owning the given Quobyte volume in the Backend
+ // Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ // +optional
+ Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
@@ -1179,6 +1050,10 @@ type AWSElasticBlockStoreVolumeSource struct {
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
type GitRepoVolumeSource struct {
// Repository URL
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
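A minimal Go sketch of the replacement pattern described in the deprecation note above: an init container clones the repository into an EmptyDir that the main container then mounts. It assumes the vendored k8s.io/api and k8s.io/apimachinery packages; the images, repository URL, and paths are illustrative.

// gitrepo_replacement_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	src := corev1.VolumeMount{Name: "source", MountPath: "/src"}
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "git-clone-demo"},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name:         "source",
				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
			}},
			// The init container populates the shared EmptyDir before the app starts.
			InitContainers: []corev1.Container{{
				Name:         "clone",
				Image:        "alpine/git", // illustrative image with a git binary
				Args:         []string{"clone", "--depth=1", "https://example.com/repo.git", "/src"},
				VolumeMounts: []corev1.VolumeMount{src},
			}},
			Containers: []corev1.Container{{
				Name:         "app",
				Image:        "busybox",
				Command:      []string{"ls", "/src"},
				VolumeMounts: []corev1.VolumeMount{src},
			}},
		},
	}
	fmt.Println(pod.Name)
}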
@@ -1219,7 +1094,7 @@ type SecretVolumeSource struct {
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
- // Specify whether the Secret or it's keys must be defined
+ // Specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
@@ -1510,6 +1385,7 @@ type ScaleIOVolumeSource struct {
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
@@ -1517,7 +1393,8 @@ type ScaleIOVolumeSource struct {
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
@@ -1545,6 +1422,7 @@ type ScaleIOPersistentVolumeSource struct {
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
@@ -1552,7 +1430,8 @@ type ScaleIOPersistentVolumeSource struct {
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs"
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
@@ -1641,7 +1520,7 @@ type ConfigMapVolumeSource struct {
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
- // Specify whether the ConfigMap or it's keys must be defined
+ // Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
@@ -1668,11 +1547,35 @@ type ConfigMapProjection struct {
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
- // Specify whether the ConfigMap or it's keys must be defined
+ // Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+ // Audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ //+optional
+ Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
+ // ExpirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+ // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ //+optional
+ ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
+ // Path is the path relative to the mount point of the file to project the
+ // token into.
+ Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
+}
+
// Represents a projected volume source
type ProjectedVolumeSource struct {
// list of volume projections
@@ -1691,11 +1594,17 @@ type VolumeProjection struct {
// all types below are the supported types for projection into the same volume
// information about the secret data to project
+ // +optional
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// information about the downwardAPI data to project
+ // +optional
DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
// information about the configMap data to project
+ // +optional
ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+ // information about the serviceAccountToken data to project
+ // +optional
+ ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
}
const (
@@ -1720,12 +1629,18 @@ type KeyToPath struct {
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
-// Local represents directly-attached storage with node affinity
+// Local represents directly-attached storage with node affinity (Beta feature)
type LocalVolumeSource struct {
- // The full path to the volume on the node
- // For alpha, this path must be a directory
- // Once block as a source is supported, then this path can point to a block device
+ // The full path to the volume on the node.
+ // It can be either a directory or block device (disk, partition, ...).
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+
+ // Filesystem type to mount.
+ // It applies only when the Path is a block device.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified.
+ // +optional
+ FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// Represents storage that is managed by an external CSI volume driver (Beta feature)
@@ -1757,7 +1672,7 @@ type CSIPersistentVolumeSource struct {
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
@@ -1765,7 +1680,7 @@ type CSIPersistentVolumeSource struct {
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeStageVolume and NodeUnstageVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
@@ -1773,10 +1688,50 @@ type CSIPersistentVolumeSource struct {
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
- // This field is optional, and may be empty if no secret is required. If the
+ // This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
+
+ // ControllerExpandSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerExpandVolume call.
+ // This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"`
+}
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+type CSIVolumeSource struct {
+ // Driver is the name of the CSI driver that handles this volume.
+ // Consult with your admin for the correct name as registered in the cluster.
+ Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+
+ // Specifies a read-only configuration for the volume.
+ // Defaults to false (read/write).
+ // +optional
+ ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+
+ // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs".
+ // If not provided, the empty value is passed to the associated CSI driver
+ // which will determine the default filesystem to apply.
+ // +optional
+ FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+
+ // VolumeAttributes stores driver-specific properties that are passed to the CSI
+ // driver. Consult your driver's documentation for supported values.
+ // +optional
+ VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"`
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secret references are passed.
+ // +optional
+ NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
}
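A minimal Go sketch of an inline (pod-scoped) CSI volume built from the new CSIVolumeSource, assuming the vendored k8s.io/api/core/v1 package and a cluster where the driver supports inline use; the driver name and volume attributes are illustrative.

// csi_inline_volume_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	readOnly := true
	vol := corev1.Volume{
		Name: "secrets-store",
		VolumeSource: corev1.VolumeSource{
			CSI: &corev1.CSIVolumeSource{
				Driver:   "example.csi.vendor.com", // illustrative driver name
				ReadOnly: &readOnly,
				// Driver-specific parameters; consult the driver's documentation.
				VolumeAttributes: map[string]string{
					"secretProviderClass": "app-secrets", // illustrative attribute
				},
			},
		},
	}
	fmt.Println(vol.Name)
}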
// ContainerPort represents a network port in a single container.
@@ -1795,7 +1750,7 @@ type ContainerPort struct {
// Number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536.
ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
- // Protocol for port. Must be UDP or TCP.
+ // Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP".
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
@@ -1825,6 +1780,13 @@ type VolumeMount struct {
// This field is beta in 1.10.
// +optional
MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
+ // Expanded path within the volume from which the container's volume should be mounted.
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ // Defaults to "" (volume's root).
+ // SubPathExpr and SubPath are mutually exclusive.
+ // This field is beta in 1.15.
+ // +optional
+ SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
}
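A minimal Go sketch of SubPathExpr expanding a downward-API environment variable into a per-pod mount path, assuming the vendored k8s.io/api/core/v1 package; the volume, image, and variable names are illustrative.

// subpathexpr_sketch.go (hypothetical example)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	container := corev1.Container{
		Name:  "logger",
		Image: "busybox",
		Env: []corev1.EnvVar{{
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		}},
		VolumeMounts: []corev1.VolumeMount{{
			Name:      "logs",
			MountPath: "/var/log/app",
			// Expanded against the container's environment, so each pod
			// writes under its own subdirectory of the shared volume.
			SubPathExpr: "$(POD_NAME)",
		}},
	}
	fmt.Println(container.Name)
}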
// MountPropagationMode describes mount propagation.
@@ -1927,7 +1889,7 @@ type ConfigMapKeySelector struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key to select.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
- // Specify whether the ConfigMap or it's key must be defined
+ // Specify whether the ConfigMap or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
@@ -1938,7 +1900,7 @@ type SecretKeySelector struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key of the secret to select from. Must be a valid secret key.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
- // Specify whether the Secret or it's key must be defined
+ // Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
@@ -2084,6 +2046,16 @@ const (
PullIfNotPresent PullPolicy = "IfNotPresent"
)
+// PreemptionPolicy describes a policy for if/when to preempt a pod.
+type PreemptionPolicy string
+
+const (
+ // PreemptLowerPriority means that pod can preempt other pods with lower priority.
+ PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
+ // PreemptNever means that pod never preempts other pods with lower priority.
+ PreemptNever PreemptionPolicy = "Never"
+)
+
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string
@@ -2177,6 +2149,9 @@ type Container struct {
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=containerPort
+ // +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
@@ -2194,7 +2169,7 @@ type Container struct {
Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
// Compute Resources required by this container.
// Cannot be updated.
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Pod volumes to mount into the container's filesystem.
@@ -2204,7 +2179,7 @@ type Container struct {
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the container.
- // This is an alpha feature and may change in the future.
+ // This is a beta feature.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
@@ -2306,11 +2281,15 @@ type Lifecycle struct {
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
- // PreStop is called immediately before a container is terminated.
- // The container is terminated after the handler completes.
- // The reason for termination is passed to the handler.
- // Regardless of the outcome of the handler, the container is eventually terminated.
- // Other management of the container blocks until the hook completes.
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The reason for termination is passed to the
+ // handler. The Pod's termination grace period countdown begins before the
+ // PreStop hook is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period. Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
@@ -2441,13 +2420,19 @@ type PodConditionType string
// These are valid conditions of pod.
const (
- // PodScheduled represents status of the scheduling process for this pod.
- PodScheduled PodConditionType = "PodScheduled"
+ // ContainersReady indicates whether all containers in the pod are ready.
+ ContainersReady PodConditionType = "ContainersReady"
+ // PodInitialized means that all init containers in the pod have started successfully.
+ PodInitialized PodConditionType = "Initialized"
// PodReady means the pod is able to service requests and should be added to the
// load balancing pools of all matching services.
PodReady PodConditionType = "Ready"
- // PodInitialized means that all init containers in the pod have started successfully.
- PodInitialized PodConditionType = "Initialized"
+ // PodScheduled represents status of the scheduling process for this pod.
+ PodScheduled PodConditionType = "PodScheduled"
+)
+
+// These are reasons for a pod's transition to a condition.
+const (
// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
// can't schedule the pod right now, for example due to insufficient resources in the cluster.
PodReasonUnschedulable = "Unschedulable"
@@ -2456,7 +2441,6 @@ const (
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
// Type is the type of the condition.
- // Currently only Ready.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
// Status is the status of the condition.
@@ -2527,10 +2511,16 @@ type NodeSelector struct {
NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
-// A null or empty node selector term matches no objects.
+// A null or empty node selector term matches no objects. Its requirements are
+// ANDed.
+// The TopologySelectorTerm type implements a subset of NodeSelectorTerm.
type NodeSelectorTerm struct {
- //Required. A list of node selector requirements. The requirements are ANDed.
- MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
+ // A list of node selector requirements by node's labels.
+ // +optional
+ MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+ // A list of node selector requirements by node's fields.
+ // +optional
+ MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
@@ -2563,6 +2553,27 @@ const (
NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+type TopologySelectorTerm struct {
+ // A list of topology selector requirements by labels.
+ // +optional
+ MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
+}
+
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+ // The label key that the selector applies to.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // An array of string values. One value must match the label to be selected.
+ // Each entry in Values is ORed.
+ Values []string `json:"values" protobuf:"bytes,2,rep,name=values"`
+}
+
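A minimal sketch (not part of the vendored diff) of how a TopologySelectorTerm might be built, assuming this package is imported as corev1; the zone label key and values are illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleTopologyTerm() corev1.TopologySelectorTerm {
        return corev1.TopologySelectorTerm{
            MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
                Key:    "failure-domain.beta.kubernetes.io/zone", // illustrative label key
                Values: []string{"us-east-1a", "us-east-1b"},     // entries are ORed
            }},
        }
    }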
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
// Describes node affinity scheduling rules for the pod.
@@ -2795,6 +2806,12 @@ const (
TolerationOpEqual TolerationOperator = "Equal"
)
+// PodReadinessGate contains the reference to a pod condition.
+type PodReadinessGate struct {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
+ ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
+}
+
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
@@ -2899,7 +2916,7 @@ type PodSpec struct {
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
- // This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
+ // This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
@@ -2959,8 +2976,48 @@ type PodSpec struct {
// configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
+ // If specified, all readiness gates will be evaluated for pod readiness.
+ // A pod is ready when all its containers are ready AND
+ // all conditions specified in the readiness gates have status equal to "True".
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
+ // +optional
+ ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
+ // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ // empty definition that uses the default runtime handler.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+ // This is a beta feature as of Kubernetes v1.14.
+ // +optional
+ RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
+ // EnableServiceLinks indicates whether information about services should be injected into the
+ // pod's environment variables, matching the syntax of Docker links.
+ // Optional: Defaults to true.
+ // +optional
+ EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
+ // PreemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
+ // +optional
+ PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
+ // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+ // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // +optional
+ Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
}
+const (
+ // The default value for the enableServiceLinks attribute.
+ DefaultEnableServiceLinks = true
+)
+
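A minimal sketch (not part of the vendored diff) showing how the new PodSpec fields above might be populated, assuming this package is imported as corev1; the image and condition type are illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func examplePodSpec() corev1.PodSpec {
        enableLinks := corev1.DefaultEnableServiceLinks // true
        return corev1.PodSpec{
            Containers: []corev1.Container{{Name: "app", Image: "nginx:1.17"}},
            // The pod is reported Ready only when its containers are ready AND
            // this custom condition has status "True".
            ReadinessGates: []corev1.PodReadinessGate{
                {ConditionType: corev1.PodConditionType("example.com/load-balancer-attached")},
            },
            EnableServiceLinks: &enableLinks,
        }
    }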
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
@@ -2981,6 +3038,9 @@ type PodSecurityContext struct {
// takes precedence for that container.
// +optional
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
+ // Windows security options.
+ // +optional
+ WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"`
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in SecurityContext. If set in both SecurityContext and
@@ -3019,6 +3079,10 @@ type PodSecurityContext struct {
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
// +optional
FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
+ // Sysctls hold a list of namespaced sysctls used for the pod. Pods with sysctls that
+ // the container runtime does not support might fail to launch.
+ // +optional
+ Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
}
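A minimal sketch (not part of the vendored diff) of the new Sysctls field, assuming this package is imported as corev1; the sysctl name and value are illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleSysctls() *corev1.PodSecurityContext {
        return &corev1.PodSecurityContext{
            Sysctls: []corev1.Sysctl{
                // Namespaced sysctl; a runtime that does not support it may
                // refuse to start the pod.
                {Name: "net.core.somaxconn", Value: "1024"},
            },
        }
    }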
// PodQOSClass defines the supported qos classes of Pods.
@@ -3063,9 +3127,26 @@ type PodDNSConfigOption struct {
}
// PodStatus represents information about the status of a pod. Status may trail the actual
-// state of a system.
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
type PodStatus struct {
- // Current condition of the pod.
+ // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+ // The conditions array, the reason and message fields, and the individual container status
+ // arrays contain more detail about the pod's status.
+ // There are five possible phase values:
+ //
+ // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+ // container images has not been created. This includes time before being scheduled as
+ // well as time spent downloading images over the network, which could take a while.
+ // Running: The pod has been bound to a node, and all of the containers have been created.
+ // At least one container is still running, or is in the process of starting or restarting.
+ // Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+ // Failed: All containers in the pod have terminated, and at least one container has
+ // terminated in failure. The container either exited with non-zero status or was terminated
+ // by the system.
+ // Unknown: For some reason the state of the pod could not be obtained, typically due to an
+ // error in communicating with the host of the pod.
+ //
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
// +optional
Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
@@ -3320,8 +3401,8 @@ type ReplicationControllerCondition struct {
}
// +genclient
-// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
-// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationController represents the configuration of a replication controller.
@@ -3461,6 +3542,9 @@ type ServiceSpec struct {
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +patchMergeKey=port
// +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=port
+ // +listMapKey=protocol
Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
// Route service traffic to pods with label keys and values matching this
@@ -3497,7 +3581,7 @@ type ServiceSpec struct {
// "LoadBalancer" builds on NodePort and creates an
// external load-balancer (if supported in the current cloud) which routes
// to the clusterIP.
- // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
// +optional
Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
@@ -3562,9 +3646,6 @@ type ServiceSpec struct {
// The primary use case for setting this field is to use a StatefulSet's Headless Service
// to propagate SRV records for its Pods without respect to their readiness for purpose
// of peer discovery.
- // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
- // when that annotation is deprecated and all clients have been converted to use this
- // field.
// +optional
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
// sessionAffinityConfig contains the configurations of session affinity.
@@ -3581,7 +3662,7 @@ type ServicePort struct {
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
- // The IP protocol for this port. Supports "TCP" and "UDP".
+ // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
@@ -3610,6 +3691,7 @@ type ServicePort struct {
}
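A minimal sketch (not part of the vendored diff) of a ServicePort using the newly documented SCTP protocol, assuming this package is imported as corev1 and that SCTP support is enabled on the cluster; the name and port number are illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleSCTPPort() corev1.ServicePort {
        return corev1.ServicePort{
            Name:     "sctp-example",
            Protocol: corev1.ProtocolSCTP, // in addition to TCP and UDP
            Port:     9376,
        }
    }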
// +genclient
+// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
@@ -3794,7 +3876,7 @@ type EndpointPort struct {
Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
// The IP protocol for this port.
- // Must be UDP or TCP.
+ // Must be UDP, TCP, or SCTP.
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
@@ -3819,10 +3901,6 @@ type NodeSpec struct {
// PodCIDR represents the pod IP range assigned to the node.
// +optional
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
- // External ID of the node assigned by some machine database (e.g. a cloud provider).
- // Deprecated.
- // +optional
- ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
@@ -3837,14 +3915,53 @@ type NodeSpec struct {
// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
-}
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+ // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+ // see: https://issues.k8s.io/61966
+ // +optional
+ DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
+}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
- metav1.TypeMeta `json:",inline"`
- ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
+ // For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
+ // 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
+ // 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
+ // but used a generic ObjectReference type that didn't really have the fields we needed
+ // All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
+ // so there was no persisted data for these fields that needed to be migrated/handled.
+
+ // +k8s:deprecated=kind
+ // +k8s:deprecated=apiVersion
+ // +k8s:deprecated=configMapRef,protobuf=1
+
+ // ConfigMap is a reference to a Node's ConfigMap
+ ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+type ConfigMapNodeConfigSource struct {
+ // Namespace is the metadata.namespace of the referenced ConfigMap.
+ // This field is required in all cases.
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+
+ // Name is the metadata.name of the referenced ConfigMap.
+ // This field is required in all cases.
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+ // UID is the metadata.UID of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
+
+ // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+
+ // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure.
+ // This field is required in all cases.
+ KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
}
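A minimal sketch (not part of the vendored diff) of a NodeConfigSource suitable for Node.Spec, assuming this package is imported as corev1; the namespace, name, and key are illustrative. UID and ResourceVersion are left unset because they are forbidden in Node.Spec:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleConfigSource() *corev1.NodeConfigSource {
        return &corev1.NodeConfigSource{
            ConfigMap: &corev1.ConfigMapNodeConfigSource{
                Namespace:        "kube-system",
                Name:             "my-node-config", // illustrative ConfigMap name
                KubeletConfigKey: "kubelet",        // key holding the KubeletConfiguration
            },
        }
    }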
// DaemonEndpoint contains information about a single Daemon endpoint.
@@ -3894,6 +4011,53 @@ type NodeSystemInfo struct {
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+type NodeConfigStatus struct {
+ // Assigned reports the checkpointed config the node will try to use.
+ // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+ // config payload to local disk, along with a record indicating intended
+ // config. The node refers to this record to choose its config checkpoint, and
+ // reports this record in Assigned. Assigned only updates in the status after
+ // the record has been checkpointed to disk. When the Kubelet is restarted,
+ // it tries to make the Assigned config the Active config by loading and
+ // validating the checkpointed payload identified by Assigned.
+ // +optional
+ Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
+ // Active reports the checkpointed config the node is actively using.
+ // Active will represent either the current version of the Assigned config,
+ // or the current LastKnownGood config, depending on whether attempting to use the
+ // Assigned config results in an error.
+ // +optional
+ Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
+ // LastKnownGood reports the checkpointed config the node will fall back to
+ // when it encounters an error attempting to use the Assigned config.
+ // The Assigned config becomes the LastKnownGood config when the node determines
+ // that the Assigned config is stable and correct.
+ // This is currently implemented as a 10-minute soak period starting when the local
+ // record of Assigned config is updated. If the Assigned config is Active at the end
+ // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+ // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+ // because the local default config is always assumed good.
+ // You should not make assumptions about the node's method of determining config stability
+ // and correctness, as this may change or become configurable in the future.
+ // +optional
+ LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
+ // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+ // Errors may occur, for example, when attempting to checkpoint Spec.ConfigSource to the local
+ // Assigned record, to checkpoint the payload associated with Spec.ConfigSource, or to load or
+ // validate the Assigned config.
+ // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+ // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+ // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+ // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+ // by fixing the config assigned in Spec.ConfigSource.
+ // You can find additional information for debugging by searching the error message in the Kubelet log.
+ // Error is a human-readable description of the error state; machines can check whether or not Error
+ // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+ // +optional
+ Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
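A minimal sketch (not part of the vendored diff) of reading this status back from a Node, assuming this package is imported as corev1:

    // Assumes: import ("fmt"; corev1 "k8s.io/api/core/v1")
    func reportNodeConfig(node *corev1.Node) {
        cfg := node.Status.Config
        if cfg == nil {
            fmt.Println("dynamic kubelet config is not in use")
            return
        }
        if cfg.Error != "" {
            fmt.Println("config error:", cfg.Error)
        }
        // Active is either the Assigned config or the LastKnownGood fallback.
        fmt.Printf("assigned=%v active=%v lastKnownGood=%v\n",
            cfg.Assigned, cfg.Active, cfg.LastKnownGood)
    }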
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
@@ -3938,6 +4102,9 @@ type NodeStatus struct {
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
+ // Status of the config assigned to the node via the dynamic Kubelet config feature.
+ // +optional
+ Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
type UniqueVolumeName string
@@ -4025,8 +4192,6 @@ const (
NodePIDPressure NodeConditionType = "PIDPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
- // NodeKubeletConfigOk indicates whether the kubelet is correctly configured
- NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
// NodeCondition contains condition information for a node.
@@ -4086,8 +4251,6 @@ const (
// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
- // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
- ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
)
const (
@@ -4095,6 +4258,8 @@ const (
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
ResourceHugePagesPrefix = "hugepages-"
+ // Name prefix for storage resource limits
+ ResourceAttachableVolumesPrefix = "attachable-volumes-"
)
// ResourceList is a set of (resource name, quantity) pairs.
@@ -4177,6 +4342,7 @@ const (
// +genclient
// +genclient:nonNamespaced
+// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Namespace provides a scope for Names.
@@ -4237,95 +4403,6 @@ type Preconditions struct {
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
-// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation.
-type DeletionPropagation string
-
-const (
- // Orphans the dependents.
- DeletePropagationOrphan DeletionPropagation = "Orphan"
- // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background.
- DeletePropagationBackground DeletionPropagation = "Background"
- // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
- // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp.
- // This policy is cascading, i.e., the dependents will be deleted with Foreground.
- DeletePropagationForeground DeletionPropagation = "Foreground"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// DeleteOptions may be provided when deleting an API object
-// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
-// +k8s:openapi-gen=false
-type DeleteOptions struct {
- metav1.TypeMeta `json:",inline"`
-
- // The duration in seconds before the object should be deleted. Value must be non-negative integer.
- // The value zero indicates delete immediately. If this value is nil, the default grace period for the
- // specified type will be used.
- // Defaults to a per object value if not specified. zero means delete immediately.
- // +optional
- GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
-
- // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
- // returned.
- // +optional
- Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
-
- // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
- // Should the dependent objects be orphaned. If true/false, the "orphan"
- // finalizer will be added to/removed from the object's finalizers list.
- // Either this field or PropagationPolicy may be set, but not both.
- // +optional
- OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
-
- // Whether and how garbage collection will be performed.
- // Either this field or OrphanDependents may be set, but not both.
- // The default policy is decided by the existing finalizer set in the
- // metadata.finalizers and the resource-specific default policy.
- // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
- // allow the garbage collector to delete the dependents in the background;
- // 'Foreground' - a cascading policy that deletes all dependents in the
- // foreground.
- // +optional
- PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ListOptions is the query options to a standard REST list call.
-// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
-// +k8s:openapi-gen=false
-type ListOptions struct {
- metav1.TypeMeta `json:",inline"`
-
- // A selector to restrict the list of returned objects by their labels.
- // Defaults to everything.
- // +optional
- LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
- // A selector to restrict the list of returned objects by their fields.
- // Defaults to everything.
- // +optional
- FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
- // If true, partially initialized resources are included in the response.
- // +optional
- IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
- // Watch for changes to the described resources and return them as a stream of
- // add, update, and remove notifications. Specify resourceVersion.
- // +optional
- Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
- // When specified with a watch call, shows changes that occur after that particular version of a resource.
- // Defaults to changes from the beginning of history.
- // When specified for list:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
- // +optional
- ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
- // Timeout for the list/watch call.
- // +optional
- TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
@@ -4546,6 +4623,20 @@ type LocalObjectReference struct {
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+type TypedLocalObjectReference struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
+ // +optional
+ APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+ // Kind is the type of resource being referenced
+ Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+ // Name is the name of resource being referenced
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
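A minimal sketch (not part of the vendored diff) of a TypedLocalObjectReference, assuming this package is imported as corev1; the group, kind, and name are illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleTypedLocalRef() corev1.TypedLocalObjectReference {
        apiGroup := "snapshot.storage.k8s.io" // illustrative third-party group
        return corev1.TypedLocalObjectReference{
            APIGroup: &apiGroup, // leave nil for core-group kinds such as "Secret"
            Kind:     "VolumeSnapshot",
            Name:     "my-snapshot",
        }
    }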
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to serialized object.
@@ -4651,6 +4742,7 @@ type EventSeries struct {
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
// State of this Series: Ongoing or Finished
+ // Deprecated. Planned removal for 1.18
State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
}
@@ -4810,11 +4902,13 @@ const (
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// Match all pod objects that do not have best effort quality of service
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+ // Match all pod objects that have a priority class mentioned
+ ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
- // Hard is the set of desired hard limits for each named resource.
+ // hard is the set of desired hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
@@ -4822,8 +4916,48 @@ type ResourceQuotaSpec struct {
// If not specified, the quota matches all objects.
// +optional
Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
+ // scopeSelector is also a collection of filters, like scopes, that must match each object tracked by a
+ // quota, but expressed using ScopeSelectorOperator in combination with possible values.
+ // For a resource to match, both scopes AND scopeSelector (if specified in the spec) must be matched.
+ // +optional
+ ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"`
+}
+
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+type ScopeSelector struct {
+ // A list of scope selector requirements by scope of the resources.
+ // +optional
+ MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+type ScopedResourceSelectorRequirement struct {
+ // The name of the scope that the selector applies to.
+ ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"`
+ // Represents a scope's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
+// A scope selector operator is the set of operators that can be used in
+// a scope selector requirement.
+type ScopeSelectorOperator string
+
+const (
+ ScopeSelectorOpIn ScopeSelectorOperator = "In"
+ ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn"
+ ScopeSelectorOpExists ScopeSelectorOperator = "Exists"
+ ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist"
+)
+
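A minimal sketch (not part of the vendored diff) of a ResourceQuotaSpec that uses the new scopeSelector, assuming this package is imported as corev1 and k8s.io/apimachinery/pkg/api/resource as resource; the priority class name is illustrative:

    // Assumes: import (
    //     corev1 "k8s.io/api/core/v1"
    //     "k8s.io/apimachinery/pkg/api/resource"
    // )
    func exampleQuotaSpec() corev1.ResourceQuotaSpec {
        return corev1.ResourceQuotaSpec{
            Hard: corev1.ResourceList{
                corev1.ResourcePods: resource.MustParse("10"),
            },
            // Count only pods whose priorityClassName is "high" against this quota.
            ScopeSelector: &corev1.ScopeSelector{
                MatchExpressions: []corev1.ScopedResourceSelectorRequirement{{
                    ScopeName: corev1.ResourceQuotaScopePriorityClass,
                    Operator:  corev1.ScopeSelectorOpIn,
                    Values:    []string{"high"},
                }},
            },
        }
    }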
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
@@ -4985,6 +5119,10 @@ const (
TLSCertKey = "tls.crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls.key"
+ // SecretTypeBootstrapToken is used during the automated bootstrap process (first
+ // implemented by kubeadm). It stores tokens that are used to sign well-known
+ // ConfigMaps. They are used for authn.
+ SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -5171,6 +5309,9 @@ type SecurityContext struct {
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
+ // Windows security options.
+ // +optional
+ WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"`
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
@@ -5203,8 +5344,28 @@ type SecurityContext struct {
// 2) has CAP_SYS_ADMIN
// +optional
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
+ // procMount denotes the type of proc mount to use for the containers.
+ // The default is DefaultProcMount which uses the container runtime defaults for
+ // readonly paths and masked paths.
+ // This requires the ProcMountType feature flag to be enabled.
+ // +optional
+ ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"`
}
+type ProcMountType string
+
+const (
+ // DefaultProcMount uses the container runtime defaults for readonly and masked
+ // paths for /proc. Most container runtimes mask certain paths in /proc to avoid
+ // accidental security exposure of special devices or information.
+ DefaultProcMount ProcMountType = "Default"
+
+ // UnmaskedProcMount bypasses the default masking behavior of the container
+ // runtime and ensures the newly created /proc the container stays in tact with
+ // no modifications.
+ UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
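A minimal sketch (not part of the vendored diff) of selecting an unmasked /proc, assuming this package is imported as corev1 and that the ProcMountType feature gate is enabled on the cluster:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleProcMount() *corev1.SecurityContext {
        pm := corev1.UnmaskedProcMount // only honored when the ProcMountType feature gate is on
        return &corev1.SecurityContext{
            ProcMount: &pm,
        }
    }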
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
// User is a SELinux user label that applies to the container.
@@ -5221,6 +5382,21 @@ type SELinuxOptions struct {
Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
+// WindowsSecurityContextOptions contains Windows-specific options and credentials.
+type WindowsSecurityContextOptions struct {
+ // GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`
+
+ // GMSACredentialSpec is where the GMSA admission webhook
+ // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ // GMSA credential spec named by the GMSACredentialSpecName field.
+ // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+ // +optional
+ GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`
+}
+
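A minimal sketch (not part of the vendored diff) of populating the alpha GMSA name field, assuming this package is imported as corev1 and the WindowsGMSA feature flag is enabled; the credential spec name is illustrative:

    // Assumes: import corev1 "k8s.io/api/core/v1"
    func exampleWindowsOptions() *corev1.WindowsSecurityContextOptions {
        specName := "webapp-gmsa" // illustrative GMSA credential spec name
        return &corev1.WindowsSecurityContextOptions{
            // The GMSA admission webhook is expected to inline the spec contents
            // into GMSACredentialSpec based on this name.
            GMSACredentialSpecName: &specName,
        }
    }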
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type.
@@ -5251,9 +5427,9 @@ const (
// Sysctl defines a kernel parameter to be set
type Sysctl struct {
// Name of a property to set
- Name string `protobuf:"bytes,1,opt,name=name"`
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Value of a property to set
- Value string `protobuf:"bytes,2,opt,name=value"`
+ Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 83433be48..89723b821 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2016 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ package v1
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
-// AUTO-GENERATED FUNCTIONS START HERE
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AWSElasticBlockStoreVolumeSource = map[string]string{
"": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
"volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
@@ -123,15 +123,29 @@ var map_CSIPersistentVolumeSource = map[string]string{
"readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".",
"volumeAttributes": "Attributes of the volume to publish.",
- "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
- "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
- "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+ "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+ "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+ "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+ "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
}
func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
return map_CSIPersistentVolumeSource
}
+var map_CSIVolumeSource = map[string]string{
+ "": "Represents a source location of a volume to mount, managed by an external CSI driver",
+ "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.",
+ "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).",
+ "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.",
+ "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.",
+ "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.",
+}
+
+func (CSIVolumeSource) SwaggerDoc() map[string]string {
+ return map_CSIVolumeSource
+}
+
var map_Capabilities = map[string]string{
"": "Adds and removes POSIX capabilities from running containers.",
"add": "Added capabilities",
@@ -170,11 +184,24 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string {
return map_CephFSVolumeSource
}
+var map_CinderPersistentVolumeSource = map[string]string{
+ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+ "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.",
+}
+
+func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string {
+ return map_CinderPersistentVolumeSource
+}
+
var map_CinderVolumeSource = map[string]string{
- "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
- "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
- "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+ "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.",
}
func (CinderVolumeSource) SwaggerDoc() map[string]string {
@@ -245,7 +272,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string {
var map_ConfigMapKeySelector = map[string]string{
"": "Selects a key from a ConfigMap.",
"key": "The key to select.",
- "optional": "Specify whether the ConfigMap or it's key must be defined",
+ "optional": "Specify whether the ConfigMap or its key must be defined",
}
func (ConfigMapKeySelector) SwaggerDoc() map[string]string {
@@ -262,10 +289,23 @@ func (ConfigMapList) SwaggerDoc() map[string]string {
return map_ConfigMapList
}
+var map_ConfigMapNodeConfigSource = map[string]string{
+ "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.",
+ "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.",
+ "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.",
+ "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+ "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+ "kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.",
+}
+
+func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string {
+ return map_ConfigMapNodeConfigSource
+}
+
var map_ConfigMapProjection = map[string]string{
"": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
"items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
- "optional": "Specify whether the ConfigMap or it's keys must be defined",
+ "optional": "Specify whether the ConfigMap or its keys must be defined",
}
func (ConfigMapProjection) SwaggerDoc() map[string]string {
@@ -276,7 +316,7 @@ var map_ConfigMapVolumeSource = map[string]string{
"": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
"items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
- "optional": "Specify whether the ConfigMap or it's keys must be defined",
+ "optional": "Specify whether the ConfigMap or its keys must be defined",
}
func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
@@ -293,9 +333,9 @@ var map_Container = map[string]string{
"ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
- "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+ "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
- "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.",
+ "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
@@ -327,7 +367,7 @@ var map_ContainerPort = map[string]string{
"name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
"hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
"containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
- "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".",
+ "protocol": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
"hostIP": "What host IP to bind the external port to.",
}
@@ -405,18 +445,6 @@ func (DaemonEndpoint) SwaggerDoc() map[string]string {
return map_DaemonEndpoint
}
-var map_DeleteOptions = map[string]string{
- "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.",
- "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
- "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
- "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
- "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
-}
-
-func (DeleteOptions) SwaggerDoc() map[string]string {
- return map_DeleteOptions
-}
-
var map_DownwardAPIProjection = map[string]string{
"": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
"items": "Items is a list of DownwardAPIVolume file",
@@ -474,7 +502,7 @@ var map_EndpointPort = map[string]string{
"": "EndpointPort is a tuple that describes a single port.",
"name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
- "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.",
+ "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
@@ -583,7 +611,7 @@ var map_EventSeries = map[string]string{
"": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
"count": "Number of occurrences in this series up to the last heartbeat time",
"lastObservedTime": "Time of the last occurrence observed",
- "state": "State of this Series: Ongoing or Finished",
+ "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18",
}
func (EventSeries) SwaggerDoc() map[string]string {
@@ -671,7 +699,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
}
var map_GitRepoVolumeSource = map[string]string{
- "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.",
+ "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"repository": "Repository URL",
"revision": "Commit hash for the specified revision.",
"directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
@@ -681,6 +709,18 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
return map_GitRepoVolumeSource
}
+var map_GlusterfsPersistentVolumeSource = map[string]string{
+ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+ "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+ "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+ "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+ "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
+ return map_GlusterfsPersistentVolumeSource
+}
+
var map_GlusterfsVolumeSource = map[string]string{
"": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
"endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
@@ -798,7 +838,7 @@ func (KeyToPath) SwaggerDoc() map[string]string {
var map_Lifecycle = map[string]string{
"": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
- "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
}
func (Lifecycle) SwaggerDoc() map[string]string {
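
To make the PreStop semantics above concrete, here is a minimal, illustrative container with a preStop exec hook. It assumes the Handler type name used by this vintage of k8s.io/api/core/v1; the image and command are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "web",
		Image: "nginx", // placeholder image
		Lifecycle: &corev1.Lifecycle{
			// PreStop runs before termination; the grace-period countdown
			// starts before the hook executes, as described above.
			PreStop: &corev1.Handler{
				Exec: &corev1.ExecAction{Command: []string{"/bin/sh", "-c", "nginx -s quit"}},
			},
		},
	}
	fmt.Println(c.Lifecycle.PreStop.Exec.Command)
}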
@@ -848,20 +888,6 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string {
return map_LimitRangeSpec
}
-var map_ListOptions = map[string]string{
- "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.",
- "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
- "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
- "includeUninitialized": "If true, partially initialized resources are included in the response.",
- "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
- "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
- "timeoutSeconds": "Timeout for the list/watch call.",
-}
-
-func (ListOptions) SwaggerDoc() map[string]string {
- return map_ListOptions
-}
-
var map_LoadBalancerIngress = map[string]string{
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
@@ -891,8 +917,9 @@ func (LocalObjectReference) SwaggerDoc() map[string]string {
}
var map_LocalVolumeSource = map[string]string{
- "": "Local represents directly-attached storage with node affinity",
- "path": "The full path to the volume on the node For alpha, this path must be a directory Once block as a source is supported, then this path can point to a block device",
+ "": "Local represents directly-attached storage with node affinity (Beta feature)",
+ "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
+ "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.",
}
func (LocalVolumeSource) SwaggerDoc() map[string]string {
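
The updated local-volume wording above (directory or block device, plus the new fsType field) can be sketched as a PersistentVolume. Go field names are assumed to mirror the JSON keys above; the device path and hostname are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fsType := "ext4" // only consulted when Path is a block device
	pv := corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "local-pv-example"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity:    corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("10Gi")},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				Local: &corev1.LocalVolumeSource{
					Path:   "/dev/sdb1", // placeholder block device
					FSType: &fsType,
				},
			},
			// Local volumes are tied to a node, hence the node affinity.
			NodeAffinity: &corev1.VolumeNodeAffinity{
				Required: &corev1.NodeSelector{
					NodeSelectorTerms: []corev1.NodeSelectorTerm{{
						MatchExpressions: []corev1.NodeSelectorRequirement{{
							Key:      "kubernetes.io/hostname",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"node-1"}, // placeholder node name
						}},
					}},
				},
			},
		},
	}
	fmt.Println(pv.Spec.Local.Path)
}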
@@ -995,13 +1022,26 @@ func (NodeCondition) SwaggerDoc() map[string]string {
}
var map_NodeConfigSource = map[string]string{
- "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
+ "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
+ "configMap": "ConfigMap is a reference to a Node's ConfigMap",
}
func (NodeConfigSource) SwaggerDoc() map[string]string {
return map_NodeConfigSource
}
+var map_NodeConfigStatus = map[string]string{
+ "": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
+ "assigned": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
+ "active": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
+ "lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
+ "error": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
+}
+
+func (NodeConfigStatus) SwaggerDoc() map[string]string {
+ return map_NodeConfigStatus
+}
+
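+
+For the dynamic Kubelet config fields documented above, a hedged sketch of pointing Node.Spec.ConfigSource at a ConfigMap. Go field names are assumed from the JSON keys above, the KubeletConfigKey field is assumed from that feature's usual shape, and the ConfigMap name is a placeholder.
+
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	node := corev1.Node{
		Spec: corev1.NodeSpec{
			// Requires the DynamicKubeletConfig feature gate, as noted below.
			ConfigSource: &corev1.NodeConfigSource{
				ConfigMap: &corev1.ConfigMapNodeConfigSource{
					Namespace:        "kube-system",
					Name:             "my-node-config", // placeholder ConfigMap
					KubeletConfigKey: "kubelet",        // assumed key name
				},
			},
		},
	}
	fmt.Println(node.Spec.ConfigSource.ConfigMap.Name)
	// Node.Status.Config (NodeConfigStatus) then reports Assigned, Active,
	// LastKnownGood, and Error, as documented above.
}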
var map_NodeDaemonEndpoints = map[string]string{
"": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
"kubeletEndpoint": "Endpoint on which Kubelet is listening.",
@@ -1060,8 +1100,9 @@ func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
}
var map_NodeSelectorTerm = map[string]string{
- "": "A null or empty node selector term matches no objects.",
- "matchExpressions": "Required. A list of node selector requirements. The requirements are ANDed.",
+ "": "A null or empty node selector term matches no objects. Its requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
+ "matchExpressions": "A list of node selector requirements by node's labels.",
+ "matchFields": "A list of node selector requirements by node's fields.",
}
func (NodeSelectorTerm) SwaggerDoc() map[string]string {
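
A small illustrative NodeSelectorTerm combining matchExpressions with the matchFields addition documented above (both lists are ANDed). The label value and node name are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	term := corev1.NodeSelectorTerm{
		// Requirements against node labels.
		MatchExpressions: []corev1.NodeSelectorRequirement{{
			Key:      "disktype",
			Operator: corev1.NodeSelectorOpIn,
			Values:   []string{"ssd"}, // placeholder label value
		}},
		// Requirements against node fields, e.g. metadata.name.
		MatchFields: []corev1.NodeSelectorRequirement{{
			Key:      "metadata.name",
			Operator: corev1.NodeSelectorOpIn,
			Values:   []string{"node-1"}, // placeholder node name
		}},
	}
	fmt.Println(len(term.MatchExpressions), len(term.MatchFields))
}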
@@ -1071,11 +1112,11 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string {
var map_NodeSpec = map[string]string{
"": "NodeSpec describes the attributes that a node is created with.",
"podCIDR": "PodCIDR represents the pod IP range assigned to the node.",
- "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.",
"providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
"taints": "If specified, the node's taints.",
"configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field",
+ "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
}
func (NodeSpec) SwaggerDoc() map[string]string {
@@ -1094,6 +1135,7 @@ var map_NodeStatus = map[string]string{
"images": "List of container images on this node",
"volumesInUse": "List of attachable volumes in use (mounted) by the node.",
"volumesAttached": "List of volumes that are attached to the node.",
+ "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
}
func (NodeStatus) SwaggerDoc() map[string]string {
@@ -1128,30 +1170,6 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string {
return map_ObjectFieldSelector
}
-var map_ObjectMeta = map[string]string{
- "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.",
- "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
- "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency",
- "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
- "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.",
- "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
- "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
- "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
- "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
- "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/",
- "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/",
- "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
- "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.",
- "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
- "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
-}
-
-func (ObjectMeta) SwaggerDoc() map[string]string {
- return map_ObjectMeta
-}
-
var map_ObjectReference = map[string]string{
"": "ObjectReference contains enough information to let you inspect or modify the referred object.",
"kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
@@ -1218,7 +1236,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
- "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.",
+ "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
+ "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
}
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
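
The new dataSource entry above can be sketched as a claim restored from a VolumeSnapshot. This assumes the TypedLocalObjectReference shape documented later in this file and the snapshot.storage.k8s.io API group; the snapshot and StorageClass names are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "snapshot.storage.k8s.io" // assumed group for VolumeSnapshot
	storageClass := "csi-class"           // placeholder StorageClass
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored-claim"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &storageClass,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("5Gi")},
			},
			// Requires the VolumeSnapshotDataSource feature gate, as noted above.
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "my-snapshot", // placeholder snapshot
			},
		},
	}
	fmt.Println(pvc.Spec.DataSource.Kind)
}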
@@ -1280,7 +1299,7 @@ var map_PersistentVolumeSource = map[string]string{
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
"local": "Local represents directly-attached storage with node affinity",
"storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md",
- "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).",
+ "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@@ -1295,7 +1314,7 @@ var map_PersistentVolumeSpec = map[string]string{
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
"mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
- "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.",
+ "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
"nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
}
@@ -1381,7 +1400,7 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
- "type": "Type is the type of the condition. Currently only Ready. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+ "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"lastProbeTime": "Last time we probed the condition.",
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
@@ -1471,14 +1490,25 @@ func (PodProxyOptions) SwaggerDoc() map[string]string {
return map_PodProxyOptions
}
+var map_PodReadinessGate = map[string]string{
+ "": "PodReadinessGate contains the reference to a pod condition",
+ "conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.",
+}
+
+func (PodReadinessGate) SwaggerDoc() map[string]string {
+ return map_PodReadinessGate
+}
+
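+
+A minimal sketch of wiring the readiness gate above into a PodSpec (see also the readinessGates entry added to PodSpec below). The condition type is a placeholder that an external controller would be expected to set on the pod's status.
+
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{Name: "app", Image: "busybox"}}, // placeholder image
		// The pod only becomes Ready once this condition is reported as
		// "True" in the pod's status, in addition to container readiness.
		ReadinessGates: []corev1.PodReadinessGate{{
			ConditionType: corev1.PodConditionType("example.com/load-balancer-attached"), // placeholder
		}},
	}
	fmt.Println(spec.ReadinessGates[0].ConditionType)
}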
var map_PodSecurityContext = map[string]string{
"": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
"seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "windowsOptions": "Windows security options.",
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
+ "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
}
func (PodSecurityContext) SwaggerDoc() map[string]string {
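
An illustrative PodSecurityContext using the new sysctls entry (the Sysctl type's lowercased name/value keys appear further down in this diff). The sysctl chosen here is a placeholder and must be allowed by the kubelet and container runtime.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	runAsNonRoot := true
	sc := corev1.PodSecurityContext{
		RunAsNonRoot: &runAsNonRoot,
		// Namespaced sysctls applied to the pod; unsupported ones may
		// prevent the pod from launching, as documented above.
		Sysctls: []corev1.Sysctl{
			{Name: "net.core.somaxconn", Value: "1024"}, // placeholder sysctl
		},
	}
	fmt.Println(sc.Sysctls[0].Name, "=", sc.Sysctls[0].Value)
}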
@@ -1511,7 +1541,7 @@ var map_PodSpec = map[string]string{
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
- "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.",
+ "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
"imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
@@ -1523,6 +1553,11 @@ var map_PodSpec = map[string]string{
"priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
"priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
"dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+ "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\". More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
+ "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
+ "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
+ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@@ -1530,8 +1565,8 @@ func (PodSpec) SwaggerDoc() map[string]string {
}
var map_PodStatus = map[string]string{
- "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
- "phase": "Current condition of the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+ "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
@@ -1631,7 +1666,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
}
var map_Probe = map[string]string{
- "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+ "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
"initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
@@ -1660,6 +1695,7 @@ var map_QuobyteVolumeSource = map[string]string{
"readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.",
"user": "User to map volume access to Defaults to serivceaccount user",
"group": "Group to map volume access to Default is no group",
+ "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin",
}
func (QuobyteVolumeSource) SwaggerDoc() map[string]string {
@@ -1802,9 +1838,10 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string {
}
var map_ResourceQuotaSpec = map[string]string{
- "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
- "hard": "Hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
- "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
+ "hard": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+ "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+ "scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.",
}
func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
@@ -1851,9 +1888,9 @@ var map_ScaleIOPersistentVolumeSource = map[string]string{
"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
"storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
- "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
+ "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
}
@@ -1869,9 +1906,9 @@ var map_ScaleIOVolumeSource = map[string]string{
"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
"storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
- "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
+ "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
}
@@ -1879,6 +1916,26 @@ func (ScaleIOVolumeSource) SwaggerDoc() map[string]string {
return map_ScaleIOVolumeSource
}
+var map_ScopeSelector = map[string]string{
+ "": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
+ "matchExpressions": "A list of scope selector requirements by scope of the resources.",
+}
+
+func (ScopeSelector) SwaggerDoc() map[string]string {
+ return map_ScopeSelector
+}
+
+var map_ScopedResourceSelectorRequirement = map[string]string{
+ "": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
+ "scopeName": "The name of the scope that the selector applies to.",
+ "operator": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.",
+ "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_ScopedResourceSelectorRequirement
+}
+
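+
+To illustrate the scopeSelector machinery above, a hedged sketch of a ResourceQuota spec that only tracks pods in a given PriorityClass. Constant names are assumed from this package's conventions; the class name is a placeholder.
+
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	spec := corev1.ResourceQuotaSpec{
		Hard: corev1.ResourceList{
			corev1.ResourcePods: resource.MustParse("10"),
		},
		// Both scopes and scopeSelector (when set) must match, as noted above.
		ScopeSelector: &corev1.ScopeSelector{
			MatchExpressions: []corev1.ScopedResourceSelectorRequirement{{
				ScopeName: corev1.ResourceQuotaScopePriorityClass,
				Operator:  corev1.ScopeSelectorOpIn,
				Values:    []string{"high"}, // placeholder PriorityClass name
			}},
		},
	}
	fmt.Println(spec.ScopeSelector.MatchExpressions[0].ScopeName)
}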
var map_Secret = map[string]string{
"": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
@@ -1903,7 +1960,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string {
var map_SecretKeySelector = map[string]string{
"": "SecretKeySelector selects a key of a Secret.",
"key": "The key of the secret to select from. Must be a valid secret key.",
- "optional": "Specify whether the Secret or it's key must be defined",
+ "optional": "Specify whether the Secret or its key must be defined",
}
func (SecretKeySelector) SwaggerDoc() map[string]string {
@@ -1945,7 +2002,7 @@ var map_SecretVolumeSource = map[string]string{
"secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
- "optional": "Specify whether the Secret or it's keys must be defined",
+ "optional": "Specify whether the Secret or its keys must be defined",
}
func (SecretVolumeSource) SwaggerDoc() map[string]string {
@@ -1957,11 +2014,13 @@ var map_SecurityContext = map[string]string{
"capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
"privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
"seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "windowsOptions": "Windows security options.",
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
"readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.",
"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
+ "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.",
}
func (SecurityContext) SwaggerDoc() map[string]string {
@@ -2010,6 +2069,17 @@ func (ServiceAccountList) SwaggerDoc() map[string]string {
return map_ServiceAccountList
}
+var map_ServiceAccountTokenProjection = map[string]string{
+ "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pod's runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
+ "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.",
+ "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.",
+ "path": "Path is the path relative to the mount point of the file to project the token into.",
+}
+
+func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string {
+ return map_ServiceAccountTokenProjection
+}
+
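+
+A brief sketch of projecting a service account token via the type above, using the projected volume source whose serviceAccountToken entry appears later in this diff. The audience value is a placeholder.
+
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiration := int64(3600) // one hour, the documented default
	vol := corev1.Volume{
		Name: "api-token",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Audience:          "https://example.com", // placeholder audience
						ExpirationSeconds: &expiration,
						Path:              "token",
					},
				}},
			},
		},
	}
	fmt.Println(vol.VolumeSource.Projected.Sources[0].ServiceAccountToken.Path)
}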
var map_ServiceList = map[string]string{
"": "ServiceList holds a list of services.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
@@ -2023,7 +2093,7 @@ func (ServiceList) SwaggerDoc() map[string]string {
var map_ServicePort = map[string]string{
"": "ServicePort contains information on service's port.",
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
- "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.",
+ "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
"port": "The port that will be exposed by this service.",
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
"nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
@@ -2047,7 +2117,7 @@ var map_ServiceSpec = map[string]string{
"ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
"clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
- "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ",
+ "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
"externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
@@ -2055,7 +2125,7 @@ var map_ServiceSpec = map[string]string{
"externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.",
"externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
"healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
- "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery. This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints when that annotation is deprecated and all clients have been converted to use this field.",
+ "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
}
@@ -2109,8 +2179,8 @@ func (StorageOSVolumeSource) SwaggerDoc() map[string]string {
var map_Sysctl = map[string]string{
"": "Sysctl defines a kernel parameter to be set",
- "Name": "Name of a property to set",
- "Value": "Value of a property to set",
+ "name": "Name of a property to set",
+ "value": "Value of a property to set",
}
func (Sysctl) SwaggerDoc() map[string]string {
@@ -2152,6 +2222,36 @@ func (Toleration) SwaggerDoc() map[string]string {
return map_Toleration
}
+var map_TopologySelectorLabelRequirement = map[string]string{
+ "": "A topology selector requirement is a selector that matches a given label. This is an alpha feature and may change in the future.",
+ "key": "The label key that the selector applies to.",
+ "values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.",
+}
+
+func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string {
+ return map_TopologySelectorLabelRequirement
+}
+
+var map_TopologySelectorTerm = map[string]string{
+ "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. Its requirements are ANDed. It provides a subset of the functionality of NodeSelectorTerm. This is an alpha feature and may change in the future.",
+ "matchLabelExpressions": "A list of topology selector requirements by labels.",
+}
+
+func (TopologySelectorTerm) SwaggerDoc() map[string]string {
+ return map_TopologySelectorTerm
+}
+
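+
+The topology selector types above are typically consumed by other APIs (for example a StorageClass's allowedTopologies); that usage is an assumption here, not shown in this diff. As a minimal illustration, a term restricting selection to two zones, with the zone label and values as placeholders.
+
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	term := corev1.TopologySelectorTerm{
		MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
			Key:    "failure-domain.beta.kubernetes.io/zone", // placeholder topology label
			Values: []string{"zone-a", "zone-b"},             // values are ORed within the requirement
		}},
	}
	fmt.Println(term.MatchLabelExpressions[0].Values)
}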
+var map_TypedLocalObjectReference = map[string]string{
+ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
+ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+ "kind": "Kind is the type of resource being referenced",
+ "name": "Name is the name of resource being referenced",
+}
+
+func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
+ return map_TypedLocalObjectReference
+}
+
var map_Volume = map[string]string{
"": "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
"name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
@@ -2178,6 +2278,7 @@ var map_VolumeMount = map[string]string{
"mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.",
"subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
"mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
+ "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.",
}
func (VolumeMount) SwaggerDoc() map[string]string {
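
A short, illustrative container and volume-mount pair using the new subPathExpr entry. The expansion relies on an environment variable declared on the same container (here POD_NAME via the downward API), and all names are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "logger",
		Image: "busybox", // placeholder image
		Env: []corev1.EnvVar{{
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		}},
		VolumeMounts: []corev1.VolumeMount{{
			Name:      "workdir",
			MountPath: "/logs",
			// Expanded at mount time using the container's environment.
			SubPathExpr: "$(POD_NAME)",
		}},
	}
	fmt.Println(c.VolumeMounts[0].SubPathExpr)
}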
@@ -2194,10 +2295,11 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
}
var map_VolumeProjection = map[string]string{
- "": "Projection that may be projected along with other supported volume types",
- "secret": "information about the secret data to project",
- "downwardAPI": "information about the downwardAPI data to project",
- "configMap": "information about the configMap data to project",
+ "": "Projection that may be projected along with other supported volume types",
+ "secret": "information about the secret data to project",
+ "downwardAPI": "information about the downwardAPI data to project",
+ "configMap": "information about the configMap data to project",
+ "serviceAccountToken": "information about the serviceAccountToken data to project",
}
func (VolumeProjection) SwaggerDoc() map[string]string {
@@ -2210,29 +2312,30 @@ var map_VolumeSource = map[string]string{
"emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
"gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
"awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
- "gitRepo": "GitRepo represents a git repository at a particular revision.",
+ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
- "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
- "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
- "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
- "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
- "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
- "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
- "configMap": "ConfigMap represents a configMap that should populate this volume",
- "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
- "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
- "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
- "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
- "projected": "Items for all in one resources secrets, configmaps, and downward API",
- "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
- "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
- "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+ "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
+ "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "configMap": "ConfigMap represents a configMap that should populate this volume",
+ "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ "projected": "Items for all in one resources secrets, configmaps, and downward API",
+ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).",
}
func (VolumeSource) SwaggerDoc() map[string]string {
@@ -2261,4 +2364,14 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
return map_WeightedPodAffinityTerm
}
+var map_WindowsSecurityContextOptions = map[string]string{
+ "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
+ "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
+ "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
+}
+
+func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
+ return map_WindowsSecurityContextOptions
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go
new file mode 100644
index 000000000..4497760d3
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/well_known_labels.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+ LabelHostname = "kubernetes.io/hostname"
+ LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
+ LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
+
+ LabelInstanceType = "beta.kubernetes.io/instance-type"
+
+ LabelOSStable = "kubernetes.io/os"
+ LabelArchStable = "kubernetes.io/arch"
+
+ // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
+ LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
+ // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
+ LabelNamespaceSuffixNode = "node.kubernetes.io"
+
+ // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled
+ LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io"
+)
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index ebdcb890d..3dc72a17e 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ limitations under the License.
package v1
import (
- meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
)
@@ -47,30 +47,18 @@ func (in *Affinity) DeepCopyInto(out *Affinity) {
*out = *in
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(NodeAffinity)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(NodeAffinity)
+ (*in).DeepCopyInto(*out)
}
if in.PodAffinity != nil {
in, out := &in.PodAffinity, &out.PodAffinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(PodAffinity)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(PodAffinity)
+ (*in).DeepCopyInto(*out)
}
if in.PodAntiAffinity != nil {
in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(PodAntiAffinity)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(PodAntiAffinity)
+ (*in).DeepCopyInto(*out)
}
return
}
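
Editor's note: the same mechanical change repeats through the rest of this regenerated file: the new DeepCopyInto bodies drop the inner "if *in == nil" re-check, since the enclosing "if in.Field != nil" guard already guarantees the pointer is non-nil. A reduced, self-contained illustration of the idiom (Obj and FieldType are hypothetical placeholder types, not from this diff):

    package main

    type FieldType struct{ V int }

    type Obj struct{ Field *FieldType }

    // deepCopyInto mirrors the regenerated idiom: the outer nil guard makes
    // the inner *in == nil check unnecessary, so the copy is done directly.
    func (in *Obj) deepCopyInto(out *Obj) {
        *out = *in
        if in.Field != nil {
            in, out := &in.Field, &out.Field
            *out = new(FieldType)
            **out = **in
        }
    }

    func main() {
        a := Obj{Field: &FieldType{V: 1}}
        var b Obj
        a.deepCopyInto(&b)
        b.Field.V = 2 // does not change a.Field.V, since b.Field is a fresh pointer
    }
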
@@ -129,39 +117,23 @@ func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) {
*out = *in
if in.CachingMode != nil {
in, out := &in.CachingMode, &out.CachingMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureDataDiskCachingMode)
- **out = **in
- }
+ *out = new(AzureDataDiskCachingMode)
+ **out = **in
}
if in.FSType != nil {
in, out := &in.FSType, &out.FSType
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
if in.ReadOnly != nil {
in, out := &in.ReadOnly, &out.ReadOnly
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.Kind != nil {
in, out := &in.Kind, &out.Kind
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureDataDiskKind)
- **out = **in
- }
+ *out = new(AzureDataDiskKind)
+ **out = **in
}
return
}
@@ -181,12 +153,8 @@ func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistent
*out = *in
if in.SecretNamespace != nil {
in, out := &in.SecretNamespace, &out.SecretNamespace
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
return
}
@@ -256,30 +224,23 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource
}
if in.ControllerPublishSecretRef != nil {
in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
if in.NodeStageSecretRef != nil {
in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
+ }
+ if in.ControllerExpandSecretRef != nil {
+ in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef
+ *out = new(SecretReference)
+ **out = **in
}
return
}
@@ -295,6 +256,44 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) {
+ *out = *in
+ if in.ReadOnly != nil {
+ in, out := &in.ReadOnly, &out.ReadOnly
+ *out = new(bool)
+ **out = **in
+ }
+ if in.FSType != nil {
+ in, out := &in.FSType, &out.FSType
+ *out = new(string)
+ **out = **in
+ }
+ if in.VolumeAttributes != nil {
+ in, out := &in.VolumeAttributes, &out.VolumeAttributes
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodePublishSecretRef != nil {
+ in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
+ *out = new(LocalObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource.
+func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CSIVolumeSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Capabilities) DeepCopyInto(out *Capabilities) {
*out = *in
if in.Add != nil {
@@ -330,12 +329,8 @@ func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolume
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
return
}
@@ -360,12 +355,8 @@ func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) {
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
return
}
@@ -381,8 +372,34 @@ func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(SecretReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource.
+func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CinderPersistentVolumeSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) {
*out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = **in
+ }
return
}
@@ -401,12 +418,8 @@ func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
*out = *in
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
return
}
@@ -472,7 +485,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object {
func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ComponentStatus, len(*in))
@@ -517,12 +530,15 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
in, out := &in.BinaryData, &out.BinaryData
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
+ var outVal []byte
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = make([]byte, len(val))
- copy((*out)[key], val)
+ in, out := &val, &outVal
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
}
+ (*out)[key] = outVal
}
}
return
@@ -552,12 +568,8 @@ func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) {
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -578,12 +590,8 @@ func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) {
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -602,7 +610,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector {
func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ConfigMap, len(*in))
@@ -632,6 +640,22 @@ func (in *ConfigMapList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource.
+func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapNodeConfigSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
@@ -644,12 +668,8 @@ func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -677,21 +697,13 @@ func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -753,39 +765,23 @@ func (in *Container) DeepCopyInto(out *Container) {
}
if in.LivenessProbe != nil {
in, out := &in.LivenessProbe, &out.LivenessProbe
- if *in == nil {
- *out = nil
- } else {
- *out = new(Probe)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Probe)
+ (*in).DeepCopyInto(*out)
}
if in.ReadinessProbe != nil {
in, out := &in.ReadinessProbe, &out.ReadinessProbe
- if *in == nil {
- *out = nil
- } else {
- *out = new(Probe)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Probe)
+ (*in).DeepCopyInto(*out)
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
- if *in == nil {
- *out = nil
- } else {
- *out = new(Lifecycle)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Lifecycle)
+ (*in).DeepCopyInto(*out)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecurityContext)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SecurityContext)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -842,30 +838,18 @@ func (in *ContainerState) DeepCopyInto(out *ContainerState) {
*out = *in
if in.Waiting != nil {
in, out := &in.Waiting, &out.Waiting
- if *in == nil {
- *out = nil
- } else {
- *out = new(ContainerStateWaiting)
- **out = **in
- }
+ *out = new(ContainerStateWaiting)
+ **out = **in
}
if in.Running != nil {
in, out := &in.Running, &out.Running
- if *in == nil {
- *out = nil
- } else {
- *out = new(ContainerStateRunning)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ContainerStateRunning)
+ (*in).DeepCopyInto(*out)
}
if in.Terminated != nil {
in, out := &in.Terminated, &out.Terminated
- if *in == nil {
- *out = nil
- } else {
- *out = new(ContainerStateTerminated)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ContainerStateTerminated)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -966,67 +950,6 @@ func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- if in.GracePeriodSeconds != nil {
- in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
- }
- if in.Preconditions != nil {
- in, out := &in.Preconditions, &out.Preconditions
- if *in == nil {
- *out = nil
- } else {
- *out = new(Preconditions)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.OrphanDependents != nil {
- in, out := &in.OrphanDependents, &out.OrphanDependents
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
- }
- if in.PropagationPolicy != nil {
- in, out := &in.PropagationPolicy, &out.PropagationPolicy
- if *in == nil {
- *out = nil
- } else {
- *out = new(DeletionPropagation)
- **out = **in
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions.
-func (in *DeleteOptions) DeepCopy() *DeleteOptions {
- if in == nil {
- return nil
- }
- out := new(DeleteOptions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DeleteOptions) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) {
*out = *in
if in.Items != nil {
@@ -1054,30 +977,18 @@ func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) {
*out = *in
if in.FieldRef != nil {
in, out := &in.FieldRef, &out.FieldRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectFieldSelector)
- **out = **in
- }
+ *out = new(ObjectFieldSelector)
+ **out = **in
}
if in.ResourceFieldRef != nil {
in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ResourceFieldSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ResourceFieldSelector)
+ (*in).DeepCopyInto(*out)
}
if in.Mode != nil {
in, out := &in.Mode, &out.Mode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
return
}
@@ -1104,12 +1015,8 @@ func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) {
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
return
}
@@ -1129,12 +1036,8 @@ func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
*out = *in
if in.SizeLimit != nil {
in, out := &in.SizeLimit, &out.SizeLimit
- if *in == nil {
- *out = nil
- } else {
- x := (*in).DeepCopy()
- *out = &x
- }
+ x := (*in).DeepCopy()
+ *out = &x
}
return
}
@@ -1154,21 +1057,13 @@ func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
*out = *in
if in.NodeName != nil {
in, out := &in.NodeName, &out.NodeName
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
if in.TargetRef != nil {
in, out := &in.TargetRef, &out.TargetRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectReference)
- **out = **in
- }
+ *out = new(ObjectReference)
+ **out = **in
}
return
}
@@ -1271,7 +1166,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object {
func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Endpoints, len(*in))
@@ -1305,21 +1200,13 @@ func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
*out = *in
if in.ConfigMapRef != nil {
in, out := &in.ConfigMapRef, &out.ConfigMapRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ConfigMapEnvSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ConfigMapEnvSource)
+ (*in).DeepCopyInto(*out)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretEnvSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SecretEnvSource)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -1339,12 +1226,8 @@ func (in *EnvVar) DeepCopyInto(out *EnvVar) {
*out = *in
if in.ValueFrom != nil {
in, out := &in.ValueFrom, &out.ValueFrom
- if *in == nil {
- *out = nil
- } else {
- *out = new(EnvVarSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(EnvVarSource)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -1364,39 +1247,23 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
*out = *in
if in.FieldRef != nil {
in, out := &in.FieldRef, &out.FieldRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectFieldSelector)
- **out = **in
- }
+ *out = new(ObjectFieldSelector)
+ **out = **in
}
if in.ResourceFieldRef != nil {
in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ResourceFieldSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ResourceFieldSelector)
+ (*in).DeepCopyInto(*out)
}
if in.ConfigMapKeyRef != nil {
in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ConfigMapKeySelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ConfigMapKeySelector)
+ (*in).DeepCopyInto(*out)
}
if in.SecretKeyRef != nil {
in, out := &in.SecretKeyRef, &out.SecretKeyRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretKeySelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SecretKeySelector)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -1423,21 +1290,13 @@ func (in *Event) DeepCopyInto(out *Event) {
in.EventTime.DeepCopyInto(&out.EventTime)
if in.Series != nil {
in, out := &in.Series, &out.Series
- if *in == nil {
- *out = nil
- } else {
- *out = new(EventSeries)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(EventSeries)
+ (*in).DeepCopyInto(*out)
}
if in.Related != nil {
in, out := &in.Related, &out.Related
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectReference)
- **out = **in
- }
+ *out = new(ObjectReference)
+ **out = **in
}
return
}
@@ -1464,7 +1323,7 @@ func (in *Event) DeepCopyObject() runtime.Object {
func (in *EventList) DeepCopyInto(out *EventList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Event, len(*in))
@@ -1557,12 +1416,8 @@ func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
}
if in.Lun != nil {
in, out := &in.Lun, &out.Lun
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
if in.WWIDs != nil {
in, out := &in.WWIDs, &out.WWIDs
@@ -1587,12 +1442,8 @@ func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSour
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
if in.Options != nil {
in, out := &in.Options, &out.Options
@@ -1619,12 +1470,8 @@ func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
if in.Options != nil {
in, out := &in.Options, &out.Options
@@ -1695,6 +1542,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
+ *out = *in
+ if in.EndpointsNamespace != nil {
+ in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
+func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
+ if in == nil {
+ return nil
+ }
+ out := new(GlusterfsPersistentVolumeSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
*out = *in
return
@@ -1753,30 +1621,18 @@ func (in *Handler) DeepCopyInto(out *Handler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
- if *in == nil {
- *out = nil
- } else {
- *out = new(ExecAction)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ExecAction)
+ (*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
- if *in == nil {
- *out = nil
- } else {
- *out = new(HTTPGetAction)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(HTTPGetAction)
+ (*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
- if *in == nil {
- *out = nil
- } else {
- *out = new(TCPSocketAction)
- **out = **in
- }
+ *out = new(TCPSocketAction)
+ **out = **in
}
return
}
@@ -1817,12 +1673,8 @@ func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
*out = *in
if in.Type != nil {
in, out := &in.Type, &out.Type
- if *in == nil {
- *out = nil
- } else {
- *out = new(HostPathType)
- **out = **in
- }
+ *out = new(HostPathType)
+ **out = **in
}
return
}
@@ -1847,21 +1699,13 @@ func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSo
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
if in.InitiatorName != nil {
in, out := &in.InitiatorName, &out.InitiatorName
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
return
}
@@ -1886,21 +1730,13 @@ func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
if in.InitiatorName != nil {
in, out := &in.InitiatorName, &out.InitiatorName
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
return
}
@@ -1920,12 +1756,8 @@ func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
*out = *in
if in.Mode != nil {
in, out := &in.Mode, &out.Mode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
return
}
@@ -1945,21 +1777,13 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
*out = *in
if in.PostStart != nil {
in, out := &in.PostStart, &out.PostStart
- if *in == nil {
- *out = nil
- } else {
- *out = new(Handler)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Handler)
+ (*in).DeepCopyInto(*out)
}
if in.PreStop != nil {
in, out := &in.PreStop, &out.PreStop
- if *in == nil {
- *out = nil
- } else {
- *out = new(Handler)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Handler)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -2056,7 +1880,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]LimitRange, len(*in))
@@ -2112,7 +1936,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
func (in *List) DeepCopyInto(out *List) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.RawExtension, len(*in))
@@ -2142,40 +1966,6 @@ func (in *List) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ListOptions) DeepCopyInto(out *ListOptions) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- if in.TimeoutSeconds != nil {
- in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
-func (in *ListOptions) DeepCopy() *ListOptions {
- if in == nil {
- return nil
- }
- out := new(ListOptions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ListOptions) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
*out = *in
return
@@ -2231,6 +2021,11 @@ func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
*out = *in
+ if in.FSType != nil {
+ in, out := &in.FSType, &out.FSType
+ *out = new(string)
+ **out = **in
+ }
return
}
@@ -2292,7 +2087,7 @@ func (in *Namespace) DeepCopyObject() runtime.Object {
func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Namespace, len(*in))
@@ -2407,12 +2202,8 @@ func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
*out = *in
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
- if *in == nil {
- *out = nil
- } else {
- *out = new(NodeSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(NodeSelector)
+ (*in).DeepCopyInto(*out)
}
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
@@ -2455,15 +2246,10 @@ func (in *NodeCondition) DeepCopy() *NodeCondition {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
*out = *in
- out.TypeMeta = in.TypeMeta
- if in.ConfigMapRef != nil {
- in, out := &in.ConfigMapRef, &out.ConfigMapRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectReference)
- **out = **in
- }
+ if in.ConfigMap != nil {
+ in, out := &in.ConfigMap, &out.ConfigMap
+ *out = new(ConfigMapNodeConfigSource)
+ **out = **in
}
return
}
@@ -2478,12 +2264,35 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
return out
}
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeConfigSource) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+ *out = *in
+ if in.Assigned != nil {
+ in, out := &in.Assigned, &out.Assigned
+ *out = new(NodeConfigSource)
+ (*in).DeepCopyInto(*out)
}
- return nil
+ if in.Active != nil {
+ in, out := &in.Active, &out.Active
+ *out = new(NodeConfigSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastKnownGood != nil {
+ in, out := &in.LastKnownGood, &out.LastKnownGood
+ *out = new(NodeConfigSource)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeConfigStatus)
+ in.DeepCopyInto(out)
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -2507,7 +2316,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
func (in *NodeList) DeepCopyInto(out *NodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Node, len(*in))
@@ -2638,6 +2447,13 @@ func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.MatchFields != nil {
+ in, out := &in.MatchFields, &out.MatchFields
+ *out = make([]NodeSelectorRequirement, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -2663,12 +2479,8 @@ func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
}
if in.ConfigSource != nil {
in, out := &in.ConfigSource, &out.ConfigSource
- if *in == nil {
- *out = nil
- } else {
- *out = new(NodeConfigSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(NodeConfigSource)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -2731,6 +2543,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = make([]AttachedVolume, len(*in))
copy(*out, *in)
}
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(NodeConfigStatus)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -2777,75 +2594,6 @@ func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
- *out = *in
- in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
- if in.DeletionTimestamp != nil {
- in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
- }
- if in.DeletionGracePeriodSeconds != nil {
- in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
- }
- if in.Labels != nil {
- in, out := &in.Labels, &out.Labels
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.OwnerReferences != nil {
- in, out := &in.OwnerReferences, &out.OwnerReferences
- *out = make([]meta_v1.OwnerReference, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Initializers != nil {
- in, out := &in.Initializers, &out.Initializers
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.Initializers)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.Finalizers != nil {
- in, out := &in.Finalizers, &out.Finalizers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
-func (in *ObjectMeta) DeepCopy() *ObjectMeta {
- if in == nil {
- return nil
- }
- out := new(ObjectMeta)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
*out = *in
return
@@ -2947,7 +2695,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi
func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PersistentVolumeClaim, len(*in))
@@ -2986,31 +2734,24 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
}
in.Resources.DeepCopyInto(&out.Resources)
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
if in.VolumeMode != nil {
in, out := &in.VolumeMode, &out.VolumeMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(PersistentVolumeMode)
- **out = **in
- }
+ *out = new(PersistentVolumeMode)
+ **out = **in
+ }
+ if in.DataSource != nil {
+ in, out := &in.DataSource, &out.DataSource
+ *out = new(TypedLocalObjectReference)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -3080,7 +2821,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo
func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PersistentVolume, len(*in))
@@ -3114,201 +2855,113 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
*out = *in
if in.GCEPersistentDisk != nil {
in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(GCEPersistentDiskVolumeSource)
- **out = **in
- }
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = **in
}
if in.AWSElasticBlockStore != nil {
in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
- if *in == nil {
- *out = nil
- } else {
- *out = new(AWSElasticBlockStoreVolumeSource)
- **out = **in
- }
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = **in
}
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
- if *in == nil {
- *out = nil
- } else {
- *out = new(HostPathVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(HostPathVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Glusterfs != nil {
in, out := &in.Glusterfs, &out.Glusterfs
- if *in == nil {
- *out = nil
- } else {
- *out = new(GlusterfsVolumeSource)
- **out = **in
- }
+ *out = new(GlusterfsPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.NFS != nil {
in, out := &in.NFS, &out.NFS
- if *in == nil {
- *out = nil
- } else {
- *out = new(NFSVolumeSource)
- **out = **in
- }
+ *out = new(NFSVolumeSource)
+ **out = **in
}
if in.RBD != nil {
in, out := &in.RBD, &out.RBD
- if *in == nil {
- *out = nil
- } else {
- *out = new(RBDPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(RBDPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.ISCSI != nil {
in, out := &in.ISCSI, &out.ISCSI
- if *in == nil {
- *out = nil
- } else {
- *out = new(ISCSIPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ISCSIPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Cinder != nil {
in, out := &in.Cinder, &out.Cinder
- if *in == nil {
- *out = nil
- } else {
- *out = new(CinderVolumeSource)
- **out = **in
- }
+ *out = new(CinderPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.CephFS != nil {
in, out := &in.CephFS, &out.CephFS
- if *in == nil {
- *out = nil
- } else {
- *out = new(CephFSPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(CephFSPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.FC != nil {
in, out := &in.FC, &out.FC
- if *in == nil {
- *out = nil
- } else {
- *out = new(FCVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(FCVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Flocker != nil {
in, out := &in.Flocker, &out.Flocker
- if *in == nil {
- *out = nil
- } else {
- *out = new(FlockerVolumeSource)
- **out = **in
- }
+ *out = new(FlockerVolumeSource)
+ **out = **in
}
if in.FlexVolume != nil {
in, out := &in.FlexVolume, &out.FlexVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(FlexPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(FlexPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.AzureFile != nil {
in, out := &in.AzureFile, &out.AzureFile
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureFilePersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(AzureFilePersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(VsphereVirtualDiskVolumeSource)
- **out = **in
- }
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = **in
}
if in.Quobyte != nil {
in, out := &in.Quobyte, &out.Quobyte
- if *in == nil {
- *out = nil
- } else {
- *out = new(QuobyteVolumeSource)
- **out = **in
- }
+ *out = new(QuobyteVolumeSource)
+ **out = **in
}
if in.AzureDisk != nil {
in, out := &in.AzureDisk, &out.AzureDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureDiskVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(AzureDiskVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(PhotonPersistentDiskVolumeSource)
- **out = **in
- }
+ *out = new(PhotonPersistentDiskVolumeSource)
+ **out = **in
}
if in.PortworxVolume != nil {
in, out := &in.PortworxVolume, &out.PortworxVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(PortworxVolumeSource)
- **out = **in
- }
+ *out = new(PortworxVolumeSource)
+ **out = **in
}
if in.ScaleIO != nil {
in, out := &in.ScaleIO, &out.ScaleIO
- if *in == nil {
- *out = nil
- } else {
- *out = new(ScaleIOPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ScaleIOPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Local != nil {
in, out := &in.Local, &out.Local
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalVolumeSource)
- **out = **in
- }
+ *out = new(LocalVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.StorageOS != nil {
in, out := &in.StorageOS, &out.StorageOS
- if *in == nil {
- *out = nil
- } else {
- *out = new(StorageOSPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(StorageOSPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.CSI != nil {
in, out := &in.CSI, &out.CSI
- if *in == nil {
- *out = nil
- } else {
- *out = new(CSIPersistentVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(CSIPersistentVolumeSource)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -3341,12 +2994,8 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
}
if in.ClaimRef != nil {
in, out := &in.ClaimRef, &out.ClaimRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectReference)
- **out = **in
- }
+ *out = new(ObjectReference)
+ **out = **in
}
if in.MountOptions != nil {
in, out := &in.MountOptions, &out.MountOptions
@@ -3355,21 +3004,13 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
}
if in.VolumeMode != nil {
in, out := &in.VolumeMode, &out.VolumeMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(PersistentVolumeMode)
- **out = **in
- }
+ *out = new(PersistentVolumeMode)
+ **out = **in
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(VolumeNodeAffinity)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(VolumeNodeAffinity)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -3479,12 +3120,8 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
@@ -3615,12 +3252,8 @@ func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
+ *out = new(string)
+ **out = **in
}
return
}
@@ -3669,7 +3302,7 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object {
func (in *PodList) DeepCopyInto(out *PodList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Pod, len(*in))
@@ -3704,38 +3337,22 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
out.TypeMeta = in.TypeMeta
if in.SinceSeconds != nil {
in, out := &in.SinceSeconds, &out.SinceSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.SinceTime != nil {
in, out := &in.SinceTime, &out.SinceTime
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
if in.TailLines != nil {
in, out := &in.TailLines, &out.TailLines
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.LimitBytes != nil {
in, out := &in.LimitBytes, &out.LimitBytes
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
return
}
@@ -3814,43 +3431,48 @@ func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+ if in == nil {
+ return nil
+ }
+ out := new(PodReadinessGate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
- if *in == nil {
- *out = nil
- } else {
- *out = new(SELinuxOptions)
- **out = **in
- }
+ *out = new(SELinuxOptions)
+ **out = **in
+ }
+ if in.WindowsOptions != nil {
+ in, out := &in.WindowsOptions, &out.WindowsOptions
+ *out = new(WindowsSecurityContextOptions)
+ (*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.RunAsNonRoot != nil {
in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.SupplementalGroups != nil {
in, out := &in.SupplementalGroups, &out.SupplementalGroups
@@ -3859,12 +3481,13 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
}
if in.FSGroup != nil {
in, out := &in.FSGroup, &out.FSGroup
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Sysctls != nil {
+ in, out := &in.Sysctls, &out.Sysctls
+ *out = make([]Sysctl, len(*in))
+ copy(*out, *in)
}
return
}
@@ -3884,12 +3507,8 @@ func (in *PodSignature) DeepCopyInto(out *PodSignature) {
*out = *in
if in.PodController != nil {
in, out := &in.PodController, &out.PodController
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.OwnerReference)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(metav1.OwnerReference)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -3930,21 +3549,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
@@ -3955,30 +3566,18 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.ShareProcessNamespace != nil {
in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
- if *in == nil {
- *out = nil
- } else {
- *out = new(PodSecurityContext)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(PodSecurityContext)
+ (*in).DeepCopyInto(*out)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
@@ -3987,12 +3586,8 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(Affinity)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Affinity)
+ (*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
@@ -4010,20 +3605,39 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
}
if in.Priority != nil {
in, out := &in.Priority, &out.Priority
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
if in.DNSConfig != nil {
in, out := &in.DNSConfig, &out.DNSConfig
- if *in == nil {
- *out = nil
- } else {
- *out = new(PodDNSConfig)
- (*in).DeepCopyInto(*out)
+ *out = new(PodDNSConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ReadinessGates != nil {
+ in, out := &in.ReadinessGates, &out.ReadinessGates
+ *out = make([]PodReadinessGate, len(*in))
+ copy(*out, *in)
+ }
+ if in.RuntimeClassName != nil {
+ in, out := &in.RuntimeClassName, &out.RuntimeClassName
+ *out = new(string)
+ **out = **in
+ }
+ if in.EnableServiceLinks != nil {
+ in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+ *out = new(bool)
+ **out = **in
+ }
+ if in.PreemptionPolicy != nil {
+ in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
+ *out = new(PreemptionPolicy)
+ **out = **in
+ }
+ if in.Overhead != nil {
+ in, out := &in.Overhead, &out.Overhead
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
}
}
return
@@ -4051,11 +3665,7 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
}
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
if in.InitContainerStatuses != nil {
in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
@@ -4142,7 +3752,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object {
func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodTemplate, len(*in))
@@ -4210,12 +3820,8 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) {
*out = *in
if in.UID != nil {
in, out := &in.UID, &out.UID
- if *in == nil {
- *out = nil
- } else {
- *out = new(types.UID)
- **out = **in
- }
+ *out = new(types.UID)
+ **out = **in
}
return
}
@@ -4294,12 +3900,8 @@ func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
return
}
@@ -4340,12 +3942,8 @@ func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
return
}
@@ -4370,12 +3968,8 @@ func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
return
}
@@ -4470,7 +4064,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi
func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicationController, len(*in))
@@ -4504,12 +4098,8 @@ func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
@@ -4520,12 +4110,8 @@ func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec
}
if in.Template != nil {
in, out := &in.Template, &out.Template
- if *in == nil {
- *out = nil
- } else {
- *out = new(PodTemplateSpec)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(PodTemplateSpec)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -4634,7 +4220,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object {
func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceQuota, len(*in))
@@ -4678,6 +4264,11 @@ func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
*out = make([]ResourceQuotaScope, len(*in))
copy(*out, *in)
}
+ if in.ScopeSelector != nil {
+ in, out := &in.ScopeSelector, &out.ScopeSelector
+ *out = new(ScopeSelector)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -4772,12 +4363,8 @@ func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolu
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretReference)
- **out = **in
- }
+ *out = new(SecretReference)
+ **out = **in
}
return
}
@@ -4797,12 +4384,8 @@ func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
return
}
@@ -4818,6 +4401,50 @@ func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
+ *out = *in
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]ScopedResourceSelectorRequirement, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
+func (in *ScopeSelector) DeepCopy() *ScopeSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(ScopeSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
+func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
+ if in == nil {
+ return nil
+ }
+ out := new(ScopedResourceSelectorRequirement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Secret) DeepCopyInto(out *Secret) {
*out = *in
out.TypeMeta = in.TypeMeta
@@ -4826,12 +4453,15 @@ func (in *Secret) DeepCopyInto(out *Secret) {
in, out := &in.Data, &out.Data
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
+ var outVal []byte
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = make([]byte, len(val))
- copy((*out)[key], val)
+ in, out := &val, &outVal
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
}
+ (*out)[key] = outVal
}
}
if in.StringData != nil {
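
The Secret.Data hunk above switches to a shadowed in/out pair per map entry so that nil values stay nil while non-nil byte slices get their own backing array. A rough standalone equivalent (copyByteMap is a hypothetical name, not part of the generated code):

func copyByteMap(in map[string][]byte) map[string][]byte {
	if in == nil {
		return nil
	}
	out := make(map[string][]byte, len(in))
	for k, v := range in {
		if v == nil {
			// Preserve explicit nil entries rather than turning them into empty slices.
			out[k] = nil
			continue
		}
		// Copy the slice contents, not just the slice header.
		out[k] = append([]byte(nil), v...)
	}
	return out
}
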
@@ -4868,12 +4498,8 @@ func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -4894,12 +4520,8 @@ func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -4918,7 +4540,7 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
func (in *SecretList) DeepCopyInto(out *SecretList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Secret, len(*in))
@@ -4960,12 +4582,8 @@ func (in *SecretProjection) DeepCopyInto(out *SecretProjection) {
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -5008,21 +4626,13 @@ func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) {
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
- if *in == nil {
- *out = nil
- } else {
- *out = new(int32)
- **out = **in
- }
+ *out = new(int32)
+ **out = **in
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -5042,75 +4652,53 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
*out = *in
if in.Capabilities != nil {
in, out := &in.Capabilities, &out.Capabilities
- if *in == nil {
- *out = nil
- } else {
- *out = new(Capabilities)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Capabilities)
+ (*in).DeepCopyInto(*out)
}
if in.Privileged != nil {
in, out := &in.Privileged, &out.Privileged
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
- if *in == nil {
- *out = nil
- } else {
- *out = new(SELinuxOptions)
- **out = **in
- }
+ *out = new(SELinuxOptions)
+ **out = **in
+ }
+ if in.WindowsOptions != nil {
+ in, out := &in.WindowsOptions, &out.WindowsOptions
+ *out = new(WindowsSecurityContextOptions)
+ (*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.RunAsNonRoot != nil {
in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.ReadOnlyRootFilesystem != nil {
in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.AllowPrivilegeEscalation != nil {
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ProcMount != nil {
+ in, out := &in.ProcMount, &out.ProcMount
+ *out = new(ProcMountType)
+ **out = **in
}
return
}
@@ -5196,12 +4784,8 @@ func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -5228,7 +4812,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object {
func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ServiceAccount, len(*in))
@@ -5258,10 +4842,31 @@ func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) {
+ *out = *in
+ if in.ExpirationSeconds != nil {
+ in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection.
+func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountTokenProjection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceList) DeepCopyInto(out *ServiceList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Service, len(*in))
@@ -5359,12 +4964,8 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
}
if in.SessionAffinityConfig != nil {
in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
- if *in == nil {
- *out = nil
- } else {
- *out = new(SessionAffinityConfig)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SessionAffinityConfig)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -5401,12 +5002,8 @@ func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
*out = *in
if in.ClientIP != nil {
in, out := &in.ClientIP, &out.ClientIP
- if *in == nil {
- *out = nil
- } else {
- *out = new(ClientIPConfig)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ClientIPConfig)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -5426,12 +5023,8 @@ func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistent
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(ObjectReference)
- **out = **in
- }
+ *out = new(ObjectReference)
+ **out = **in
}
return
}
@@ -5451,12 +5044,8 @@ func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
- if *in == nil {
- *out = nil
- } else {
- *out = new(LocalObjectReference)
- **out = **in
- }
+ *out = new(LocalObjectReference)
+ **out = **in
}
return
}
@@ -5509,11 +5098,7 @@ func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
if in.TimeAdded != nil {
in, out := &in.TimeAdded, &out.TimeAdded
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
return
}
@@ -5533,12 +5118,8 @@ func (in *Toleration) DeepCopyInto(out *Toleration) {
*out = *in
if in.TolerationSeconds != nil {
in, out := &in.TolerationSeconds, &out.TolerationSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
return
}
@@ -5554,6 +5135,71 @@ func (in *Toleration) DeepCopy() *Toleration {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement.
+func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement {
+ if in == nil {
+ return nil
+ }
+ out := new(TopologySelectorLabelRequirement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) {
+ *out = *in
+ if in.MatchLabelExpressions != nil {
+ in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions
+ *out = make([]TopologySelectorLabelRequirement, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm.
+func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
+ if in == nil {
+ return nil
+ }
+ out := new(TopologySelectorTerm)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
+ *out = *in
+ if in.APIGroup != nil {
+ in, out := &in.APIGroup, &out.APIGroup
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
+func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TypedLocalObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in
in.VolumeSource.DeepCopyInto(&out.VolumeSource)
@@ -5591,12 +5237,8 @@ func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
*out = *in
if in.MountPropagation != nil {
in, out := &in.MountPropagation, &out.MountPropagation
- if *in == nil {
- *out = nil
- } else {
- *out = new(MountPropagationMode)
- **out = **in
- }
+ *out = new(MountPropagationMode)
+ **out = **in
}
return
}
@@ -5616,12 +5258,8 @@ func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
*out = *in
if in.Required != nil {
in, out := &in.Required, &out.Required
- if *in == nil {
- *out = nil
- } else {
- *out = new(NodeSelector)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(NodeSelector)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -5641,30 +5279,23 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretProjection)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SecretProjection)
+ (*in).DeepCopyInto(*out)
}
if in.DownwardAPI != nil {
in, out := &in.DownwardAPI, &out.DownwardAPI
- if *in == nil {
- *out = nil
- } else {
- *out = new(DownwardAPIProjection)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(DownwardAPIProjection)
+ (*in).DeepCopyInto(*out)
}
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
- if *in == nil {
- *out = nil
- } else {
- *out = new(ConfigMapProjection)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ConfigMapProjection)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceAccountToken != nil {
+ in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
+ *out = new(ServiceAccountTokenProjection)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -5684,246 +5315,143 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = *in
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
- if *in == nil {
- *out = nil
- } else {
- *out = new(HostPathVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(HostPathVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
- if *in == nil {
- *out = nil
- } else {
- *out = new(EmptyDirVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(EmptyDirVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.GCEPersistentDisk != nil {
in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(GCEPersistentDiskVolumeSource)
- **out = **in
- }
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = **in
}
if in.AWSElasticBlockStore != nil {
in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
- if *in == nil {
- *out = nil
- } else {
- *out = new(AWSElasticBlockStoreVolumeSource)
- **out = **in
- }
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = **in
}
if in.GitRepo != nil {
in, out := &in.GitRepo, &out.GitRepo
- if *in == nil {
- *out = nil
- } else {
- *out = new(GitRepoVolumeSource)
- **out = **in
- }
+ *out = new(GitRepoVolumeSource)
+ **out = **in
}
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
- if *in == nil {
- *out = nil
- } else {
- *out = new(SecretVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(SecretVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.NFS != nil {
in, out := &in.NFS, &out.NFS
- if *in == nil {
- *out = nil
- } else {
- *out = new(NFSVolumeSource)
- **out = **in
- }
+ *out = new(NFSVolumeSource)
+ **out = **in
}
if in.ISCSI != nil {
in, out := &in.ISCSI, &out.ISCSI
- if *in == nil {
- *out = nil
- } else {
- *out = new(ISCSIVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ISCSIVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Glusterfs != nil {
in, out := &in.Glusterfs, &out.Glusterfs
- if *in == nil {
- *out = nil
- } else {
- *out = new(GlusterfsVolumeSource)
- **out = **in
- }
+ *out = new(GlusterfsVolumeSource)
+ **out = **in
}
if in.PersistentVolumeClaim != nil {
in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
- if *in == nil {
- *out = nil
- } else {
- *out = new(PersistentVolumeClaimVolumeSource)
- **out = **in
- }
+ *out = new(PersistentVolumeClaimVolumeSource)
+ **out = **in
}
if in.RBD != nil {
in, out := &in.RBD, &out.RBD
- if *in == nil {
- *out = nil
- } else {
- *out = new(RBDVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(RBDVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.FlexVolume != nil {
in, out := &in.FlexVolume, &out.FlexVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(FlexVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(FlexVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Cinder != nil {
in, out := &in.Cinder, &out.Cinder
- if *in == nil {
- *out = nil
- } else {
- *out = new(CinderVolumeSource)
- **out = **in
- }
+ *out = new(CinderVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.CephFS != nil {
in, out := &in.CephFS, &out.CephFS
- if *in == nil {
- *out = nil
- } else {
- *out = new(CephFSVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(CephFSVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.Flocker != nil {
in, out := &in.Flocker, &out.Flocker
- if *in == nil {
- *out = nil
- } else {
- *out = new(FlockerVolumeSource)
- **out = **in
- }
+ *out = new(FlockerVolumeSource)
+ **out = **in
}
if in.DownwardAPI != nil {
in, out := &in.DownwardAPI, &out.DownwardAPI
- if *in == nil {
- *out = nil
- } else {
- *out = new(DownwardAPIVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(DownwardAPIVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.FC != nil {
in, out := &in.FC, &out.FC
- if *in == nil {
- *out = nil
- } else {
- *out = new(FCVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(FCVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.AzureFile != nil {
in, out := &in.AzureFile, &out.AzureFile
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureFileVolumeSource)
- **out = **in
- }
+ *out = new(AzureFileVolumeSource)
+ **out = **in
}
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
- if *in == nil {
- *out = nil
- } else {
- *out = new(ConfigMapVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ConfigMapVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(VsphereVirtualDiskVolumeSource)
- **out = **in
- }
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = **in
}
if in.Quobyte != nil {
in, out := &in.Quobyte, &out.Quobyte
- if *in == nil {
- *out = nil
- } else {
- *out = new(QuobyteVolumeSource)
- **out = **in
- }
+ *out = new(QuobyteVolumeSource)
+ **out = **in
}
if in.AzureDisk != nil {
in, out := &in.AzureDisk, &out.AzureDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(AzureDiskVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(AzureDiskVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
- if *in == nil {
- *out = nil
- } else {
- *out = new(PhotonPersistentDiskVolumeSource)
- **out = **in
- }
+ *out = new(PhotonPersistentDiskVolumeSource)
+ **out = **in
}
if in.Projected != nil {
in, out := &in.Projected, &out.Projected
- if *in == nil {
- *out = nil
- } else {
- *out = new(ProjectedVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ProjectedVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.PortworxVolume != nil {
in, out := &in.PortworxVolume, &out.PortworxVolume
- if *in == nil {
- *out = nil
- } else {
- *out = new(PortworxVolumeSource)
- **out = **in
- }
+ *out = new(PortworxVolumeSource)
+ **out = **in
}
if in.ScaleIO != nil {
in, out := &in.ScaleIO, &out.ScaleIO
- if *in == nil {
- *out = nil
- } else {
- *out = new(ScaleIOVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ScaleIOVolumeSource)
+ (*in).DeepCopyInto(*out)
}
if in.StorageOS != nil {
in, out := &in.StorageOS, &out.StorageOS
- if *in == nil {
- *out = nil
- } else {
- *out = new(StorageOSVolumeSource)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(StorageOSVolumeSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CSI != nil {
+ in, out := &in.CSI, &out.CSI
+ *out = new(CSIVolumeSource)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -5970,3 +5498,29 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) {
+ *out = *in
+ if in.GMSACredentialSpecName != nil {
+ in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
+ *out = new(string)
+ **out = **in
+ }
+ if in.GMSACredentialSpec != nil {
+ in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions.
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(WindowsSecurityContextOptions)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/apimachinery/README.md b/vendor/k8s.io/apimachinery/README.md
deleted file mode 100644
index 258250ce2..000000000
--- a/vendor/k8s.io/apimachinery/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# apimachinery
-
-Scheme, typing, encoding, decoding, and conversion packages for Kubernetes and Kubernetes-like API objects.
-
-
-## Purpose
-
-This library is a shared dependency for servers and clients to work with Kubernetes API infrastructure without direct
-type dependencies. Its first consumers are `k8s.io/kubernetes`, `k8s.io/client-go`, and `k8s.io/apiserver`.
-
-
-## Compatibility
-
-There are *NO compatibility guarantees* for this repository. It is in direct support of Kubernetes, so branches
-will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the
-compatibility guarantee.
-
-
-## Where does it come from?
-
-`apimachinery` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery.
-Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here.
-
-
-## Things you should *NOT* do
-
- 1. Add API types to this repo. This is for the machinery, not for the types.
- 2. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apimachinery`.
- 3. Expect compatibility. This repo is direct support of Kubernetes and the API isn't yet stable enough for API guarantees.
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
new file mode 100644
index 000000000..63434030c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
@@ -0,0 +1,26 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- saad-ali
+- janetkuo
+- tallclair
+- eparis
+- dims
+- hongchaodeng
+- krousey
+- cjcullen
+- david-mcmahon
+- goltermann
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index bcc032df9..f4201eb69 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "reflect"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -82,7 +83,20 @@ func (u *UnexpectedObjectError) Error() string {
func FromObject(obj runtime.Object) error {
switch t := obj.(type) {
case *metav1.Status:
- return &StatusError{*t}
+ return &StatusError{ErrStatus: *t}
+ case runtime.Unstructured:
+ var status metav1.Status
+ obj := t.UnstructuredContent()
+ if !reflect.DeepEqual(obj["kind"], "Status") {
+ break
+ }
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil {
+ return err
+ }
+ if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" {
+ break
+ }
+ return &StatusError{ErrStatus: status}
}
return &UnexpectedObjectError{obj}
}
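
Usage sketch for the extended FromObject above, assuming the caller already holds an unstructured object decoded from an API response; the package aliases apierrors and unstructured are assumptions, not part of this diff:

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func statusErrFromUnstructured() error {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"kind":       "Status",
		"apiVersion": "v1",
		"status":     "Failure",
		"reason":     "NotFound",
		"message":    "pods \"foo\" not found",
		"code":       int64(404),
	}}
	// Returns a *StatusError when kind and apiVersion match a Status,
	// otherwise an *UnexpectedObjectError wrapping the object.
	return apierrors.FromObject(obj)
}
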
@@ -170,6 +184,20 @@ func NewConflict(qualifiedResource schema.GroupResource, name string, err error)
}}
}
+// NewApplyConflict returns an error including details on the request's apply conflicts
+func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError {
+ return &StatusError{ErrStatus: metav1.Status{
+ Status: metav1.StatusFailure,
+ Code: http.StatusConflict,
+ Reason: metav1.StatusReasonConflict,
+ Details: &metav1.StatusDetails{
+ // TODO: Get obj details here?
+ Causes: causes,
+ },
+ Message: message,
+ }}
+}
+
// NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known.
func NewGone(message string) *StatusError {
return &StatusError{metav1.Status{
@@ -327,6 +355,17 @@ func NewTooManyRequestsError(message string) *StatusError {
}}
}
+// NewRequestEntityTooLargeError returns an error indicating that the request
+// entity was too large.
+func NewRequestEntityTooLargeError(message string) *StatusError {
+ return &StatusError{metav1.Status{
+ Status: metav1.StatusFailure,
+ Code: http.StatusRequestEntityTooLarge,
+ Reason: metav1.StatusReasonRequestEntityTooLarge,
+ Message: fmt.Sprintf("Request entity too large: %s", message),
+ }}
+}
+
// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
reason := metav1.StatusReasonUnknown
@@ -355,7 +394,11 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr
case http.StatusNotAcceptable:
reason = metav1.StatusReasonNotAcceptable
// the server message has details about what types are acceptable
- message = serverMessage
+ if len(serverMessage) == 0 || serverMessage == "unknown" {
+ message = "the server was unable to respond with a content type that the client supports"
+ } else {
+ message = serverMessage
+ }
case http.StatusUnsupportedMediaType:
reason = metav1.StatusReasonUnsupportedMediaType
// the server message has details about what types are acceptable
@@ -513,6 +556,19 @@ func IsTooManyRequests(err error) bool {
return false
}
+// IsRequestEntityTooLargeError determines if err is an error which indicates
+// the request entity is too large.
+func IsRequestEntityTooLargeError(err error) bool {
+ if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
+ return true
+ }
+ switch t := err.(type) {
+ case APIStatus:
+ return t.Status().Code == http.StatusRequestEntityTooLarge
+ }
+ return false
+}
+
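
A small sketch pairing the two new 413 helpers above; the byte limit and the retry strategy are illustrative, not prescribed by the library, and apierrors is an assumed alias for this package:

func rejectLargeBody(size, limit int64) error {
	if size > limit {
		// Server side: produce a typed 413 status error.
		return apierrors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d bytes", limit))
	}
	return nil
}

func shouldShrinkRequest(err error) bool {
	// Client side: true when the server answered with 413 or
	// StatusReasonRequestEntityTooLarge, e.g. a cue to send a smaller patch.
	return apierrors.IsRequestEntityTooLargeError(err)
}
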
// IsUnexpectedServerError returns true if the server response was not in the expected API format,
// and may be the result of another HTTP actor.
func IsUnexpectedServerError(err error) bool {
@@ -565,3 +621,46 @@ func ReasonForError(err error) metav1.StatusReason {
}
return metav1.StatusReasonUnknown
}
+
+// ErrorReporter converts generic errors into runtime.Object errors without
+// requiring the caller to take a dependency on meta/v1 (where Status lives).
+// This prevents circular dependencies in core watch code.
+type ErrorReporter struct {
+ code int
+ verb string
+ reason string
+}
+
+// NewClientErrorReporter will respond with valid v1.Status objects that report
+// unexpected server responses. Primarily used by watch to report errors when
+// we attempt to decode a response from the server and it is not in the form
+// we expect. Because watch is a dependency of the core api, we can't return
+// meta/v1.Status in that package and so must inject this interface to convert a
+// generic error as appropriate. The reason is passed as a unique status cause
+// on the returned status, otherwise the generic "ClientError" is returned.
+func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter {
+ return &ErrorReporter{
+ code: code,
+ verb: verb,
+ reason: reason,
+ }
+}
+
+// AsObject returns a valid error runtime.Object (a v1.Status) for the given
+// error, using the code and verb of the reporter type. The error is set to
+// indicate that this was an unexpected server response.
+func (r *ErrorReporter) AsObject(err error) runtime.Object {
+ status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true)
+ if status.ErrStatus.Details == nil {
+ status.ErrStatus.Details = &metav1.StatusDetails{}
+ }
+ reason := r.reason
+ if len(reason) == 0 {
+ reason = "ClientError"
+ }
+ status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{
+ Type: metav1.CauseType(reason),
+ Message: err.Error(),
+ })
+ return &status.ErrStatus
+}
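
Usage sketch for the reporter added above, roughly how a watch decoder could surface a malformed server response as a v1.Status object; the verb and reason strings are illustrative assumptions:

func reportDecodeFailure(decodeErr error) runtime.Object {
	reporter := apierrors.NewClientErrorReporter(http.StatusInternalServerError, "GET", "ClientWatchDecoding")
	// The result is a *metav1.Status describing an unexpected server response,
	// with the original error attached as a status cause of the given reason.
	return reporter.AsObject(decodeErr)
}
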
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
new file mode 100644
index 000000000..8454be55e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
@@ -0,0 +1,18 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- derekwaynecarr
+- mikedanese
+- saad-ali
+- janetkuo
+- tallclair
+- eparis
+- jbeda
+- xiang90
+- mbohlool
+- david-mcmahon
+- goltermann
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
index 6de71e508..9d7835bc2 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,18 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
-// DO NOT EDIT!
/*
- Package resource is a generated protocol buffer package.
+Package resource is a generated protocol buffer package.
- It is generated from these files:
- k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+It is generated from these files:
+ k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
- It has these top-level messages:
- Quantity
+It has these top-level messages:
+ Quantity
*/
package resource
@@ -57,21 +56,20 @@ func init() {
}
var fileDescriptorGenerated = []byte{
- // 255 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x8f, 0xa1, 0x4e, 0x03, 0x41,
- 0x10, 0x86, 0x77, 0x0d, 0x29, 0x95, 0x0d, 0x21, 0xa4, 0x62, 0xaf, 0x21, 0x08, 0x0c, 0x3b, 0x02,
- 0xd3, 0x20, 0xf1, 0x08, 0x90, 0xb8, 0xbb, 0xeb, 0xb0, 0xdd, 0x1c, 0xdd, 0xbd, 0xcc, 0xce, 0x92,
- 0xd4, 0x55, 0x22, 0x2b, 0x91, 0xbd, 0xb7, 0xa9, 0xac, 0xac, 0x40, 0x70, 0xcb, 0x8b, 0x90, 0x5e,
- 0xdb, 0x84, 0x90, 0xe0, 0xe6, 0xfb, 0x27, 0xdf, 0xe4, 0x9f, 0xfe, 0x43, 0x35, 0x0e, 0xda, 0x7a,
- 0xa8, 0x62, 0x81, 0xe4, 0x90, 0x31, 0xc0, 0x1b, 0xba, 0x89, 0x27, 0x38, 0x2c, 0xf2, 0xda, 0xce,
- 0xf2, 0x72, 0x6a, 0x1d, 0xd2, 0x1c, 0xea, 0xca, 0xec, 0x02, 0x20, 0x0c, 0x3e, 0x52, 0x89, 0x60,
- 0xd0, 0x21, 0xe5, 0x8c, 0x13, 0x5d, 0x93, 0x67, 0x3f, 0xb8, 0xda, 0x5b, 0xfa, 0xb7, 0xa5, 0xeb,
- 0xca, 0xec, 0x02, 0x7d, 0xb4, 0x86, 0x37, 0xc6, 0xf2, 0x34, 0x16, 0xba, 0xf4, 0x33, 0x30, 0xde,
- 0x78, 0xe8, 0xe4, 0x22, 0xbe, 0x74, 0xd4, 0x41, 0x37, 0xed, 0x8f, 0x0e, 0x6f, 0xff, 0xab, 0x12,
- 0xd9, 0xbe, 0x82, 0x75, 0x1c, 0x98, 0xfe, 0x36, 0xb9, 0x1c, 0xf7, 0x7b, 0x8f, 0x31, 0x77, 0x6c,
- 0x79, 0x3e, 0x38, 0xef, 0x9f, 0x04, 0x26, 0xeb, 0xcc, 0x85, 0x1c, 0xc9, 0xeb, 0xd3, 0xa7, 0x03,
- 0xdd, 0x9d, 0x7d, 0xac, 0x32, 0xf1, 0xde, 0x64, 0x62, 0xd9, 0x64, 0x62, 0xd5, 0x64, 0x62, 0xf1,
- 0x39, 0x12, 0xf7, 0x7a, 0xdd, 0x2a, 0xb1, 0x69, 0x95, 0xd8, 0xb6, 0x4a, 0x2c, 0x92, 0x92, 0xeb,
- 0xa4, 0xe4, 0x26, 0x29, 0xb9, 0x4d, 0x4a, 0x7e, 0x25, 0x25, 0x97, 0xdf, 0x4a, 0x3c, 0xf7, 0x8e,
- 0xdf, 0xfc, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x5e, 0xda, 0xf9, 0x43, 0x01, 0x00, 0x00,
+ // 237 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30,
+ 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b,
+ 0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7,
+ 0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36,
+ 0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12,
+ 0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9,
+ 0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2,
+ 0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07,
+ 0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a,
+ 0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a,
+ 0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76,
+ 0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea,
+ 0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11,
+ 0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6,
+ 0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00,
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
index 40185777e..acc904445 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,17 +21,15 @@ syntax = 'proto2';
package k8s.io.apimachinery.pkg.api.resource;
-import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
-
// Package-wide variables from generator "generated".
option go_package = "resource";
// Quantity is a fixed-point representation of a number.
// It provides convenient marshaling/unmarshaling in JSON and YAML,
// in addition to String() and Int64() accessors.
-//
+//
// The serialization format is:
-//
+//
// <quantity> ::= <signedNumber><suffix>
// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
// <digit> ::= 0 | 1 | ... | 9
@@ -45,16 +43,16 @@ option go_package = "resource";
// <decimalSI> ::= m | "" | k | M | G | T | P | E
// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
-//
+//
// No matter which of the three exponent forms is used, no quantity may represent
// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
// places. Numbers larger or more precise will be capped or rounded up.
// (E.g.: 0.1m will rounded up to 1m.)
// This may be extended in the future if we require larger or smaller quantities.
-//
+//
// When a Quantity is parsed from a string, it will remember the type of suffix
// it had, and will use the same type again when it is serialized.
-//
+//
// Before serializing, Quantity will be put in "canonical form".
// This means that Exponent/suffix will be adjusted up or down (with a
// corresponding increase or decrease in Mantissa) such that:
@@ -62,27 +60,22 @@ option go_package = "resource";
// b. No fractional digits will be emitted
// c. The exponent (or suffix) is as large as possible.
// The sign will be omitted unless the number is negative.
-//
+//
// Examples:
// 1.5 will be serialized as "1500m"
// 1.5Gi will be serialized as "1536Mi"
-//
-// NOTE: We reserve the right to amend this canonical format, perhaps to
-// allow 1.5 to be canonical.
-// TODO: Remove above disclaimer after all bikeshedding about format is over,
-// or after March 2015.
-//
+//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
-//
+//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
-//
+//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
-//
+//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
index 72d3880c0..7f63175d3 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -194,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
}
if fraction {
if base > 0 {
- value += 1
+ value++
} else {
- value += -1
+ value--
}
}
return value, !fraction
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 6a8bb9972..93a6c0c50 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -21,12 +21,9 @@ import (
"errors"
"fmt"
"math/big"
- "regexp"
"strconv"
"strings"
- flag "github.com/spf13/pflag"
-
inf "gopkg.in/inf.v0"
)
@@ -71,11 +68,6 @@ import (
// 1.5 will be serialized as "1500m"
// 1.5Gi will be serialized as "1536Mi"
//
-// NOTE: We reserve the right to amend this canonical format, perhaps to
-// allow 1.5 to be canonical.
-// TODO: Remove above disclaimer after all bikeshedding about format is over,
-// or after March 2015.
-//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
@@ -144,9 +136,6 @@ const (
)
var (
- // splitRE is used to get the various parts of a number.
- splitRE = regexp.MustCompile(splitREString)
-
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
@@ -508,7 +497,7 @@ func (q *Quantity) Sign() int {
return q.i.Sign()
}
-// AsScaled returns the current value, rounded up to the provided scale, and returns
+// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
@@ -595,6 +584,12 @@ func (q *Quantity) Neg() {
q.d.Dec.Neg(q.d.Dec)
}
+// Equal checks equality of two Quantities. This is useful for testing with
+// cmp.Equal.
+func (q Quantity) Equal(v Quantity) bool {
+ return q.Cmp(v) == 0
+}
+
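
The new Equal method lets go-cmp treat semantically equal quantities as equal even when their string forms differ; a small sketch (the github.com/google/go-cmp/cmp import is assumed):

func quantitiesEqual() bool {
	a := resource.MustParse("1500m")
	b := resource.MustParse("1.5")
	// true: cmp.Equal prefers the Equal method, which is defined as Cmp(v) == 0.
	return cmp.Equal(a, b)
}
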
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
@@ -691,7 +686,7 @@ func NewScaledQuantity(value int64, scale Scale) *Quantity {
}
}
-// Value returns the value of q; any fractional part will be lost.
+// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
func (q *Quantity) Value() int64 {
return q.ScaledValue(0)
}
@@ -747,43 +742,3 @@ func (q *Quantity) Copy() *Quantity {
Format: q.Format,
}
}
-
-// qFlag is a helper type for the Flag function
-type qFlag struct {
- dest *Quantity
-}
-
-// Sets the value of the internal Quantity. (used by flag & pflag)
-func (qf qFlag) Set(val string) error {
- q, err := ParseQuantity(val)
- if err != nil {
- return err
- }
- // This copy is OK because q will not be referenced again.
- *qf.dest = q
- return nil
-}
-
-// Converts the value of the internal Quantity to a string. (used by flag & pflag)
-func (qf qFlag) String() string {
- return qf.dest.String()
-}
-
-// States the type of flag this is (Quantity). (used by pflag)
-func (qf qFlag) Type() string {
- return "quantity"
-}
-
-// QuantityFlag is a helper that makes a quantity flag (using standard flag package).
-// Will panic if defaultValue is not a valid quantity.
-func QuantityFlag(flagName, defaultValue, description string) *Quantity {
- q := MustParse(defaultValue)
- flag.Var(NewQuantityFlagValue(&q), flagName, description)
- return &q
-}
-
-// NewQuantityFlagValue returns an object that can be used to back a flag,
-// pointing at the given Quantity variable.
-func NewQuantityFlagValue(q *Quantity) flag.Value {
- return qFlag{q}
-}
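
The qFlag/QuantityFlag helpers are removed here along with the pflag dependency. A consumer that still wants a flag-backed Quantity can carry a small local adapter such as this hypothetical one (quantityValue is not part of the library):

type quantityValue struct{ q *resource.Quantity }

func (v quantityValue) String() string {
	if v.q == nil {
		return ""
	}
	return v.q.String()
}

func (v quantityValue) Set(s string) error {
	parsed, err := resource.ParseQuantity(s)
	if err != nil {
		return err
	}
	*v.q = parsed
	return nil
}

// Type is only required when registering the value with pflag.
func (v quantityValue) Type() string { return "quantity" }
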
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
index fc36d9811..ab4740790 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
new file mode 100644
index 000000000..44929b1c0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -0,0 +1,33 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- caesarxuchao
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- davidopp
+- sttts
+- quinton-hoole
+- luxas
+- janetkuo
+- justinsb
+- ncdc
+- soltysh
+- dims
+- madhusudancs
+- hongchaodeng
+- krousey
+- mml
+- mbohlool
+- david-mcmahon
+- therc
+- mqliang
+- kevin-wangzefeng
+- jianhuiz
+- feihujiang
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
index cd651bcd5..d07069ef2 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
@@ -33,17 +33,17 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {
return scheme.AddConversionFuncs(
Convert_v1_TypeMeta_To_v1_TypeMeta,
- Convert_unversioned_ListMeta_To_unversioned_ListMeta,
+ Convert_v1_ListMeta_To_v1_ListMeta,
Convert_intstr_IntOrString_To_intstr_IntOrString,
- Convert_unversioned_Time_To_unversioned_Time,
- Convert_unversioned_MicroTime_To_unversioned_MicroTime,
-
Convert_Pointer_v1_Duration_To_v1_Duration,
Convert_v1_Duration_To_Pointer_v1_Duration,
- Convert_Slice_string_To_unversioned_Time,
+ Convert_Slice_string_To_v1_Time,
+
+ Convert_v1_Time_To_v1_Time,
+ Convert_v1_MicroTime_To_v1_MicroTime,
Convert_resource_Quantity_To_resource_Quantity,
@@ -71,10 +71,14 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {
Convert_Pointer_float64_To_float64,
Convert_float64_To_Pointer_float64,
- Convert_map_to_unversioned_LabelSelector,
- Convert_unversioned_LabelSelector_to_map,
+ Convert_Map_string_To_string_To_v1_LabelSelector,
+ Convert_v1_LabelSelector_To_Map_string_To_string,
Convert_Slice_string_To_Slice_int32,
+
+ Convert_Slice_string_To_v1_DeletionPropagation,
+
+ Convert_Slice_string_To_v1_IncludeObjectPolicy,
)
}
@@ -185,7 +189,7 @@ func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) e
}
// +k8s:conversion-fn=copy-only
-func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error {
+func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) error {
*out = *in
return nil
}
@@ -197,7 +201,14 @@ func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrStrin
}
// +k8s:conversion-fn=copy-only
-func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error {
+func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error {
+ // Cannot deep copy these, because time.Time has unexported fields.
+ *out = *in
+ return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_MicroTime_To_v1_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error {
// Cannot deep copy these, because time.Time has unexported fields.
*out = *in
return nil
@@ -218,14 +229,8 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s
return nil
}
-func Convert_unversioned_MicroTime_To_unversioned_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error {
- // Cannot deep copy these, because time.Time has unexported fields.
- *out = *in
- return nil
-}
-
-// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value
-func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error {
+// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value
+func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error {
str := ""
if len(*input) > 0 {
str = (*input)[0]
@@ -273,7 +278,7 @@ func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *
return nil
}
-func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {
+func Convert_Map_string_To_string_To_v1_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {
if in == nil {
return nil
}
@@ -283,7 +288,7 @@ func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelS
return nil
}
-func Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error {
+func Convert_v1_LabelSelector_To_Map_string_To_string(in *LabelSelector, out *map[string]string, s conversion.Scope) error {
var err error
*out, err = LabelSelectorAsMap(in)
return err
@@ -304,3 +309,21 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio
}
return nil
}
+
+// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
+func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error {
+ if len(*input) > 0 {
+ *out = DeletionPropagation((*input)[0])
+ } else {
+ *out = ""
+ }
+ return nil
+}
+
+// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value
+func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
+ if len(*input) > 0 {
+ *out = IncludeObjectPolicy((*input)[0])
+ }
+ return nil
+}
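
Sketch of what the new query-parameter converters do, using the propagationPolicy case; passing a nil conversion.Scope is assumed to be acceptable here because this converter ignores it:

func propagationFromQuery(values []string) (metav1.DeletionPropagation, error) {
	var policy metav1.DeletionPropagation
	// ?propagationPolicy=Foreground yields metav1.DeletePropagationForeground;
	// an empty slice leaves the policy as "".
	err := metav1.Convert_Slice_string_To_v1_DeletionPropagation(&values, &policy, nil)
	return policy, err
}
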
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
new file mode 100644
index 000000000..8751d0524
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func (in *TableRow) DeepCopy() *TableRow {
+ if in == nil {
+ return nil
+ }
+
+ out := new(TableRow)
+
+ if in.Cells != nil {
+ out.Cells = make([]interface{}, len(in.Cells))
+ for i := range in.Cells {
+ out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i])
+ }
+ }
+
+ if in.Conditions != nil {
+ out.Conditions = make([]TableRowCondition, len(in.Conditions))
+ for i := range in.Conditions {
+ in.Conditions[i].DeepCopyInto(&out.Conditions[i])
+ }
+ }
+
+ in.Object.DeepCopyInto(&out.Object)
+ return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
index 61f201cdf..dbaa87c87 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -19,4 +19,5 @@ limitations under the License.
// +k8s:defaulter-gen=TypeMeta
// +groupName=meta.k8s.io
+
package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
index fea458dfb..babe8a8b5 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
@@ -31,7 +31,10 @@ type Duration struct {
// UnmarshalJSON implements the json.Unmarshaller interface.
func (d *Duration) UnmarshalJSON(b []byte) error {
var str string
- json.Unmarshal(b, &str)
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
pd, err := time.ParseDuration(str)
if err != nil {
@@ -45,3 +48,13 @@ func (d *Duration) UnmarshalJSON(b []byte) error {
func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.Duration.String())
}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Duration) OpenAPISchemaFormat() string { return "" }
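
Round-trip sketch for the hardened Duration (un)marshaling above; the encoding/json, time, and metav1 imports are assumed:

func durationRoundTrip() (metav1.Duration, error) {
	d := metav1.Duration{Duration: 90 * time.Second}
	b, err := json.Marshal(d) // produces the string "1m30s"
	if err != nil {
		return metav1.Duration{}, err
	}
	var out metav1.Duration
	// With the fix above, a non-string payload such as 42 now surfaces the
	// json error instead of being silently ignored.
	err = json.Unmarshal(b, &out)
	return out, err
}
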
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 1fa478f5a..c8ff6e396 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
-// DO NOT EDIT!
/*
Package v1 is a generated protocol buffer package.
@@ -30,9 +29,11 @@ limitations under the License.
APIResource
APIResourceList
APIVersions
+ CreateOptions
DeleteOptions
Duration
ExportOptions
+ Fields
GetOptions
GroupKind
GroupResource
@@ -47,19 +48,25 @@ limitations under the License.
List
ListMeta
ListOptions
+ ManagedFieldsEntry
MicroTime
ObjectMeta
OwnerReference
+ PartialObjectMetadata
+ PartialObjectMetadataList
Patch
+ PatchOptions
Preconditions
RootPaths
ServerAddressByClientCIDR
Status
StatusCause
StatusDetails
+ TableOptions
Time
Timestamp
TypeMeta
+ UpdateOptions
Verbs
WatchEvent
*/
@@ -113,139 +120,173 @@ func (m *APIVersions) Reset() { *m = APIVersions{} }
func (*APIVersions) ProtoMessage() {}
func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
+func (m *CreateOptions) Reset() { *m = CreateOptions{} }
+func (*CreateOptions) ProtoMessage() {}
+func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }
+
func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
func (*DeleteOptions) ProtoMessage() {}
-func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }
+func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} }
func (m *Duration) Reset() { *m = Duration{} }
func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} }
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }
func (m *ExportOptions) Reset() { *m = ExportOptions{} }
func (*ExportOptions) ProtoMessage() {}
-func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }
+func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }
+
+func (m *Fields) Reset() { *m = Fields{} }
+func (*Fields) ProtoMessage() {}
+func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
func (m *GetOptions) Reset() { *m = GetOptions{} }
func (*GetOptions) ProtoMessage() {}
-func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }
+func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} }
func (m *GroupKind) Reset() { *m = GroupKind{} }
func (*GroupKind) ProtoMessage() {}
-func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
+func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} }
func (m *GroupResource) Reset() { *m = GroupResource{} }
func (*GroupResource) ProtoMessage() {}
-func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} }
+func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
func (*GroupVersion) ProtoMessage() {}
-func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} }
+func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
func (*GroupVersionForDiscovery) ProtoMessage() {}
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{12}
+ return fileDescriptorGenerated, []int{14}
}
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
func (*GroupVersionKind) ProtoMessage() {}
-func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }
+func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
func (*GroupVersionResource) ProtoMessage() {}
-func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
+func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
func (m *Initializer) Reset() { *m = Initializer{} }
func (*Initializer) ProtoMessage() {}
-func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
+func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }
func (m *Initializers) Reset() { *m = Initializers{} }
func (*Initializers) ProtoMessage() {}
-func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
+func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} }
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
func (*LabelSelector) ProtoMessage() {}
-func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }
+func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
func (*LabelSelectorRequirement) ProtoMessage() {}
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{18}
+ return fileDescriptorGenerated, []int{20}
}
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
-func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
+func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
func (m *ListMeta) Reset() { *m = ListMeta{} }
func (*ListMeta) ProtoMessage() {}
-func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
+func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
func (m *ListOptions) Reset() { *m = ListOptions{} }
func (*ListOptions) ProtoMessage() {}
-func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
+func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
+
+func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
+func (*ManagedFieldsEntry) ProtoMessage() {}
+func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
func (m *MicroTime) Reset() { *m = MicroTime{} }
func (*MicroTime) ProtoMessage() {}
-func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
+func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
func (*ObjectMeta) ProtoMessage() {}
-func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
+func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
func (*OwnerReference) ProtoMessage() {}
-func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
+func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
+
+func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
+func (*PartialObjectMetadata) ProtoMessage() {}
+func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }
+
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
+func (*PartialObjectMetadataList) ProtoMessage() {}
+func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{29}
+}
func (m *Patch) Reset() { *m = Patch{} }
func (*Patch) ProtoMessage() {}
-func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
+func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
+
+func (m *PatchOptions) Reset() { *m = PatchOptions{} }
+func (*PatchOptions) ProtoMessage() {}
+func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
-func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
+func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
func (m *RootPaths) Reset() { *m = RootPaths{} }
func (*RootPaths) ProtoMessage() {}
-func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
+func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
func (*ServerAddressByClientCIDR) ProtoMessage() {}
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptorGenerated, []int{28}
+ return fileDescriptorGenerated, []int{34}
}
func (m *Status) Reset() { *m = Status{} }
func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
+func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} }
func (m *StatusCause) Reset() { *m = StatusCause{} }
func (*StatusCause) ProtoMessage() {}
-func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
+func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
func (*StatusDetails) ProtoMessage() {}
-func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }
+func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }
+
+func (m *TableOptions) Reset() { *m = TableOptions{} }
+func (*TableOptions) ProtoMessage() {}
+func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
func (m *Time) Reset() { *m = Time{} }
func (*Time) ProtoMessage() {}
-func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
+func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
-func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} }
+func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
+
+func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
+func (*UpdateOptions) ProtoMessage() {}
+func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
func (m *Verbs) Reset() { *m = Verbs{} }
func (*Verbs) ProtoMessage() {}
-func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} }
+func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
func (*WatchEvent) ProtoMessage() {}
-func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
+func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }
func init() {
proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup")
@@ -253,9 +294,11 @@ func init() {
proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource")
proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList")
proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions")
+ proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions")
+ proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields")
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource")
@@ -270,19 +313,25 @@ func init() {
proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List")
proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta")
proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions")
+ proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry")
proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime")
proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta")
proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference")
+ proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata")
+ proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList")
proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch")
+ proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions")
proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions")
proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths")
proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR")
proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status")
proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause")
proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails")
+ proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions")
proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time")
proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp")
proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta")
+ proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions")
proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs")
proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent")
}
@@ -453,6 +502,10 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
i += copy(dAtA[i:], m.Version)
+ dAtA[i] = 0x52
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash)))
+ i += copy(dAtA[i:], m.StorageVersionHash)
return i, nil
}
@@ -535,6 +588,43 @@ func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+ i += copy(dAtA[i:], m.FieldManager)
+ return i, nil
+}
+
func (m *DeleteOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -581,6 +671,21 @@ func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy)))
i += copy(dAtA[i:], *m.PropagationPolicy)
}
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ dAtA[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
return i, nil
}
@@ -639,6 +744,55 @@ func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *Fields) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Fields) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Map) > 0 {
+ keysForMap := make([]string, 0, len(m.Map))
+ for k := range m.Map {
+ keysForMap = append(keysForMap, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMap)
+ for _, k := range keysForMap {
+ dAtA[i] = 0xa
+ i++
+ v := m.Map[string(k)]
+ msgSize := 0
+ if (&v) != nil {
+ msgSize = (&v).Size()
+ msgSize += 1 + sovGenerated(uint64(msgSize))
+ }
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize
+ i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size()))
+ n4, err := (&v).MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ }
+ return i, nil
+}
+
func (m *GetOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -658,14 +812,6 @@ func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
i += copy(dAtA[i:], m.ResourceVersion)
- dAtA[i] = 0x10
- i++
- if m.IncludeUninitialized {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
return i, nil
}
@@ -886,11 +1032,11 @@ func (m *Initializers) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size()))
- n4, err := m.Result.MarshalTo(dAtA[i:])
+ n5, err := m.Result.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n4
+ i += n5
}
return i, nil
}
@@ -1006,11 +1152,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n5, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n6, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n5
+ i += n6
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
@@ -1053,6 +1199,11 @@ func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
i += copy(dAtA[i:], m.Continue)
+ if m.RemainingItemCount != nil {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount))
+ }
return i, nil
}
@@ -1096,21 +1247,71 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
}
- dAtA[i] = 0x30
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Limit))
+ dAtA[i] = 0x42
i++
- if m.IncludeUninitialized {
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
+ i += copy(dAtA[i:], m.Continue)
+ dAtA[i] = 0x48
+ i++
+ if m.AllowWatchBookmarks {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
- dAtA[i] = 0x38
+ return i, nil
+}
+
+func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.Limit))
- dAtA[i] = 0x42
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager)))
+ i += copy(dAtA[i:], m.Manager)
+ dAtA[i] = 0x12
i++
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
- i += copy(dAtA[i:], m.Continue)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+ i += copy(dAtA[i:], m.Operation)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+ i += copy(dAtA[i:], m.APIVersion)
+ if m.Time != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size()))
+ n7, err := m.Time.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if m.Fields != nil {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size()))
+ n8, err := m.Fields.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
return i, nil
}
@@ -1159,20 +1360,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x42
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size()))
- n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:])
+ n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n6
+ i += n9
if m.DeletionTimestamp != nil {
dAtA[i] = 0x4a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size()))
- n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:])
+ n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n7
+ i += n10
}
if m.DeletionGracePeriodSeconds != nil {
dAtA[i] = 0x50
@@ -1260,11 +1461,25 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size()))
- n8, err := m.Initializers.MarshalTo(dAtA[i:])
+ n11, err := m.Initializers.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n8
+ i += n11
+ }
+ if len(m.ManagedFields) > 0 {
+ for _, msg := range m.ManagedFields {
+ dAtA[i] = 0x8a
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
}
return i, nil
}
@@ -1323,6 +1538,70 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
+ n12, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ return i, nil
+}
+
+func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
+ n13, err := m.ListMeta.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
func (m *Patch) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1341,6 +1620,53 @@ func (m *Patch) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *PatchOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ if m.Force != nil {
+ dAtA[i] = 0x10
+ i++
+ if *m.Force {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+ i += copy(dAtA[i:], m.FieldManager)
+ return i, nil
+}
+
func (m *Preconditions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1362,6 +1688,12 @@ func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID)))
i += copy(dAtA[i:], *m.UID)
}
+ if m.ResourceVersion != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion)))
+ i += copy(dAtA[i:], *m.ResourceVersion)
+ }
return i, nil
}
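
Editor's note (illustration only, not part of the vendored patch): the Preconditions hunk above adds an optional ResourceVersion alongside UID, so a delete can be pinned to an exact object version. A hedged sketch of how a caller might populate it, using the field names visible in the generated String()/Size() methods; the types.UID import is the usual apimachinery helper package:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	uid := types.UID("5d6a9f30-0000-0000-0000-000000000000")
	rv := "12345"
	opts := metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID:             &uid,
			ResourceVersion: &rv, // new in this patch: delete only at this exact version
		},
		DryRun: []string{"All"}, // DryRun is also added to DeleteOptions above
	}
	fmt.Println(opts.String())
}
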
@@ -1442,11 +1774,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
- n9, err := m.ListMeta.MarshalTo(dAtA[i:])
+ n14, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n9
+ i += n14
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
@@ -1463,11 +1795,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size()))
- n10, err := m.Details.MarshalTo(dAtA[i:])
+ n15, err := m.Details.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n10
+ i += n15
}
dAtA[i] = 0x30
i++
@@ -1554,6 +1886,28 @@ func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *TableOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject)))
+ i += copy(dAtA[i:], m.IncludeObject)
+ return i, nil
+}
+
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1604,6 +1958,43 @@ func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *UpdateOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+ i += copy(dAtA[i:], m.FieldManager)
+ return i, nil
+}
+
func (m Verbs) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1659,32 +2050,14 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size()))
- n11, err := m.Object.MarshalTo(dAtA[i:])
+ n16, err := m.Object.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n11
+ i += n16
return i, nil
}
-func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
-}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
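
Editor's note, for orientation only (not part of the vendored patch): the hand-rolled byte writes in the marshalers above follow the protobuf wire format — each field starts with a key byte ((field number << 3) | wire type), and length-delimited fields (wire type 2) are prefixed with the base-128 varint length that encodeVarintGenerated emits. A small sketch that reproduces the tag values seen in the hunks above:

package main

import "fmt"

// putVarint appends v as a base-128 varint; the high bit marks continuation.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// tag computes the protobuf key byte for small field numbers:
// (field number << 3) | wire type. Wire type 2 = length-delimited.
func tag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("%#x\n", tag(10, 2)) // 0x52 -> APIResource.StorageVersionHash
	fmt.Printf("%#x\n", tag(5, 2))  // 0x2a -> DeleteOptions.DryRun
	fmt.Printf("%#x\n", tag(3, 2))  // 0x1a -> CreateOptions.FieldManager

	// A length-delimited string field: key byte, varint length, raw bytes.
	s := "kubectl"
	buf := []byte{tag(3, 2)}
	buf = putVarint(buf, uint64(len(s)))
	buf = append(buf, s...)
	fmt.Printf("% x\n", buf) // 1a 07 6b 75 62 65 63 74 6c
}
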
@@ -1758,6 +2131,8 @@ func (m *APIResource) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Version)
n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.StorageVersionHash)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -1793,6 +2168,20 @@ func (m *APIVersions) Size() (n int) {
return n
}
+func (m *CreateOptions) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.FieldManager)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *DeleteOptions) Size() (n int) {
var l int
_ = l
@@ -1810,6 +2199,12 @@ func (m *DeleteOptions) Size() (n int) {
l = len(*m.PropagationPolicy)
n += 1 + l + sovGenerated(uint64(l))
}
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -1828,12 +2223,26 @@ func (m *ExportOptions) Size() (n int) {
return n
}
+func (m *Fields) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Map) > 0 {
+ for k, v := range m.Map {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
func (m *GetOptions) Size() (n int) {
var l int
_ = l
l = len(m.ResourceVersion)
n += 1 + l + sovGenerated(uint64(l))
- n += 2
return n
}
@@ -1984,6 +2393,9 @@ func (m *ListMeta) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Continue)
n += 1 + l + sovGenerated(uint64(l))
+ if m.RemainingItemCount != nil {
+ n += 1 + sovGenerated(uint64(*m.RemainingItemCount))
+ }
return n
}
@@ -2000,10 +2412,30 @@ func (m *ListOptions) Size() (n int) {
if m.TimeoutSeconds != nil {
n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
}
- n += 2
n += 1 + sovGenerated(uint64(m.Limit))
l = len(m.Continue)
n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *ManagedFieldsEntry) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Manager)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operation)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Time != nil {
+ l = m.Time.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Fields != nil {
+ l = m.Fields.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2066,6 +2498,12 @@ func (m *ObjectMeta) Size() (n int) {
l = m.Initializers.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if len(m.ManagedFields) > 0 {
+ for _, e := range m.ManagedFields {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -2089,12 +2527,51 @@ func (m *OwnerReference) Size() (n int) {
return n
}
+func (m *PartialObjectMetadata) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PartialObjectMetadataList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
func (m *Patch) Size() (n int) {
var l int
_ = l
return n
}
+func (m *PatchOptions) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Force != nil {
+ n += 2
+ }
+ l = len(m.FieldManager)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *Preconditions) Size() (n int) {
var l int
_ = l
@@ -2102,6 +2579,10 @@ func (m *Preconditions) Size() (n int) {
l = len(*m.UID)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.ResourceVersion != nil {
+ l = len(*m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2179,6 +2660,14 @@ func (m *StatusDetails) Size() (n int) {
return n
}
+func (m *TableOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.IncludeObject)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *Timestamp) Size() (n int) {
var l int
_ = l
@@ -2197,6 +2686,20 @@ func (m *TypeMeta) Size() (n int) {
return n
}
+func (m *UpdateOptions) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.DryRun) > 0 {
+ for _, s := range m.DryRun {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.FieldManager)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m Verbs) Size() (n int) {
var l int
_ = l
@@ -2269,6 +2772,7 @@ func (this *APIResource) String() string {
`Categories:` + fmt.Sprintf("%v", this.Categories) + `,`,
`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`,
`}`,
}, "")
return s
@@ -2284,6 +2788,17 @@ func (this *APIResourceList) String() string {
}, "")
return s
}
+func (this *CreateOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CreateOptions{`,
+ `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *DeleteOptions) String() string {
if this == nil {
return "nil"
@@ -2293,6 +2808,7 @@ func (this *DeleteOptions) String() string {
`Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`,
`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
+ `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
`}`,
}, "")
return s
@@ -2318,13 +2834,32 @@ func (this *ExportOptions) String() string {
}, "")
return s
}
+func (this *Fields) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForMap := make([]string, 0, len(this.Map))
+ for k := range this.Map {
+ keysForMap = append(keysForMap, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForMap)
+ mapStringForMap := "map[string]Fields{"
+ for _, k := range keysForMap {
+ mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k])
+ }
+ mapStringForMap += "}"
+ s := strings.Join([]string{`&Fields{`,
+ `Map:` + mapStringForMap + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *GetOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&GetOptions{`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
- `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`,
`}`,
}, "")
return s
@@ -2413,6 +2948,7 @@ func (this *ListMeta) String() string {
`SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
+ `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`,
`}`,
}, "")
return s
@@ -2427,9 +2963,23 @@ func (this *ListOptions) String() string {
`Watch:` + fmt.Sprintf("%v", this.Watch) + `,`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
- `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`,
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
+ `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ManagedFieldsEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ManagedFieldsEntry{`,
+ `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`,
+ `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+ `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+ `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`,
+ `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`,
`}`,
}, "")
return s
@@ -2475,6 +3025,7 @@ func (this *ObjectMeta) String() string {
`Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
`ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`,
`Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`,
+ `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -2494,6 +3045,27 @@ func (this *OwnerReference) String() string {
}, "")
return s
}
+func (this *PartialObjectMetadata) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PartialObjectMetadata{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PartialObjectMetadataList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PartialObjectMetadataList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Patch) String() string {
if this == nil {
return "nil"
@@ -2503,12 +3075,25 @@ func (this *Patch) String() string {
}, "")
return s
}
+func (this *PatchOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PatchOptions{`,
+ `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `Force:` + valueToStringGenerated(this.Force) + `,`,
+ `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Preconditions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Preconditions{`,
`UID:` + valueToStringGenerated(this.UID) + `,`,
+ `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`,
`}`,
}, "")
return s
@@ -2576,6 +3161,16 @@ func (this *StatusDetails) String() string {
}, "")
return s
}
+func (this *TableOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TableOptions{`,
+ `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Timestamp) String() string {
if this == nil {
return "nil"
@@ -2598,6 +3193,17 @@ func (this *TypeMeta) String() string {
}, "")
return s
}
+func (this *UpdateOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UpdateOptions{`,
+ `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *WatchEvent) String() string {
if this == nil {
return "nil"
@@ -3154,6 +3760,35 @@ func (m *APIResource) Unmarshal(dAtA []byte) error {
}
m.Version = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StorageVersionHash = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -3395,6 +4030,114 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *CreateOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldManager = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -3528,6 +4271,35 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
s := DeletionPropagation(dAtA[iNdEx:postIndex])
m.PropagationPolicy = &s
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -3708,7 +4480,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *GetOptions) Unmarshal(dAtA []byte) error {
+func (m *Fields) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3731,17 +4503,17 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetOptions: wiretype end group for non-group")
+ return fmt.Errorf("proto: Fields: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3751,26 +4523,170 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+ if m.Map == nil {
+ m.Map = make(map[string]Fields)
+ }
+ var mapkey string
+ mapvalue := &Fields{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &Fields{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Map[mapkey] = *mapvalue
iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
}
- var v int
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3780,12 +4696,21 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (int(b) & 0x7F) << shift
+ stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- m.IncludeUninitialized = bool(v != 0)
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -4761,51 +5686,14 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.MatchLabels == nil {
m.MatchLabels = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4815,41 +5703,80 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.MatchLabels[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.MatchLabels[mapkey] = mapvalue
}
+ m.MatchLabels[mapkey] = mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
@@ -5267,6 +6194,26 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error {
}
m.Continue = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RemainingItemCount = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5444,9 +6391,57 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
}
}
m.TimeoutSeconds = &v
- case 6:
+ case 7:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Continue = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
@@ -5463,12 +6458,62 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
break
}
}
- m.IncludeUninitialized = bool(v != 0)
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ m.AllowWatchBookmarks = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
}
- m.Limit = 0
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType)
+ }
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5478,14 +6523,24 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Limit |= (int64(b) & 0x7F) << shift
+ stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- case 8:
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Manager = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5510,7 +6565,102 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Continue = string(dAtA[iNdEx:postIndex])
+ m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Time == nil {
+ m.Time = &Time{}
+ }
+ if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Fields == nil {
+ m.Fields = &Fields{}
+ }
+ if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5864,51 +7014,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Labels == nil {
m.Labels = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5918,41 +7031,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Labels[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Labels[mapkey] = mapvalue
}
+ m.Labels[mapkey] = mapvalue
iNdEx = postIndex
case 12:
if wireType != 2 {
@@ -5980,51 +7132,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
if m.Annotations == nil {
m.Annotations = make(map[string]string)
}
- if iNdEx < postIndex {
- var valuekey uint64
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6034,41 +7149,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
+ wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if iNdEx >= l {
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.Annotations[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.Annotations[mapkey] = mapvalue
}
+ m.Annotations[mapkey] = mapvalue
iNdEx = postIndex
case 13:
if wireType != 2 {
@@ -6192,6 +7346,37 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{})
+ if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6421,6 +7606,197 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PartialObjectMetadata{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Patch) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -6471,6 +7847,135 @@ func (m *Patch) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *PatchOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Force = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldManager = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Preconditions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -6530,6 +8035,36 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error {
s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
m.UID = &s
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ResourceVersion = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7310,6 +8845,85 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *TableOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TableOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Timestamp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -7506,6 +9120,114 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *UpdateOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldManager = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Verbs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -7804,158 +9526,182 @@ func init() {
}
var fileDescriptorGenerated = []byte{
- // 2435 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49,
- 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b,
- 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32,
- 0x63, 0x55, 0x76, 0x06, 0x31, 0x8c, 0x10, 0x9d, 0x76, 0xc5, 0x69, 0xd2, 0xee, 0xf6, 0x56, 0x95,
- 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5,
- 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71,
- 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9,
- 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3,
- 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e,
- 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09,
- 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e,
- 0xfa, 0x92, 0xaf, 0x55, 0x8f, 0x6b, 0xd5, 0x7b, 0xc7, 0x1d, 0x41, 0x60, 0x75, 0xa1, 0x55, 0x3f,
- 0xd9, 0x58, 0x7d, 0xab, 0x63, 0xf3, 0xa3, 0xfe, 0x41, 0xdd, 0xf2, 0xba, 0x8d, 0x8e, 0xd7, 0xf1,
- 0x1a, 0x52, 0xf9, 0xa0, 0x7f, 0x28, 0xbf, 0xe4, 0x87, 0xfc, 0xe5, 0x83, 0xae, 0x4e, 0x74, 0x85,
- 0xf6, 0x5d, 0x6e, 0x77, 0xc9, 0xa8, 0x17, 0xab, 0xef, 0x5c, 0xa6, 0xc0, 0xac, 0x23, 0xd2, 0x35,
- 0xc7, 0xf4, 0xde, 0x9e, 0xa4, 0xd7, 0xe7, 0xb6, 0xd3, 0xb0, 0x5d, 0xce, 0x38, 0x1d, 0x55, 0xd2,
- 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9,
- 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b,
- 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac,
- 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x82, 0x56, 0x97, 0x06, 0xee, 0xfb, 0xaa, 0x37, 0x3d,
- 0xda, 0xb4, 0x99, 0xe5, 0x9d, 0x10, 0x7a, 0x66, 0x2c, 0x29, 0x2b, 0x05, 0xc5, 0x64, 0x38, 0xb4,
- 0x80, 0x7e, 0xae, 0xc1, 0x52, 0x8f, 0x92, 0x43, 0x42, 0x29, 0x69, 0x2b, 0x7e, 0x25, 0xbb, 0xa6,
- 0x7d, 0x06, 0x66, 0x2b, 0xca, 0xec, 0x52, 0x6b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0xbf, 0xd3, 0x60,
- 0x95, 0x11, 0x7a, 0x42, 0xe8, 0x56, 0xbb, 0x4d, 0x09, 0x63, 0xc6, 0xd9, 0xb6, 0x63, 0x13, 0x97,
- 0x6f, 0xef, 0x36, 0x31, 0xab, 0xcc, 0xca, 0x38, 0x7c, 0x27, 0x9d, 0x43, 0xfb, 0x93, 0x70, 0x0c,
- 0x5d, 0x79, 0xb4, 0x3a, 0x51, 0x84, 0xe1, 0x17, 0xb8, 0xa1, 0x1f, 0xc2, 0x7c, 0xb0, 0x91, 0xb7,
- 0x6d, 0xc6, 0xd1, 0x7d, 0xc8, 0x77, 0xc4, 0x07, 0xab, 0x68, 0xd2, 0xc1, 0x7a, 0x3a, 0x07, 0x03,
- 0x0c, 0x63, 0x41, 0xf9, 0x93, 0x97, 0x9f, 0x0c, 0x2b, 0x34, 0xfd, 0xcf, 0x59, 0x28, 0x6d, 0xb5,
- 0x76, 0x31, 0x61, 0x5e, 0x9f, 0x5a, 0x24, 0x45, 0xd2, 0x6c, 0x02, 0x88, 0xbf, 0xac, 0x67, 0x5a,
- 0xa4, 0x5d, 0xc9, 0xac, 0x69, 0xeb, 0x05, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x72, 0x70, 0x4c, 0x4a,
- 0xa0, 0x1e, 0xdb, 0x6e, 0x5b, 0xee, 0x76, 0x0c, 0xf5, 0x5d, 0xdb, 0x6d, 0x63, 0xc9, 0x41, 0xb7,
- 0x21, 0x77, 0x42, 0xe8, 0x81, 0x88, 0xbf, 0x48, 0x88, 0xaf, 0xa4, 0x5b, 0xde, 0x7d, 0xa1, 0x62,
- 0x14, 0x87, 0x83, 0x5a, 0x4e, 0xfe, 0xc4, 0x3e, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x8f, 0x72, 0xe9,
- 0x4e, 0x25, 0xb7, 0x96, 0x5d, 0x2f, 0x1a, 0x0b, 0xc2, 0xbf, 0xfd, 0x90, 0x8a, 0x63, 0x12, 0xe8,
- 0x1a, 0xcc, 0x33, 0xdb, 0xed, 0xf4, 0x1d, 0x93, 0x0a, 0x42, 0x25, 0x2f, 0xfd, 0x5c, 0x51, 0x7e,
- 0xce, 0xef, 0xc7, 0x78, 0x38, 0x21, 0x29, 0x2c, 0x59, 0x26, 0x27, 0x1d, 0x8f, 0xda, 0x84, 0x55,
- 0xe6, 0x22, 0x4b, 0xdb, 0x21, 0x15, 0xc7, 0x24, 0xd0, 0xeb, 0x90, 0x93, 0x91, 0xaf, 0x14, 0xa4,
- 0x89, 0xb2, 0x32, 0x91, 0x93, 0xdb, 0x82, 0x7d, 0x1e, 0x7a, 0x13, 0xe6, 0xd4, 0xa9, 0xa9, 0x14,
- 0xa5, 0xd8, 0xa2, 0x12, 0x9b, 0x0b, 0xd2, 0x3a, 0xe0, 0xeb, 0x7f, 0xd4, 0x60, 0x31, 0xb6, 0x7f,
- 0x32, 0x57, 0xae, 0xc1, 0x7c, 0x27, 0x76, 0x52, 0xd4, 0x5e, 0x86, 0xab, 0x89, 0x9f, 0x22, 0x9c,
- 0x90, 0x44, 0x04, 0x8a, 0x54, 0x21, 0x05, 0x15, 0x61, 0x23, 0x75, 0xa2, 0x05, 0x3e, 0x44, 0x96,
- 0x62, 0x44, 0x86, 0x23, 0x64, 0xfd, 0x9f, 0x9a, 0x4c, 0xba, 0xa0, 0x46, 0xa0, 0xf5, 0x58, 0x1d,
- 0xd2, 0x64, 0x08, 0xe7, 0x27, 0xd4, 0x90, 0x4b, 0x0e, 0x6f, 0xe6, 0xff, 0xe2, 0xf0, 0x5e, 0x2f,
- 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c,
- 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd,
- 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x16, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b,
- 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0xe2, 0xe0,
- 0xbc, 0x9d, 0x6e, 0xed, 0xad, 0xb8, 0xaa, 0xb1, 0x3c, 0x1c, 0xd4, 0xca, 0x09, 0x12, 0x4e, 0x82,
- 0xa3, 0xef, 0xc2, 0x92, 0x47, 0x7b, 0x47, 0xa6, 0xdb, 0x24, 0x3d, 0xe2, 0xb6, 0x89, 0xcb, 0x99,
- 0x3c, 0xcc, 0x05, 0x63, 0x45, 0x94, 0xdd, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x72,
- 0x8f, 0x7a, 0x3d, 0xb3, 0x63, 0x0a, 0xc4, 0x96, 0xe7, 0xd8, 0xd6, 0x99, 0x3c, 0xec, 0x45, 0xe3,
- 0xea, 0x70, 0x50, 0x5b, 0x6e, 0x8d, 0x32, 0xcf, 0x07, 0xb5, 0x57, 0x64, 0xe8, 0x04, 0x25, 0x62,
- 0xe2, 0x71, 0x18, 0x7d, 0x17, 0x0a, 0xcd, 0x3e, 0x95, 0x14, 0xf4, 0x6d, 0x28, 0xb4, 0xd5, 0x6f,
- 0x15, 0xd5, 0xd7, 0x82, 0x3b, 0x29, 0x90, 0x39, 0x1f, 0xd4, 0xca, 0xe2, 0xea, 0xad, 0x07, 0x04,
- 0x1c, 0xaa, 0xe8, 0x0f, 0xa1, 0xbc, 0x73, 0xda, 0xf3, 0x28, 0x0f, 0xf6, 0xeb, 0x0d, 0xc8, 0x13,
- 0x49, 0x90, 0x68, 0x85, 0xa8, 0x90, 0xfa, 0x62, 0x58, 0x71, 0xc5, 0xc1, 0x26, 0xa7, 0xa6, 0xc5,
- 0x55, 0x45, 0x0c, 0x0f, 0xf6, 0x8e, 0x20, 0x62, 0x9f, 0xa7, 0x3f, 0xd1, 0x00, 0x6e, 0x91, 0x10,
- 0x7b, 0x0b, 0x16, 0x83, 0x43, 0x91, 0x3c, 0xab, 0x9f, 0x57, 0xda, 0x8b, 0x38, 0xc9, 0xc6, 0xa3,
- 0xf2, 0xa8, 0x05, 0x2b, 0xb6, 0x6b, 0x39, 0xfd, 0x36, 0xb9, 0xe7, 0xda, 0xae, 0xcd, 0x6d, 0xd3,
- 0xb1, 0x7f, 0x12, 0xd6, 0xe5, 0x2f, 0x28, 0x9c, 0x95, 0xdd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa,
- 0x43, 0x28, 0xca, 0x0a, 0x21, 0x8a, 0x73, 0x54, 0xae, 0xb4, 0x17, 0x94, 0xab, 0xa0, 0xba, 0x67,
- 0x26, 0x55, 0xf7, 0xd8, 0x81, 0x70, 0xa0, 0xec, 0xeb, 0x06, 0x17, 0x4e, 0x2a, 0x0b, 0x57, 0xa1,
- 0x10, 0x2c, 0x5c, 0x59, 0x09, 0x1b, 0x8d, 0x00, 0x08, 0x87, 0x12, 0x31, 0x6b, 0x47, 0x90, 0xa8,
- 0x76, 0xe9, 0x8c, 0xc5, 0xaa, 0x6f, 0xe6, 0xc5, 0xd5, 0x37, 0x66, 0xe9, 0x67, 0x50, 0x99, 0xd4,
- 0x9d, 0xbc, 0x44, 0x3d, 0x4e, 0xef, 0x8a, 0xfe, 0x6b, 0x0d, 0x96, 0xe2, 0x48, 0xe9, 0xb7, 0x2f,
- 0xbd, 0x91, 0xcb, 0xef, 0xf1, 0x58, 0x44, 0x7e, 0xab, 0xc1, 0x4a, 0x62, 0x69, 0x53, 0xed, 0xf8,
- 0x14, 0x4e, 0xc5, 0x93, 0x23, 0x3b, 0x45, 0x72, 0x34, 0xa0, 0xb4, 0x1b, 0xe6, 0x3d, 0xbd, 0xbc,
- 0xf3, 0xd1, 0xff, 0xa2, 0xc1, 0x7c, 0x4c, 0x83, 0xa1, 0x87, 0x30, 0x27, 0xea, 0x9b, 0xed, 0x76,
- 0x54, 0x57, 0x96, 0xf2, 0xb2, 0x8c, 0x81, 0x44, 0xeb, 0x6a, 0xf9, 0x48, 0x38, 0x80, 0x44, 0x2d,
- 0xc8, 0x53, 0xc2, 0xfa, 0x0e, 0x57, 0xa5, 0xfd, 0x6a, 0xca, 0x6b, 0x8d, 0x9b, 0xbc, 0xcf, 0x0c,
- 0x10, 0x35, 0x0a, 0x4b, 0x7d, 0xac, 0x70, 0xf4, 0xbf, 0x67, 0xa0, 0x7c, 0xdb, 0x3c, 0x20, 0xce,
- 0x3e, 0x71, 0x88, 0xc5, 0x3d, 0x8a, 0x7e, 0x0a, 0xa5, 0xae, 0xc9, 0xad, 0x23, 0x49, 0x0d, 0x7a,
- 0xcb, 0x66, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x5e, 0x04, 0xb3, 0xe3, 0x72, 0x7a, 0x66, 0xbc, 0xa2,
- 0x16, 0x56, 0x8a, 0x71, 0x70, 0xdc, 0x9a, 0x7c, 0x10, 0xc8, 0xef, 0x9d, 0xd3, 0x9e, 0xb8, 0x44,
- 0xa7, 0x7f, 0x87, 0x24, 0x5c, 0xc0, 0xe4, 0xfd, 0xbe, 0x4d, 0x49, 0x97, 0xb8, 0x3c, 0x7a, 0x10,
- 0xec, 0x8d, 0xe0, 0xe3, 0x31, 0x8b, 0xab, 0x37, 0x60, 0x69, 0xd4, 0x79, 0xb4, 0x04, 0xd9, 0x63,
- 0x72, 0xe6, 0xe7, 0x02, 0x16, 0x3f, 0xd1, 0x0a, 0xe4, 0x4e, 0x4c, 0xa7, 0xaf, 0xea, 0x0f, 0xf6,
- 0x3f, 0xae, 0x67, 0xae, 0x69, 0xfa, 0xef, 0x35, 0xa8, 0x4c, 0x72, 0x04, 0x7d, 0x31, 0x06, 0x64,
- 0x94, 0x94, 0x57, 0xd9, 0x77, 0xc9, 0x99, 0x8f, 0xba, 0x03, 0x05, 0xaf, 0x27, 0x9e, 0x70, 0x1e,
- 0x55, 0x79, 0xfe, 0x66, 0x90, 0xbb, 0x77, 0x15, 0xfd, 0x7c, 0x50, 0xbb, 0x92, 0x80, 0x0f, 0x18,
- 0x38, 0x54, 0x45, 0x3a, 0xe4, 0xa5, 0x3f, 0xe2, 0x52, 0x16, 0xed, 0x93, 0xdc, 0xfc, 0xfb, 0x92,
- 0x82, 0x15, 0x47, 0xff, 0x93, 0x06, 0xb3, 0xb2, 0x3d, 0x7c, 0x08, 0x05, 0x11, 0xbf, 0xb6, 0xc9,
- 0x4d, 0xe9, 0x57, 0xea, 0xc7, 0x84, 0xd0, 0xde, 0x23, 0xdc, 0x8c, 0xce, 0x57, 0x40, 0xc1, 0x21,
- 0x22, 0xc2, 0x90, 0xb3, 0x39, 0xe9, 0x06, 0x1b, 0xf9, 0xd6, 0x44, 0x68, 0xf5, 0xfe, 0xad, 0x63,
- 0xf3, 0xd1, 0xce, 0x29, 0x27, 0xae, 0xd8, 0x8c, 0xa8, 0x18, 0xec, 0x0a, 0x0c, 0xec, 0x43, 0xe9,
- 0x7f, 0xd0, 0x20, 0x34, 0x25, 0x8e, 0x3b, 0x23, 0xce, 0xe1, 0x6d, 0xdb, 0x3d, 0x56, 0x61, 0x0d,
- 0xdd, 0xd9, 0x57, 0x74, 0x1c, 0x4a, 0x5c, 0x74, 0xc5, 0x66, 0xa6, 0xbc, 0x62, 0xaf, 0x42, 0xc1,
- 0xf2, 0x5c, 0x6e, 0xbb, 0xfd, 0xb1, 0xfa, 0xb2, 0xad, 0xe8, 0x38, 0x94, 0xd0, 0x9f, 0x65, 0xa1,
- 0x24, 0x7c, 0x0d, 0xee, 0xf8, 0x6f, 0x42, 0xd9, 0x89, 0xef, 0x9e, 0xf2, 0xf9, 0x8a, 0x82, 0x48,
- 0x9e, 0x47, 0x9c, 0x94, 0x15, 0xca, 0x87, 0x36, 0x71, 0xda, 0xa1, 0x72, 0x26, 0xa9, 0x7c, 0x33,
- 0xce, 0xc4, 0x49, 0x59, 0x51, 0x67, 0x1f, 0x89, 0xbc, 0x56, 0x8d, 0x5a, 0x18, 0xda, 0xef, 0x09,
- 0x22, 0xf6, 0x79, 0x17, 0xc5, 0x67, 0x76, 0xca, 0xf8, 0x5c, 0x87, 0x05, 0xb1, 0x91, 0x5e, 0x9f,
- 0x07, 0xdd, 0x6c, 0x4e, 0xf6, 0x5d, 0x68, 0x38, 0xa8, 0x2d, 0xbc, 0x97, 0xe0, 0xe0, 0x11, 0xc9,
- 0x89, 0xed, 0x4b, 0xfe, 0xd3, 0xb6, 0x2f, 0x62, 0xd5, 0x8e, 0xdd, 0xb5, 0x79, 0x65, 0x4e, 0x3a,
- 0x11, 0xae, 0xfa, 0xb6, 0x20, 0x62, 0x9f, 0x97, 0xd8, 0xd2, 0xc2, 0xa5, 0x5b, 0xfa, 0x3e, 0x14,
- 0xf7, 0x6c, 0x8b, 0x7a, 0x62, 0x2d, 0xe2, 0x62, 0x62, 0x89, 0xa6, 0x3d, 0x2c, 0xe0, 0xc1, 0x1a,
- 0x03, 0xbe, 0x70, 0xc5, 0x35, 0x5d, 0xcf, 0x6f, 0xcd, 0x73, 0x91, 0x2b, 0x77, 0x04, 0x11, 0xfb,
- 0xbc, 0xeb, 0x2b, 0xe2, 0x3e, 0xfa, 0xe5, 0x93, 0xda, 0xcc, 0xe3, 0x27, 0xb5, 0x99, 0x0f, 0x9f,
- 0xa8, 0xbb, 0xe9, 0x5f, 0x00, 0x70, 0xf7, 0xe0, 0xc7, 0xc4, 0xf2, 0x73, 0xfe, 0xf2, 0x57, 0xb9,
- 0xe8, 0x31, 0xd4, 0x30, 0x48, 0xbe, 0x60, 0x33, 0x23, 0x3d, 0x46, 0x8c, 0x87, 0x13, 0x92, 0xa8,
- 0x01, 0xc5, 0xf0, 0xa5, 0xae, 0xf2, 0x7b, 0x59, 0xa9, 0x15, 0xc3, 0xe7, 0x3c, 0x8e, 0x64, 0x12,
- 0x07, 0x70, 0xf6, 0xd2, 0x03, 0x68, 0x40, 0xb6, 0x6f, 0xb7, 0x65, 0x4a, 0x14, 0x8d, 0xaf, 0x06,
- 0x05, 0xf0, 0xde, 0x6e, 0xf3, 0x7c, 0x50, 0x7b, 0x6d, 0xd2, 0x8c, 0x8b, 0x9f, 0xf5, 0x08, 0xab,
- 0xdf, 0xdb, 0x6d, 0x62, 0xa1, 0x7c, 0x51, 0x92, 0xe6, 0xa7, 0x4c, 0xd2, 0x4d, 0x00, 0xb5, 0x6a,
- 0xa1, 0xed, 0xe7, 0x46, 0x38, 0xb5, 0xb8, 0x15, 0x72, 0x70, 0x4c, 0x0a, 0x31, 0x58, 0xb6, 0x28,
- 0x91, 0xbf, 0xc5, 0xd6, 0x33, 0x6e, 0x76, 0xfd, 0x77, 0x7b, 0x69, 0xf3, 0xcb, 0xe9, 0x2a, 0xa6,
- 0x50, 0x33, 0x5e, 0x55, 0x66, 0x96, 0xb7, 0x47, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xb9, 0xad,
- 0x5e, 0x3d, 0x91, 0xd1, 0xe2, 0xd4, 0x46, 0xaf, 0x08, 0x83, 0xcd, 0x51, 0x20, 0x3c, 0x8e, 0x8d,
- 0x7e, 0x08, 0xab, 0x01, 0x71, 0xfc, 0xe9, 0x59, 0x01, 0x19, 0xa9, 0xaa, 0x78, 0x0c, 0x37, 0x27,
- 0x4a, 0xe1, 0x17, 0x20, 0xa0, 0x36, 0xe4, 0x1d, 0xbf, 0xbb, 0x28, 0xc9, 0x1b, 0xe1, 0x5b, 0xe9,
- 0x56, 0x11, 0x65, 0x7f, 0x3d, 0xde, 0x55, 0x84, 0xcf, 0x2f, 0xd5, 0x50, 0x28, 0x6c, 0x74, 0x0a,
- 0x25, 0xd3, 0x75, 0x3d, 0x6e, 0xfa, 0x8f, 0xe1, 0x79, 0x69, 0x6a, 0x6b, 0x6a, 0x53, 0x5b, 0x11,
- 0xc6, 0x48, 0x17, 0x13, 0xe3, 0xe0, 0xb8, 0x29, 0xf4, 0x08, 0x16, 0xbd, 0x47, 0x2e, 0xa1, 0x98,
- 0x1c, 0x12, 0x4a, 0x5c, 0x8b, 0xb0, 0x4a, 0x59, 0x5a, 0xff, 0x5a, 0x4a, 0xeb, 0x09, 0xe5, 0x28,
- 0xa5, 0x93, 0x74, 0x86, 0x47, 0xad, 0xa0, 0x3a, 0xc0, 0xa1, 0xed, 0xaa, 0x5e, 0xb4, 0xb2, 0x10,
- 0x8d, 0x9e, 0x6e, 0x86, 0x54, 0x1c, 0x93, 0x40, 0x5f, 0x87, 0x92, 0xe5, 0xf4, 0x19, 0x27, 0xfe,
- 0x8c, 0x6b, 0x51, 0x9e, 0xa0, 0x70, 0x7d, 0xdb, 0x11, 0x0b, 0xc7, 0xe5, 0xd0, 0x11, 0xcc, 0xdb,
- 0xb1, 0xa6, 0xb7, 0xb2, 0x24, 0x73, 0x71, 0x73, 0xea, 0x4e, 0x97, 0x19, 0x4b, 0xa2, 0x12, 0xc5,
- 0x29, 0x38, 0x81, 0xbc, 0xfa, 0x0d, 0x28, 0x7d, 0xca, 0x1e, 0x4c, 0xf4, 0x70, 0xa3, 0x5b, 0x37,
- 0x55, 0x0f, 0xf7, 0xd7, 0x0c, 0x2c, 0x24, 0x03, 0x1e, 0xbe, 0x75, 0xb4, 0x89, 0x33, 0xcb, 0xa0,
- 0x2a, 0x67, 0x27, 0x56, 0x65, 0x55, 0xfc, 0x66, 0x5f, 0xa6, 0xf8, 0x6d, 0x02, 0x98, 0x3d, 0x3b,
- 0xa8, 0x7b, 0x7e, 0x1d, 0x0d, 0x2b, 0x57, 0x34, 0x45, 0xc3, 0x31, 0x29, 0x39, 0x95, 0xf4, 0x5c,
- 0x4e, 0x3d, 0xc7, 0x21, 0x54, 0x5d, 0xa6, 0xfe, 0x54, 0x32, 0xa4, 0xe2, 0x98, 0x04, 0xba, 0x09,
- 0xe8, 0xc0, 0xf1, 0xac, 0x63, 0x19, 0x82, 0xe0, 0x9c, 0xcb, 0x2a, 0x59, 0xf0, 0x87, 0x52, 0xc6,
- 0x18, 0x17, 0x5f, 0xa0, 0xa1, 0xcf, 0x41, 0xae, 0x25, 0xda, 0x0a, 0xfd, 0x2e, 0x24, 0xe7, 0x49,
- 0xe8, 0x86, 0x1f, 0x09, 0x2d, 0x1c, 0xf8, 0x4c, 0x17, 0x05, 0xfd, 0x2a, 0x14, 0xb1, 0xe7, 0xf1,
- 0x96, 0xc9, 0x8f, 0x18, 0xaa, 0x41, 0xae, 0x27, 0x7e, 0xa8, 0x61, 0xa1, 0x9c, 0xff, 0x4a, 0x0e,
- 0xf6, 0xe9, 0xfa, 0xaf, 0x34, 0x78, 0x75, 0xe2, 0xec, 0x4e, 0x44, 0xd4, 0x0a, 0xbf, 0x94, 0x4b,
- 0x61, 0x44, 0x23, 0x39, 0x1c, 0x93, 0x12, 0x9d, 0x58, 0x62, 0xe0, 0x37, 0xda, 0x89, 0x25, 0xac,
- 0xe1, 0xa4, 0xac, 0xfe, 0xef, 0x0c, 0xe4, 0xfd, 0x67, 0xd9, 0x7f, 0xb9, 0xf9, 0x7e, 0x03, 0xf2,
- 0x4c, 0xda, 0x51, 0xee, 0x85, 0xd5, 0xd2, 0xb7, 0x8e, 0x15, 0x57, 0x34, 0x31, 0x5d, 0xc2, 0x98,
- 0xd9, 0x09, 0x92, 0x37, 0x6c, 0x62, 0xf6, 0x7c, 0x32, 0x0e, 0xf8, 0xe8, 0x1d, 0xf1, 0x0a, 0x35,
- 0x59, 0xd8, 0x17, 0x56, 0x03, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xda, 0xbc, 0x02, 0x97, 0xdf, 0x58,
- 0x49, 0xa3, 0x07, 0x30, 0xd7, 0x26, 0xdc, 0xb4, 0x1d, 0xbf, 0x1d, 0x4c, 0x3d, 0x99, 0xf4, 0xc1,
- 0x9a, 0xbe, 0xaa, 0x51, 0x12, 0x3e, 0xa9, 0x0f, 0x1c, 0x00, 0x8a, 0x83, 0x67, 0x79, 0x6d, 0x7f,
- 0x4c, 0x9f, 0x8b, 0x0e, 0xde, 0xb6, 0xd7, 0x26, 0x58, 0x72, 0xf4, 0xc7, 0x1a, 0x94, 0x7c, 0xa4,
- 0x6d, 0xb3, 0xcf, 0x08, 0xda, 0x08, 0x57, 0xe1, 0x6f, 0x77, 0x70, 0x27, 0xcf, 0xbe, 0x77, 0xd6,
- 0x23, 0xe7, 0x83, 0x5a, 0x51, 0x8a, 0x89, 0x8f, 0x70, 0x01, 0xb1, 0x18, 0x65, 0x2e, 0x89, 0xd1,
- 0xeb, 0x90, 0x93, 0xad, 0xb7, 0x0a, 0x66, 0xd8, 0xe8, 0xc9, 0xf6, 0x1c, 0xfb, 0x3c, 0xfd, 0xe3,
- 0x0c, 0x94, 0x13, 0x8b, 0x4b, 0xd1, 0xd5, 0x85, 0xa3, 0x92, 0x4c, 0x8a, 0xf1, 0xdb, 0xe4, 0x7f,
- 0xae, 0x7c, 0x1f, 0xf2, 0x96, 0x58, 0x5f, 0xf0, 0xdf, 0xad, 0x8d, 0x69, 0xb6, 0x42, 0x46, 0x26,
- 0xca, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x99, 0x12, 0x4e, 0xcf, 0xb6, 0x0e, 0x39,
- 0xa1, 0xf1, 0xfe, 0x3f, 0x17, 0xf5, 0x3d, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x04, 0xa5, 0x32, 0xff,
- 0x12, 0xa5, 0x52, 0x77, 0x60, 0xf6, 0x7f, 0xd8, 0xa3, 0xff, 0x00, 0x8a, 0x51, 0x17, 0xf5, 0x19,
- 0x9b, 0xd4, 0x7f, 0x04, 0x05, 0x91, 0x8d, 0x41, 0xf7, 0x7f, 0xc9, 0x4d, 0x94, 0xbc, 0x23, 0x32,
- 0x69, 0xee, 0x08, 0x7d, 0x13, 0xfc, 0xff, 0x99, 0x89, 0x6a, 0xea, 0xbf, 0xd8, 0x63, 0xd5, 0x34,
- 0xfe, 0xfc, 0x8e, 0x8d, 0xcc, 0x7e, 0xa1, 0x01, 0xc8, 0xe7, 0xe3, 0xce, 0x09, 0x71, 0xb9, 0x70,
- 0x4c, 0xec, 0xc0, 0xa8, 0x63, 0xf2, 0x18, 0x49, 0x0e, 0xba, 0x07, 0x79, 0x4f, 0x76, 0x57, 0x6a,
- 0x86, 0x35, 0xe5, 0x38, 0x20, 0xcc, 0x3a, 0xbf, 0x45, 0xc3, 0x0a, 0xcc, 0x58, 0x7f, 0xfa, 0xbc,
- 0x3a, 0xf3, 0xec, 0x79, 0x75, 0xe6, 0xa3, 0xe7, 0xd5, 0x99, 0x0f, 0x86, 0x55, 0xed, 0xe9, 0xb0,
- 0xaa, 0x3d, 0x1b, 0x56, 0xb5, 0x8f, 0x86, 0x55, 0xed, 0xe3, 0x61, 0x55, 0x7b, 0xfc, 0x49, 0x75,
- 0xe6, 0x41, 0xe6, 0x64, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x28, 0xb2, 0x54,
- 0x20, 0x00, 0x00,
+ // 2820 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0xcf, 0x6f, 0x1c, 0x57,
+ 0xd9, 0xb3, 0xeb, 0x5d, 0xef, 0x7e, 0xeb, 0x4d, 0xec, 0x97, 0x04, 0xb6, 0x46, 0x78, 0xdd, 0x29,
+ 0xaa, 0x52, 0x48, 0xd7, 0x4d, 0x4a, 0xab, 0x90, 0xd2, 0x82, 0xd7, 0x76, 0x52, 0xd3, 0xb8, 0xb1,
+ 0x9e, 0x93, 0x20, 0x42, 0x84, 0x3a, 0xde, 0x79, 0x5e, 0x0f, 0x9e, 0x9d, 0x99, 0xbe, 0x37, 0xeb,
+ 0xc4, 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xd5, 0x23, 0x27, 0xd4, 0x0a, 0xfe, 0x02, 0x4e, 0x9c,
+ 0x38, 0x55, 0xa2, 0x17, 0xa4, 0x4a, 0x5c, 0x2a, 0x81, 0x56, 0xad, 0x41, 0x82, 0x1b, 0xe2, 0xea,
+ 0x13, 0x7a, 0xbf, 0x66, 0xde, 0xec, 0x7a, 0xe3, 0x59, 0x52, 0x2a, 0x4e, 0x3b, 0xf3, 0xfd, 0x7e,
+ 0xef, 0x7d, 0xef, 0xfb, 0x35, 0x0b, 0x9b, 0xfb, 0x57, 0x59, 0xcb, 0x0b, 0x97, 0xf7, 0xfb, 0x3b,
+ 0x84, 0x06, 0x24, 0x26, 0x6c, 0xf9, 0x80, 0x04, 0x6e, 0x48, 0x97, 0x15, 0xc2, 0x89, 0xbc, 0x9e,
+ 0xd3, 0xd9, 0xf3, 0x02, 0x42, 0x0f, 0x97, 0xa3, 0xfd, 0x2e, 0x07, 0xb0, 0xe5, 0x1e, 0x89, 0x9d,
+ 0xe5, 0x83, 0xcb, 0xcb, 0x5d, 0x12, 0x10, 0xea, 0xc4, 0xc4, 0x6d, 0x45, 0x34, 0x8c, 0x43, 0xf4,
+ 0x25, 0xc9, 0xd5, 0x32, 0xb9, 0x5a, 0xd1, 0x7e, 0x97, 0x03, 0x58, 0x8b, 0x73, 0xb5, 0x0e, 0x2e,
+ 0x2f, 0x3c, 0xdb, 0xf5, 0xe2, 0xbd, 0xfe, 0x4e, 0xab, 0x13, 0xf6, 0x96, 0xbb, 0x61, 0x37, 0x5c,
+ 0x16, 0xcc, 0x3b, 0xfd, 0x5d, 0xf1, 0x26, 0x5e, 0xc4, 0x93, 0x14, 0xba, 0x30, 0xd6, 0x14, 0xda,
+ 0x0f, 0x62, 0xaf, 0x47, 0x86, 0xad, 0x58, 0x78, 0xf1, 0x34, 0x06, 0xd6, 0xd9, 0x23, 0x3d, 0x67,
+ 0x98, 0xcf, 0xfe, 0x63, 0x11, 0x2a, 0x2b, 0x5b, 0x1b, 0x37, 0x68, 0xd8, 0x8f, 0xd0, 0x12, 0x4c,
+ 0x07, 0x4e, 0x8f, 0x34, 0xac, 0x25, 0xeb, 0x62, 0xb5, 0x3d, 0xfb, 0xc1, 0xa0, 0x39, 0x75, 0x34,
+ 0x68, 0x4e, 0xbf, 0xee, 0xf4, 0x08, 0x16, 0x18, 0xe4, 0x43, 0xe5, 0x80, 0x50, 0xe6, 0x85, 0x01,
+ 0x6b, 0x14, 0x96, 0x8a, 0x17, 0x6b, 0x57, 0x5e, 0x69, 0xe5, 0x59, 0x7f, 0x4b, 0x28, 0xb8, 0x2b,
+ 0x59, 0xaf, 0x87, 0x74, 0xcd, 0x63, 0x9d, 0xf0, 0x80, 0xd0, 0xc3, 0xf6, 0x9c, 0xd2, 0x52, 0x51,
+ 0x48, 0x86, 0x13, 0x0d, 0xe8, 0xc7, 0x16, 0xcc, 0x45, 0x94, 0xec, 0x12, 0x4a, 0x89, 0xab, 0xf0,
+ 0x8d, 0xe2, 0x92, 0xf5, 0x29, 0xa8, 0x6d, 0x28, 0xb5, 0x73, 0x5b, 0x43, 0xf2, 0xf1, 0x88, 0x46,
+ 0xf4, 0x1b, 0x0b, 0x16, 0x18, 0xa1, 0x07, 0x84, 0xae, 0xb8, 0x2e, 0x25, 0x8c, 0xb5, 0x0f, 0x57,
+ 0x7d, 0x8f, 0x04, 0xf1, 0xea, 0xc6, 0x1a, 0x66, 0x8d, 0x69, 0xb1, 0x0f, 0xdf, 0xc8, 0x67, 0xd0,
+ 0xf6, 0x38, 0x39, 0x6d, 0x5b, 0x59, 0xb4, 0x30, 0x96, 0x84, 0xe1, 0x47, 0x98, 0x61, 0xef, 0xc2,
+ 0xac, 0x3e, 0xc8, 0x9b, 0x1e, 0x8b, 0xd1, 0x5d, 0x28, 0x77, 0xf9, 0x0b, 0x6b, 0x58, 0xc2, 0xc0,
+ 0x56, 0x3e, 0x03, 0xb5, 0x8c, 0xf6, 0x19, 0x65, 0x4f, 0x59, 0xbc, 0x32, 0xac, 0xa4, 0xd9, 0x3f,
+ 0x9f, 0x86, 0xda, 0xca, 0xd6, 0x06, 0x26, 0x2c, 0xec, 0xd3, 0x0e, 0xc9, 0xe1, 0x34, 0x57, 0x00,
+ 0xf8, 0x2f, 0x8b, 0x9c, 0x0e, 0x71, 0x1b, 0x85, 0x25, 0xeb, 0x62, 0xa5, 0x8d, 0x14, 0x1d, 0xbc,
+ 0x9e, 0x60, 0xb0, 0x41, 0xc5, 0xa5, 0xee, 0x7b, 0x81, 0x2b, 0x4e, 0xdb, 0x90, 0xfa, 0x9a, 0x17,
+ 0xb8, 0x58, 0x60, 0xd0, 0x4d, 0x28, 0x1d, 0x10, 0xba, 0xc3, 0xf7, 0x9f, 0x3b, 0xc4, 0x57, 0xf2,
+ 0x2d, 0xef, 0x2e, 0x67, 0x69, 0x57, 0x8f, 0x06, 0xcd, 0x92, 0x78, 0xc4, 0x52, 0x08, 0x6a, 0x01,
+ 0xb0, 0xbd, 0x90, 0xc6, 0xc2, 0x9c, 0x46, 0x69, 0xa9, 0x78, 0xb1, 0xda, 0x3e, 0xc3, 0xed, 0xdb,
+ 0x4e, 0xa0, 0xd8, 0xa0, 0x40, 0x57, 0x61, 0x96, 0x79, 0x41, 0xb7, 0xef, 0x3b, 0x94, 0x03, 0x1a,
+ 0x65, 0x61, 0xe7, 0x79, 0x65, 0xe7, 0xec, 0xb6, 0x81, 0xc3, 0x19, 0x4a, 0xae, 0xa9, 0xe3, 0xc4,
+ 0xa4, 0x1b, 0x52, 0x8f, 0xb0, 0xc6, 0x4c, 0xaa, 0x69, 0x35, 0x81, 0x62, 0x83, 0x02, 0x3d, 0x05,
+ 0x25, 0xb1, 0xf3, 0x8d, 0x8a, 0x50, 0x51, 0x57, 0x2a, 0x4a, 0xe2, 0x58, 0xb0, 0xc4, 0xa1, 0x67,
+ 0x60, 0x46, 0xdd, 0x9a, 0x46, 0x55, 0x90, 0x9d, 0x55, 0x64, 0x33, 0xda, 0xad, 0x35, 0x1e, 0x7d,
+ 0x0b, 0x10, 0x8b, 0x43, 0xea, 0x74, 0x89, 0x42, 0xbd, 0xea, 0xb0, 0xbd, 0x06, 0x08, 0xae, 0x05,
+ 0xc5, 0x85, 0xb6, 0x47, 0x28, 0xf0, 0x09, 0x5c, 0xf6, 0xef, 0x2c, 0x38, 0x6b, 0xf8, 0x82, 0xf0,
+ 0xbb, 0xab, 0x30, 0xdb, 0x35, 0x6e, 0x9d, 0xf2, 0x8b, 0x64, 0x67, 0xcc, 0x1b, 0x89, 0x33, 0x94,
+ 0x88, 0x40, 0x95, 0x2a, 0x49, 0x3a, 0xba, 0x5c, 0xce, 0xed, 0xb4, 0xda, 0x86, 0x54, 0x93, 0x01,
+ 0x64, 0x38, 0x95, 0x6c, 0xff, 0xc3, 0x12, 0x0e, 0xac, 0xe3, 0x0d, 0xba, 0x68, 0xc4, 0x34, 0x4b,
+ 0x1c, 0xc7, 0xec, 0x98, 0x78, 0x74, 0x4a, 0x20, 0x28, 0xfc, 0x5f, 0x04, 0x82, 0x6b, 0x95, 0x5f,
+ 0xbd, 0xdb, 0x9c, 0x7a, 0xeb, 0xaf, 0x4b, 0x53, 0x76, 0x0f, 0xea, 0xab, 0x94, 0x38, 0x31, 0xb9,
+ 0x15, 0xc5, 0x62, 0x01, 0x36, 0x94, 0x5d, 0x7a, 0x88, 0xfb, 0x81, 0x5a, 0x28, 0xf0, 0xfb, 0xbd,
+ 0x26, 0x20, 0x58, 0x61, 0xf8, 0xf9, 0xed, 0x7a, 0xc4, 0x77, 0x37, 0x9d, 0xc0, 0xe9, 0x12, 0xaa,
+ 0x6e, 0x60, 0xb2, 0xab, 0xd7, 0x0d, 0x1c, 0xce, 0x50, 0xda, 0x3f, 0x2d, 0x42, 0x7d, 0x8d, 0xf8,
+ 0x24, 0xd5, 0x77, 0x1d, 0x50, 0x97, 0x3a, 0x1d, 0xb2, 0x45, 0xa8, 0x17, 0xba, 0xdb, 0xa4, 0x13,
+ 0x06, 0x2e, 0x13, 0x1e, 0x51, 0x6c, 0x7f, 0x8e, 0xfb, 0xd9, 0x8d, 0x11, 0x2c, 0x3e, 0x81, 0x03,
+ 0xf9, 0x50, 0x8f, 0xa8, 0x78, 0xf6, 0x62, 0x95, 0x7b, 0xf8, 0x9d, 0x7f, 0x3e, 0xdf, 0x56, 0x6f,
+ 0x99, 0xac, 0xed, 0xf9, 0xa3, 0x41, 0xb3, 0x9e, 0x01, 0xe1, 0xac, 0x70, 0xf4, 0x4d, 0x98, 0x0b,
+ 0x69, 0xb4, 0xe7, 0x04, 0x6b, 0x24, 0x22, 0x81, 0x4b, 0x82, 0x98, 0x89, 0x5d, 0xa8, 0xb4, 0xcf,
+ 0xf3, 0x8c, 0x71, 0x6b, 0x08, 0x87, 0x47, 0xa8, 0xd1, 0x3d, 0x98, 0x8f, 0x68, 0x18, 0x39, 0x5d,
+ 0x87, 0x4b, 0xdc, 0x0a, 0x7d, 0xaf, 0x73, 0x28, 0xe2, 0x54, 0xb5, 0x7d, 0xe9, 0x68, 0xd0, 0x9c,
+ 0xdf, 0x1a, 0x46, 0x1e, 0x0f, 0x9a, 0xe7, 0xc4, 0xd6, 0x71, 0x48, 0x8a, 0xc4, 0xa3, 0x62, 0x8c,
+ 0x33, 0x2c, 0x8d, 0x3b, 0x43, 0x7b, 0x03, 0x2a, 0x6b, 0x7d, 0x2a, 0xb8, 0xd0, 0xcb, 0x50, 0x71,
+ 0xd5, 0xb3, 0xda, 0xf9, 0x27, 0x75, 0xca, 0xd5, 0x34, 0xc7, 0x83, 0x66, 0x9d, 0x17, 0x09, 0x2d,
+ 0x0d, 0xc0, 0x09, 0x8b, 0x7d, 0x1f, 0xea, 0xeb, 0x0f, 0xa3, 0x90, 0xc6, 0xfa, 0x4c, 0x9f, 0x86,
+ 0x32, 0x11, 0x00, 0x21, 0xad, 0x92, 0xe6, 0x09, 0x49, 0x86, 0x15, 0x96, 0xc7, 0x2d, 0xf2, 0xd0,
+ 0xe9, 0xc4, 0x2a, 0xe0, 0x27, 0x71, 0x6b, 0x9d, 0x03, 0xb1, 0xc4, 0xd9, 0xef, 0x5b, 0x50, 0x16,
+ 0x1e, 0xc5, 0xd0, 0x6d, 0x28, 0xf6, 0x9c, 0x48, 0x25, 0xab, 0x17, 0xf2, 0x9d, 0xac, 0x64, 0x6d,
+ 0x6d, 0x3a, 0xd1, 0x7a, 0x10, 0xd3, 0xc3, 0x76, 0x4d, 0x29, 0x29, 0x6e, 0x3a, 0x11, 0xe6, 0xe2,
+ 0x16, 0x5c, 0xa8, 0x68, 0x2c, 0x9a, 0x83, 0xe2, 0x3e, 0x39, 0x94, 0x01, 0x09, 0xf3, 0x47, 0xd4,
+ 0x86, 0xd2, 0x81, 0xe3, 0xf7, 0x89, 0xf2, 0xa7, 0x4b, 0x93, 0x68, 0xc5, 0x92, 0xf5, 0x5a, 0xe1,
+ 0xaa, 0x65, 0xdf, 0x02, 0xb8, 0x41, 0x92, 0x1d, 0x5a, 0x81, 0xb3, 0x3a, 0xda, 0x64, 0x83, 0xe0,
+ 0xe7, 0x95, 0x79, 0x67, 0x71, 0x16, 0x8d, 0x87, 0xe9, 0xed, 0xfb, 0x50, 0x15, 0x81, 0x92, 0xe7,
+ 0xbb, 0x34, 0x03, 0x58, 0x8f, 0xc8, 0x00, 0x3a, 0x61, 0x16, 0xc6, 0x25, 0x4c, 0x23, 0x2e, 0xf8,
+ 0x50, 0x97, 0xbc, 0x3a, 0x87, 0xe7, 0xd2, 0x70, 0x09, 0x2a, 0xda, 0x4c, 0xa5, 0x25, 0xa9, 0xdd,
+ 0xb4, 0x20, 0x9c, 0x50, 0x18, 0xda, 0xf6, 0x20, 0x13, 0xf4, 0xf3, 0x29, 0x33, 0x12, 0x5a, 0xe1,
+ 0xd1, 0x09, 0xcd, 0xd0, 0xf4, 0x23, 0x68, 0x8c, 0x2b, 0xf8, 0x1e, 0x23, 0x2d, 0xe5, 0x37, 0xc5,
+ 0x7e, 0xdb, 0x82, 0x39, 0x53, 0x52, 0xfe, 0xe3, 0xcb, 0xaf, 0xe4, 0xf4, 0xd2, 0xc8, 0xd8, 0x91,
+ 0x5f, 0x5b, 0x70, 0x3e, 0xb3, 0xb4, 0x89, 0x4e, 0x7c, 0x02, 0xa3, 0x4c, 0xe7, 0x28, 0x4e, 0xe0,
+ 0x1c, 0xcb, 0x50, 0xdb, 0x08, 0xbc, 0xd8, 0x73, 0x7c, 0xef, 0x07, 0x84, 0x9e, 0x5e, 0x4c, 0xda,
+ 0x7f, 0xb0, 0x60, 0xd6, 0xe0, 0x60, 0xe8, 0x3e, 0xcc, 0xf0, 0xb8, 0xeb, 0x05, 0x5d, 0x15, 0x3b,
+ 0x72, 0xd6, 0x0c, 0x86, 0x90, 0x74, 0x5d, 0x5b, 0x52, 0x12, 0xd6, 0x22, 0xd1, 0x16, 0x94, 0x29,
+ 0x61, 0x7d, 0x3f, 0x9e, 0x2c, 0x44, 0x6c, 0xc7, 0x4e, 0xdc, 0x67, 0x32, 0x36, 0x63, 0xc1, 0x8f,
+ 0x95, 0x1c, 0xfb, 0xcf, 0x05, 0xa8, 0xdf, 0x74, 0x76, 0x88, 0xbf, 0x4d, 0x7c, 0xd2, 0x89, 0x43,
+ 0x8a, 0x7e, 0x08, 0xb5, 0x9e, 0x13, 0x77, 0xf6, 0x04, 0x54, 0x97, 0xeb, 0x6b, 0xf9, 0x14, 0x65,
+ 0x24, 0xb5, 0x36, 0x53, 0x31, 0x32, 0x20, 0x9e, 0x53, 0x0b, 0xab, 0x19, 0x18, 0x6c, 0x6a, 0x13,
+ 0x3d, 0x96, 0x78, 0x5f, 0x7f, 0x18, 0xf1, 0x5a, 0x62, 0xf2, 0xd6, 0x2e, 0x63, 0x02, 0x26, 0x6f,
+ 0xf6, 0x3d, 0x4a, 0x7a, 0x24, 0x88, 0xd3, 0x1e, 0x6b, 0x73, 0x48, 0x3e, 0x1e, 0xd1, 0xb8, 0xf0,
+ 0x0a, 0xcc, 0x0d, 0x1b, 0x7f, 0x42, 0xbc, 0x3e, 0x6f, 0xc6, 0xeb, 0xaa, 0x19, 0x81, 0x7f, 0x6b,
+ 0x41, 0x63, 0x9c, 0x21, 0xe8, 0x8b, 0x86, 0xa0, 0x34, 0x47, 0xbc, 0x46, 0x0e, 0xa5, 0xd4, 0x75,
+ 0xa8, 0x84, 0x11, 0xef, 0x8a, 0x43, 0xaa, 0xfc, 0xfc, 0x19, 0xed, 0xbb, 0xb7, 0x14, 0xfc, 0x78,
+ 0xd0, 0xbc, 0x90, 0x11, 0xaf, 0x11, 0x38, 0x61, 0xe5, 0x89, 0x59, 0xd8, 0xc3, 0x8b, 0x85, 0x24,
+ 0x31, 0xdf, 0x15, 0x10, 0xac, 0x30, 0xf6, 0xef, 0x2d, 0x98, 0x16, 0x55, 0xf2, 0x7d, 0xa8, 0xf0,
+ 0xfd, 0x73, 0x9d, 0xd8, 0x11, 0x76, 0xe5, 0xee, 0xcf, 0x38, 0xf7, 0x26, 0x89, 0x9d, 0xf4, 0x7e,
+ 0x69, 0x08, 0x4e, 0x24, 0x22, 0x0c, 0x25, 0x2f, 0x26, 0x3d, 0x7d, 0x90, 0xcf, 0x8e, 0x15, 0xad,
+ 0xa6, 0x03, 0x2d, 0xec, 0x3c, 0x58, 0x7f, 0x18, 0x93, 0x80, 0x1f, 0x46, 0x1a, 0x0c, 0x36, 0xb8,
+ 0x0c, 0x2c, 0x45, 0xd9, 0xff, 0xb6, 0x20, 0x51, 0xc5, 0xaf, 0x3b, 0x23, 0xfe, 0xee, 0x4d, 0x2f,
+ 0xd8, 0x57, 0xdb, 0x9a, 0x98, 0xb3, 0xad, 0xe0, 0x38, 0xa1, 0x38, 0x29, 0x21, 0x16, 0x26, 0x4b,
+ 0x88, 0x5c, 0x61, 0x27, 0x0c, 0x62, 0x2f, 0xe8, 0x8f, 0xc4, 0x97, 0x55, 0x05, 0xc7, 0x09, 0x05,
+ 0xaf, 0x3b, 0x29, 0xe9, 0x39, 0x5e, 0xe0, 0x05, 0x5d, 0xbe, 0x88, 0xd5, 0xb0, 0x1f, 0xc4, 0xa2,
+ 0x00, 0x53, 0x75, 0x27, 0x1e, 0xc1, 0xe2, 0x13, 0x38, 0xec, 0x3f, 0x15, 0xa1, 0xc6, 0xd7, 0xac,
+ 0x33, 0xfb, 0x4b, 0x50, 0xf7, 0x4d, 0x2f, 0x50, 0x6b, 0xbf, 0xa0, 0x4c, 0xc9, 0xde, 0x6b, 0x9c,
+ 0xa5, 0xe5, 0xcc, 0xa2, 0x5c, 0x4e, 0x98, 0x0b, 0x59, 0xe6, 0xeb, 0x26, 0x12, 0x67, 0x69, 0x79,
+ 0xbc, 0x7e, 0xc0, 0xef, 0x87, 0x2a, 0x44, 0x93, 0x23, 0xfa, 0x36, 0x07, 0x62, 0x89, 0x3b, 0x69,
+ 0x9f, 0xa7, 0x27, 0xdc, 0xe7, 0x6b, 0x70, 0x86, 0x3b, 0x44, 0xd8, 0x8f, 0x75, 0xb5, 0x5e, 0x12,
+ 0xbb, 0x86, 0x8e, 0x06, 0xcd, 0x33, 0xb7, 0x33, 0x18, 0x3c, 0x44, 0xc9, 0x6d, 0xf4, 0xbd, 0x9e,
+ 0x17, 0x37, 0x66, 0x04, 0x4b, 0x62, 0xe3, 0x4d, 0x0e, 0xc4, 0x12, 0x97, 0x39, 0xc8, 0xca, 0xa9,
+ 0x07, 0xb9, 0x09, 0xe7, 0x1c, 0xdf, 0x0f, 0x1f, 0x88, 0x65, 0xb6, 0xc3, 0x70, 0xbf, 0xe7, 0xd0,
+ 0x7d, 0x26, 0x7a, 0xdc, 0x4a, 0xfb, 0x0b, 0x8a, 0xf1, 0xdc, 0xca, 0x28, 0x09, 0x3e, 0x89, 0xcf,
+ 0xfe, 0x67, 0x01, 0x90, 0xec, 0x56, 0x5c, 0x59, 0xc4, 0xc9, 0x40, 0xf3, 0x0c, 0xcc, 0xf4, 0x54,
+ 0xb7, 0x63, 0x65, 0xf3, 0x9c, 0x6e, 0x74, 0x34, 0x1e, 0x6d, 0x42, 0x55, 0x5e, 0xf8, 0xd4, 0x89,
+ 0x97, 0x15, 0x71, 0xf5, 0x96, 0x46, 0x1c, 0x0f, 0x9a, 0x0b, 0x19, 0x35, 0x09, 0xe6, 0xf6, 0x61,
+ 0x44, 0x70, 0x2a, 0x01, 0x5d, 0x01, 0x70, 0x22, 0xcf, 0x1c, 0x6d, 0x55, 0xd3, 0xd1, 0x48, 0xda,
+ 0xa4, 0x62, 0x83, 0x0a, 0xbd, 0x0a, 0xd3, 0x7c, 0xe3, 0xd5, 0xdc, 0xe3, 0xcb, 0xf9, 0xc2, 0x06,
+ 0x3f, 0xba, 0x76, 0x85, 0xe7, 0x52, 0xfe, 0x84, 0x85, 0x04, 0x74, 0x0f, 0xca, 0xc2, 0xcb, 0xe4,
+ 0x21, 0x4f, 0x58, 0xff, 0x8a, 0x66, 0x48, 0x15, 0xef, 0xc7, 0xc9, 0x13, 0x56, 0x12, 0xed, 0x37,
+ 0xa1, 0xba, 0xe9, 0x75, 0x68, 0xc8, 0xd5, 0xf1, 0x0d, 0x66, 0x99, 0xe6, 0x2f, 0xd9, 0x60, 0xed,
+ 0x4b, 0x1a, 0xcf, 0x9d, 0x28, 0x70, 0x82, 0x50, 0xb6, 0x78, 0xa5, 0xd4, 0x89, 0x5e, 0xe7, 0x40,
+ 0x2c, 0x71, 0xd7, 0xce, 0xf3, 0xfa, 0xe1, 0x67, 0xef, 0x35, 0xa7, 0xde, 0x79, 0xaf, 0x39, 0xf5,
+ 0xee, 0x7b, 0xaa, 0x96, 0xf8, 0x7b, 0x0d, 0xe0, 0xd6, 0xce, 0xf7, 0x49, 0x47, 0xc6, 0xa8, 0xd3,
+ 0x07, 0x53, 0xbc, 0x26, 0x54, 0xf3, 0x50, 0x31, 0xc4, 0x29, 0x0c, 0xd5, 0x84, 0x06, 0x0e, 0x67,
+ 0x28, 0xd1, 0x32, 0x54, 0x93, 0x61, 0x95, 0x3a, 0xb6, 0x79, 0xed, 0x06, 0xc9, 0x44, 0x0b, 0xa7,
+ 0x34, 0x99, 0x80, 0x39, 0x7d, 0x6a, 0xc0, 0x6c, 0x43, 0xb1, 0xef, 0xb9, 0xe2, 0x54, 0xaa, 0xed,
+ 0xe7, 0x74, 0xc2, 0xba, 0xb3, 0xb1, 0x76, 0x3c, 0x68, 0x3e, 0x39, 0x6e, 0xd2, 0x1b, 0x1f, 0x46,
+ 0x84, 0xb5, 0xee, 0x6c, 0xac, 0x61, 0xce, 0x7c, 0x52, 0x30, 0x28, 0x4f, 0x18, 0x0c, 0xae, 0x00,
+ 0xa8, 0x55, 0x73, 0x6e, 0x79, 0xab, 0x13, 0xef, 0xbc, 0x91, 0x60, 0xb0, 0x41, 0x85, 0x18, 0xcc,
+ 0x77, 0x28, 0x91, 0xce, 0xee, 0xf5, 0x08, 0x8b, 0x9d, 0x9e, 0x1c, 0x5d, 0x4d, 0xe6, 0xaa, 0x4f,
+ 0x28, 0x35, 0xf3, 0xab, 0xc3, 0xc2, 0xf0, 0xa8, 0x7c, 0x14, 0xc2, 0xbc, 0xab, 0xba, 0xe7, 0x54,
+ 0x69, 0x75, 0x62, 0xa5, 0x17, 0xb8, 0xc2, 0xb5, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x1e, 0x2c,
+ 0x68, 0xe0, 0xe8, 0x08, 0x43, 0x0c, 0xd3, 0x8a, 0xed, 0xc5, 0xa3, 0x41, 0x73, 0x61, 0x6d, 0x2c,
+ 0x15, 0x7e, 0x84, 0x04, 0xe4, 0x42, 0xd9, 0x97, 0xd5, 0x60, 0x4d, 0x64, 0xf0, 0xaf, 0xe7, 0x5b,
+ 0x45, 0xea, 0xfd, 0x2d, 0xb3, 0x0a, 0x4c, 0x5a, 0x74, 0x55, 0x00, 0x2a, 0xd9, 0xe8, 0x21, 0xd4,
+ 0x9c, 0x20, 0x08, 0x63, 0x47, 0x0e, 0x55, 0x66, 0x85, 0xaa, 0x95, 0x89, 0x55, 0xad, 0xa4, 0x32,
+ 0x86, 0xaa, 0x4e, 0x03, 0x83, 0x4d, 0x55, 0xe8, 0x01, 0x9c, 0x0d, 0x1f, 0x04, 0x84, 0x62, 0xb2,
+ 0x4b, 0x28, 0x09, 0x3a, 0x84, 0x35, 0xea, 0x42, 0xfb, 0x57, 0x73, 0x6a, 0xcf, 0x30, 0xa7, 0x2e,
+ 0x9d, 0x85, 0x33, 0x3c, 0xac, 0x05, 0xb5, 0x00, 0x76, 0xbd, 0x40, 0xf5, 0x0e, 0x8d, 0x33, 0xe9,
+ 0xf4, 0xf5, 0x7a, 0x02, 0xc5, 0x06, 0x05, 0x7a, 0x01, 0x6a, 0x1d, 0xbf, 0xcf, 0x62, 0x22, 0xc7,
+ 0xbc, 0x67, 0xc5, 0x0d, 0x4a, 0xd6, 0xb7, 0x9a, 0xa2, 0xb0, 0x49, 0x87, 0xf6, 0x60, 0xd6, 0x33,
+ 0x9a, 0x94, 0xc6, 0x9c, 0xf0, 0xc5, 0x2b, 0x13, 0x77, 0x26, 0xac, 0x3d, 0xc7, 0x23, 0x91, 0x09,
+ 0xc1, 0x19, 0xc9, 0xa8, 0x0f, 0xf5, 0x9e, 0x99, 0x6a, 0x1a, 0xf3, 0x62, 0x1f, 0xaf, 0xe6, 0x53,
+ 0x35, 0x9a, 0x0c, 0xd3, 0x7a, 0x24, 0x83, 0xc3, 0x59, 0x2d, 0x0b, 0x5f, 0x83, 0xda, 0x7f, 0x59,
+ 0xaa, 0xf3, 0x52, 0x7f, 0xd8, 0x63, 0x26, 0x2a, 0xf5, 0xdf, 0x2f, 0xc0, 0x99, 0xec, 0x39, 0x27,
+ 0x2d, 0xb1, 0x35, 0xf6, 0x6b, 0x81, 0x4e, 0x06, 0xc5, 0xb1, 0xc9, 0x40, 0xc5, 0xdc, 0xe9, 0xc7,
+ 0x89, 0xb9, 0xd9, 0x74, 0x5e, 0xca, 0x95, 0xce, 0x5b, 0x00, 0xbc, 0xdc, 0xa1, 0xa1, 0xef, 0x13,
+ 0x2a, 0x42, 0x74, 0x45, 0x7d, 0x0f, 0x48, 0xa0, 0xd8, 0xa0, 0xe0, 0xb5, 0xed, 0x8e, 0x1f, 0x76,
+ 0xf6, 0xc5, 0x16, 0xe8, 0xf0, 0x22, 0x82, 0x73, 0x45, 0xd6, 0xb6, 0xed, 0x11, 0x2c, 0x3e, 0x81,
+ 0xc3, 0x3e, 0x84, 0x0b, 0x5b, 0x0e, 0xe5, 0x8e, 0x94, 0x5e, 0x65, 0xd1, 0x3c, 0xbc, 0x31, 0xd2,
+ 0x9a, 0x3c, 0x37, 0x69, 0x48, 0x48, 0x17, 0x9d, 0xc2, 0xd2, 0xf6, 0xc4, 0xfe, 0x8b, 0x05, 0x4f,
+ 0x9c, 0xa8, 0xfb, 0x33, 0x68, 0x8d, 0xde, 0xc8, 0xb6, 0x46, 0x2f, 0xe5, 0x1c, 0x21, 0x9f, 0x64,
+ 0xed, 0x98, 0x46, 0x69, 0x06, 0x4a, 0x5b, 0xbc, 0xec, 0xb4, 0x7f, 0x69, 0xc1, 0xac, 0x78, 0x9a,
+ 0x64, 0xfc, 0xde, 0x84, 0xd2, 0x6e, 0xa8, 0x47, 0x6c, 0x15, 0xf9, 0xa5, 0xea, 0x3a, 0x07, 0x60,
+ 0x09, 0x7f, 0x8c, 0xf9, 0xfc, 0xdb, 0x16, 0x64, 0x07, 0xdf, 0xe8, 0x15, 0xe9, 0xf3, 0x56, 0x32,
+ 0x99, 0x9e, 0xd0, 0xdf, 0x5f, 0x1e, 0xd7, 0xd8, 0x9d, 0xcb, 0x35, 0xe5, 0xbc, 0x04, 0x55, 0x1c,
+ 0x86, 0xf1, 0x96, 0x13, 0xef, 0x31, 0xbe, 0xf0, 0x88, 0x3f, 0xa8, 0xbd, 0x11, 0x0b, 0x17, 0x18,
+ 0x2c, 0xe1, 0xf6, 0x2f, 0x2c, 0x78, 0x62, 0xec, 0x27, 0x11, 0x7e, 0xf5, 0x3a, 0xc9, 0x9b, 0x5a,
+ 0x51, 0xe2, 0x85, 0x29, 0x1d, 0x36, 0xa8, 0x78, 0x47, 0x96, 0xf9, 0x8e, 0x32, 0xdc, 0x91, 0x65,
+ 0xb4, 0xe1, 0x2c, 0xad, 0xfd, 0xaf, 0x02, 0x94, 0xe5, 0x98, 0xe7, 0x7f, 0xec, 0xb1, 0x4f, 0x43,
+ 0x99, 0x09, 0x3d, 0xca, 0xbc, 0x24, 0x9b, 0x4b, 0xed, 0x58, 0x61, 0x45, 0x17, 0x43, 0x18, 0x73,
+ 0xba, 0x3a, 0xca, 0xa5, 0x5d, 0x8c, 0x04, 0x63, 0x8d, 0x47, 0x2f, 0x42, 0x99, 0x12, 0x87, 0x25,
+ 0xfd, 0xe1, 0xa2, 0x16, 0x89, 0x05, 0xf4, 0x78, 0xd0, 0x9c, 0x55, 0xc2, 0xc5, 0x3b, 0x56, 0xd4,
+ 0xe8, 0x1e, 0xcc, 0xb8, 0x24, 0x76, 0x3c, 0x5f, 0x77, 0x0c, 0xcf, 0x4f, 0x32, 0x0e, 0x5b, 0x93,
+ 0xac, 0xed, 0x1a, 0xb7, 0x49, 0xbd, 0x60, 0x2d, 0x90, 0x47, 0xe8, 0x4e, 0xe8, 0xca, 0x2f, 0xa9,
+ 0xa5, 0x34, 0x42, 0xaf, 0x86, 0x2e, 0xc1, 0x02, 0x63, 0xbf, 0x63, 0x41, 0x4d, 0x4a, 0x5a, 0x75,
+ 0xfa, 0x8c, 0xa0, 0xcb, 0xc9, 0x2a, 0xe4, 0x71, 0xeb, 0x9a, 0x71, 0x9a, 0x77, 0x59, 0xc7, 0x83,
+ 0x66, 0x55, 0x90, 0x89, 0x96, 0x4b, 0x2f, 0xc0, 0xd8, 0xa3, 0xc2, 0x29, 0x7b, 0xf4, 0x14, 0x94,
+ 0xc4, 0xed, 0x51, 0x9b, 0x99, 0xdc, 0x75, 0x71, 0xc1, 0xb0, 0xc4, 0xd9, 0x1f, 0x17, 0xa0, 0x9e,
+ 0x59, 0x5c, 0x8e, 0xae, 0x23, 0x19, 0xbd, 0x16, 0x72, 0x8c, 0xf3, 0xc7, 0x7f, 0xff, 0xfe, 0x0e,
+ 0x94, 0x3b, 0x7c, 0x7d, 0xfa, 0x0f, 0x08, 0x97, 0x27, 0x39, 0x0a, 0xb1, 0x33, 0xa9, 0x27, 0x89,
+ 0x57, 0x86, 0x95, 0x40, 0x74, 0x03, 0xe6, 0x29, 0x89, 0xe9, 0xe1, 0xca, 0x6e, 0x4c, 0xa8, 0x39,
+ 0x07, 0x28, 0xa5, 0x75, 0x39, 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0x3a, 0xa7, 0x96, 0x1f, 0x23, 0xa7,
+ 0xda, 0x3b, 0x30, 0x7b, 0xdb, 0xd9, 0xf1, 0x93, 0x6f, 0x8a, 0x18, 0xea, 0x5e, 0xd0, 0xf1, 0xfb,
+ 0x2e, 0x91, 0xd1, 0x58, 0x47, 0x2f, 0x7d, 0x69, 0x37, 0x4c, 0xe4, 0xf1, 0xa0, 0x79, 0x2e, 0x03,
+ 0x90, 0x1f, 0xd1, 0x70, 0x56, 0x84, 0xed, 0xc3, 0xf4, 0x67, 0xd8, 0xa7, 0x7e, 0x17, 0xaa, 0x69,
+ 0x27, 0xf1, 0x29, 0xab, 0xb4, 0xdf, 0x80, 0x0a, 0xf7, 0x78, 0xdd, 0x01, 0x9f, 0x52, 0x16, 0x65,
+ 0x0b, 0x96, 0x42, 0x9e, 0x82, 0xc5, 0xee, 0x41, 0xfd, 0x4e, 0xe4, 0x3e, 0xe6, 0x57, 0xe5, 0x42,
+ 0xee, 0xac, 0x75, 0x05, 0xe4, 0x3f, 0x35, 0x78, 0x82, 0x90, 0x99, 0xdb, 0x48, 0x10, 0x66, 0xe2,
+ 0x35, 0xbe, 0x2a, 0xfc, 0xc4, 0x02, 0x10, 0xa3, 0x9f, 0xf5, 0x03, 0x12, 0xc4, 0x7c, 0x1f, 0xb8,
+ 0x53, 0x0d, 0xef, 0x83, 0x88, 0x0c, 0x02, 0x83, 0xee, 0x40, 0x39, 0x94, 0xde, 0x24, 0xc7, 0xfc,
+ 0x13, 0x4e, 0x4c, 0x93, 0x8b, 0x24, 0xfd, 0x09, 0x2b, 0x61, 0xed, 0x8b, 0x1f, 0x7c, 0xb2, 0x38,
+ 0xf5, 0xe1, 0x27, 0x8b, 0x53, 0x1f, 0x7d, 0xb2, 0x38, 0xf5, 0xd6, 0xd1, 0xa2, 0xf5, 0xc1, 0xd1,
+ 0xa2, 0xf5, 0xe1, 0xd1, 0xa2, 0xf5, 0xd1, 0xd1, 0xa2, 0xf5, 0xf1, 0xd1, 0xa2, 0xf5, 0xce, 0xdf,
+ 0x16, 0xa7, 0xee, 0x15, 0x0e, 0x2e, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xe0, 0x33, 0x2e,
+ 0x95, 0x26, 0x00, 0x00,
}
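
The generated Unmarshal methods in the Go hunks above all repeat one protobuf wire-format idiom: read a base-128 varint tag, split it into a field number (upper bits) and a wire type (lowest three bits), then decode the field body — another varint for wire type 0, or a length prefix followed by that many bytes for wire type 2 — advancing iNdEx as they go and skipping unknown fields. The stand-alone sketch below is not part of the vendored code; it walks a small, hand-built two-field buffer (field 1: a string, field 2: a bool-style varint), purely to illustrate the same loop.

package main

import (
	"errors"
	"fmt"
	"io"
)

// readUvarint decodes a protobuf base-128 varint, mirroring the inline
// shift/accumulate loops that appear in every generated Unmarshal method.
func readUvarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if index >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, index, nil
		}
	}
}

func main() {
	// Hypothetical buffer built by hand for illustration:
	// 0x0A = tag for field 1, wire type 2 (length-delimited); length 2, then "hi".
	// 0x10 = tag for field 2, wire type 0 (varint); value 1 follows.
	data := []byte{0x0A, 0x02, 'h', 'i', 0x10, 0x01}

	for i := 0; i < len(data); {
		wire, next, err := readUvarint(data, i)
		if err != nil {
			panic(err)
		}
		i = next
		fieldNum := int32(wire >> 3) // upper bits: field number
		wireType := int(wire & 0x7)  // lowest 3 bits: wire type
		switch wireType {
		case 0: // varint payload (bools, integers)
			v, next, err := readUvarint(data, i)
			if err != nil {
				panic(err)
			}
			i = next
			fmt.Printf("field %d: varint %d\n", fieldNum, v)
		case 2: // length-delimited payload (strings, nested messages)
			l, next, err := readUvarint(data, i)
			if err != nil {
				panic(err)
			}
			i = next
			end := i + int(l)
			if end > len(data) {
				panic(io.ErrUnexpectedEOF)
			}
			fmt.Printf("field %d: bytes %q\n", fieldNum, data[i:end])
			i = end
		default:
			panic(fmt.Sprintf("unhandled wire type %d", wireType))
		}
	}
}

Running the sketch prints the decoded field number and payload for each entry; the generated code does the same bookkeeping but assigns into concrete struct fields (m.Manager, m.DryRun, and so on) and routes unknown field numbers through skipGenerated instead of printing.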
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index bd5abcb79..cc9099a65 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,7 +23,6 @@ package k8s.io.apimachinery.pkg.apis.meta.v1;
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
@@ -49,6 +48,7 @@ message APIGroup {
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ // +optional
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
}
@@ -92,6 +92,16 @@ message APIResource {
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
repeated string categories = 7;
+
+ // The hash value of the storage version, the version this resource is
+ // converted to when written to the data store. Value must be treated
+ // as opaque by clients. Only equality comparison on the value is valid.
+ // This is an alpha feature and may change or be removed in the future.
+ // The field is populated by the apiserver only if the
+ // StorageVersionHash feature gate is enabled.
+ // This field will remain optional even if it graduates.
+ // +optional
+ optional string storageVersionHash = 10;
}
// APIResourceList is a list of APIResource, it is used to expose the name of the
@@ -107,7 +117,7 @@ message APIResourceList {
// APIVersions lists the versions that are available, to allow clients to
// discover the API at /api, which is the root path of the legacy v1 API.
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message APIVersions {
@@ -124,6 +134,24 @@ message APIVersions {
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
}
+// CreateOptions may be provided when creating an API object.
+message CreateOptions {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ repeated string dryRun = 1;
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint.
+ // +optional
+ optional string fieldManager = 3;
+}
+
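The dryRun and fieldManager fields above correspond to the Go CreateOptions struct added further down in this diff. As a minimal sketch of how a caller would fill them in and what the serialized request body looks like, assuming the vendored k8s.io/apimachinery packages are importable (the manager name is illustrative):

package main

import (
    "encoding/json"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // DryRunAll ("All") asks the server to run every processing stage
    // without persisting the result.
    opts := metav1.CreateOptions{
        DryRun:       []string{metav1.DryRunAll},
        FieldManager: "example-operator", // illustrative manager name
    }

    b, err := json.Marshal(opts)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"dryRun":["All"],"fieldManager":"example-operator"}
}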
// DeleteOptions may be provided when deleting an API object.
message DeleteOptions {
// The duration in seconds before the object should be deleted. Value must be non-negative integer.
@@ -155,6 +183,14 @@ message DeleteOptions {
// foreground.
// +optional
optional string propagationPolicy = 4;
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ repeated string dryRun = 5;
}
// Duration is a wrapper around time.Duration which supports correct
@@ -165,14 +201,34 @@ message Duration {
}
// ExportOptions is the query options to the standard REST get call.
+// Deprecated. Planned for removal in 1.18.
message ExportOptions {
// Should this value be exported. Export strips fields that a user can not specify.
+ // Deprecated. Planned for removal in 1.18.
optional bool export = 1;
// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
+ // Deprecated. Planned for removal in 1.18.
optional bool exact = 2;
}
+// Fields stores a set of fields in a data structure like a Trie.
+// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff
+message Fields {
+ // Map stores a set of fields in a data structure like a Trie.
+ //
+ // Each key is either a '.' representing the field itself, and will always map to an empty set,
+ // or a string representing a sub-field or item. The string will follow one of these four formats:
+ // 'f:<name>', where <name> is the name of a field in a struct, or key in a map
+ // 'v:<value>', where <value> is the exact json formatted value of a list item
+ // 'i:<index>', where <index> is the position of an item in a list
+ // 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
+ // If a key maps to an empty Fields value, the field that key represents is part of the set.
+ //
+ // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal
+ map<string, Fields> map = 1;
+}
+
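The key grammar above is easiest to see on a concrete field set. The sketch below decodes an illustrative set covering metadata.labels["app"] and spec.replicas through the Fields type and the UnmarshalJSON added elsewhere in this diff, assuming the vendored packages are importable:

package main

import (
    "encoding/json"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// printSet walks the trie; a key whose child set is empty is itself a
// member of the field set.
func printSet(path string, f metav1.Fields) {
    for k, child := range f.Map {
        if len(child.Map) == 0 {
            fmt.Println(path + k) // leaf: this field is in the set
        }
        printSet(path+k+".", child)
    }
}

func main() {
    // Illustrative field set: metadata.labels["app"] and spec.replicas.
    raw := `{"f:metadata":{"f:labels":{"f:app":{}}},"f:spec":{"f:replicas":{}}}`

    var fs metav1.Fields
    if err := json.Unmarshal([]byte(raw), &fs); err != nil {
        panic(err)
    }
    printSet("", fs)
    // Output (in some order):
    // f:metadata.f:labels.f:app
    // f:spec.f:replicas
}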
// GetOptions is the standard query options to the standard REST get call.
message GetOptions {
// When specified:
@@ -180,15 +236,11 @@ message GetOptions {
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
optional string resourceVersion = 1;
-
- // If true, partially initialized resources are included in the response.
- // +optional
- optional bool includeUninitialized = 2;
}
// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupKind {
optional string group = 1;
@@ -198,7 +250,7 @@ message GroupKind {
// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupResource {
optional string group = 1;
@@ -207,7 +259,7 @@ message GroupResource {
}
// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersion {
optional string group = 1;
@@ -228,7 +280,7 @@ message GroupVersionForDiscovery {
// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionKind {
optional string group = 1;
@@ -240,7 +292,7 @@ message GroupVersionKind {
// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
-//
+//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionResource {
optional string group = 1;
@@ -338,10 +390,26 @@ message ListMeta {
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
- // list may not be possible if the server configuration has changed or more than a few minutes have
- // passed. The resourceVersion field returned when using this continue value will be identical to
- // the value in the first response.
+ // consistent list may not be possible if the server configuration has changed or more than a few
+ // minutes have passed. The resourceVersion field returned when using this continue value will be
+ // identical to the value in the first response, unless you have received this token from an error
+ // message.
optional string continue = 3;
+
+ // remainingItemCount is the number of subsequent items in the list which are not included in this
+ // list response. If the list request contained label or field selectors, then the number of
+ // remaining items is unknown and the field will be left unset and omitted during serialization.
+ // If the list is complete (either because it is not chunking or because this is the last chunk),
+ // then there are no more remaining items and this field will be left unset and omitted during
+ // serialization.
+ // Servers older than v1.15 do not set this field.
+ // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
+ // should not rely on the remainingItemCount to be set or to be exact.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ optional int64 remainingItemCount = 4;
}
// ListOptions is the query options to a standard REST list call.
@@ -356,15 +424,25 @@ message ListOptions {
// +optional
optional string fieldSelector = 2;
- // If true, partially initialized resources are included in the response.
- // +optional
- optional bool includeUninitialized = 6;
-
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
optional bool watch = 3;
+ // allowWatchBookmarks requests watch events with type "BOOKMARK".
+ // Servers that do not implement bookmarks may ignore this flag and
+ // bookmarks are sent at the server's discretion. Clients should not
+ // assume bookmarks are returned at any specific interval, nor may they
+ // assume the server will send any BOOKMARK event during a session.
+ // If this is not a watch, this field is ignored.
+ // If the feature gate WatchBookmarks is not enabled in apiserver,
+ // this field is ignored.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ optional bool allowWatchBookmarks = 9;
+
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
@@ -387,7 +465,7 @@ message ListOptions {
// more results are available. Servers may choose not to support the limit argument and will return
// all of the available results. If limit is specified and the continue field is empty, clients may
// assume that no more results are available. This field is not supported if watch is true.
- //
+ //
// The server guarantees that the objects returned when using continue will be identical to issuing
// a single list call without a limit - that is, no objects created, modified, or deleted after the
// first request is issued will be included in any subsequent continued requests. This is sometimes
@@ -397,19 +475,50 @@ message ListOptions {
// result was calculated is returned.
optional int64 limit = 7;
- // The continue option should be set when retrieving more results from the server. Since this value
- // is server defined, clients may only use the continue value from a previous query result with
- // identical query parameters (except for the value of continue) and the server may reject a continue
- // value it does not recognize. If the specified continue value is no longer valid whether due to
- // expiration (generally five to fifteen minutes) or a configuration change on the server the server
- // will respond with a 410 ResourceExpired error indicating the client must restart their list without
- // the continue field. This field is not supported when watch is true. Clients may start a watch from
- // the last resourceVersion value returned by the server and not miss any modifications.
+ // The continue option should be set when retrieving more results from the server. Since this value is
+ // server defined, clients may only use the continue value from a previous query result with identical
+ // query parameters (except for the value of continue) and the server may reject a continue value it
+ // does not recognize. If the specified continue value is no longer valid whether due to expiration
+ // (generally five to fifteen minutes) or a configuration change on the server, the server will
+ // respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+ // consistent list, it must restart their list without the continue field. Otherwise, the client may
+ // send another list request with the token received with the 410 error, the server will respond with
+ // a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+ // previous list results - objects that are created, modified, or deleted after the first list request
+ // will be included in the response, as long as their keys are after the "next key".
+ //
+ // This field is not supported when watch is true. Clients may start a watch from the last
+ // resourceVersion value returned by the server and not miss any modifications.
optional string continue = 8;
}
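The limit/continue contract described above is consumed in a loop: request a chunk, feed the returned continue token into the next request, and stop when the token comes back empty. A hedged sketch where listPage is a hypothetical stand-in for a real client call:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listPage is hypothetical; a real implementation would issue the request
// with opts.Limit and opts.Continue and return the chunk plus its ListMeta.
func listPage(opts metav1.ListOptions) ([]string, metav1.ListMeta, error) {
    return nil, metav1.ListMeta{}, nil
}

func main() {
    opts := metav1.ListOptions{Limit: 500}
    for {
        items, meta, err := listPage(opts)
        if err != nil {
            // A 410 ResourceExpired here means the token is no longer valid;
            // restart without Continue if a consistent list is required.
            panic(err)
        }
        fmt.Printf("received %d items\n", len(items))
        if meta.Continue == "" {
            break // last chunk
        }
        opts.Continue = meta.Continue
    }
}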
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
+message ManagedFieldsEntry {
+ // Manager is an identifier of the workflow managing these fields.
+ optional string manager = 1;
+
+ // Operation is the type of operation which led to this ManagedFieldsEntry being created.
+ // The only valid values for this field are 'Apply' and 'Update'.
+ optional string operation = 2;
+
+ // APIVersion defines the version of this resource that this field set
+ // applies to. The format is "group/version" just like the top-level
+ // APIVersion field. It is necessary to track the version of a field
+ // set because it cannot be automatically converted.
+ optional string apiVersion = 3;
+
+ // Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.
+ // +optional
+ optional Time time = 4;
+
+ // Fields identifies a set of fields.
+ // +optional
+ optional Fields fields = 5;
+}
+
// MicroTime is version of Time with microsecond level precision.
-//
+//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
@@ -445,12 +554,12 @@ message ObjectMeta {
// The provided value has the same validation rules as the Name field,
// and may be truncated by the length of the suffix required to make the value
// unique on the server.
- //
+ //
// If this field is specified and the generated name exists, the server will
// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
// should retry (optionally after the time indicated in the Retry-After header).
- //
+ //
// Applied only if Name is not specified.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
// +optional
@@ -460,7 +569,7 @@ message ObjectMeta {
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
- //
+ //
// Must be a DNS_LABEL.
// Cannot be updated.
// More info: http://kubernetes.io/docs/user-guide/namespaces
@@ -476,7 +585,7 @@ message ObjectMeta {
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
- //
+ //
// Populated by the system.
// Read-only.
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
@@ -488,7 +597,7 @@ message ObjectMeta {
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and pass them unmodified back to the server.
// They may only be valid for a particular resource or set of resources.
- //
+ //
// Populated by the system.
// Read-only.
// Value must be treated as opaque by clients.
@@ -504,7 +613,7 @@ message ObjectMeta {
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
- //
+ //
// Populated by the system.
// Read-only.
// Null for lists.
@@ -526,7 +635,7 @@ message ObjectMeta {
// exist after this timestamp, until an administrator or automated process can determine the
// resource is fully terminated.
// If not set, graceful deletion of the object has not been requested.
- //
+ //
// Populated by the system when a graceful deletion is requested.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
@@ -568,10 +677,12 @@ message ObjectMeta {
// this object has been completely initialized. Otherwise, the object is considered uninitialized
// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
// observe uninitialized objects.
- //
+ //
// When an object is created, the system will populate this list with the current set of initializers.
// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
// by any user.
+ //
+ // DEPRECATED - initializers are an alpha field and will be removed in v1.15.
optional Initializers initializers = 16;
// Must be empty before the object is deleted from the registry. Each entry
@@ -587,11 +698,24 @@ message ObjectMeta {
// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
// +optional
optional string clusterName = 15;
+
+ // ManagedFields maps workflow-id and version to the set of fields
+ // that are managed by that workflow. This is mostly for internal
+ // housekeeping, and users typically shouldn't need to set or
+ // understand this field. A workflow can be the user's name, a
+ // controller's name, or the name of a specific apply path like
+ // "ci-cd". The set of fields is always in the version that the
+ // workflow used when modifying the object.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ repeated ManagedFieldsEntry managedFields = 17;
}
// OwnerReference contains enough information to let you identify an owning
-// object. Currently, an owning object must be in the same namespace, so there
-// is no namespace field.
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
message OwnerReference {
// API version of the referent.
optional string apiVersion = 5;
@@ -622,15 +746,69 @@ message OwnerReference {
optional bool blockOwnerDeletion = 7;
}
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadata {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional ObjectMeta metadata = 1;
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadataList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional ListMeta metadata = 1;
+
+ // items contains each of the included items.
+ repeated PartialObjectMetadata items = 2;
+}
+
// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
message Patch {
}
+// PatchOptions may be provided when patching an API object.
+// PatchOptions is meant to be a superset of UpdateOptions.
+message PatchOptions {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ repeated string dryRun = 1;
+
+ // Force is going to "force" Apply requests. It means the user will
+ // re-acquire conflicting fields owned by other people. Force
+ // flag must be unset for non-apply patch requests.
+ // +optional
+ optional bool force = 2;
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint. This
+ // field is required for apply requests
+ // (application/apply-patch) but optional for non-apply patch
+ // types (JsonPatch, MergePatch, StrategicMergePatch).
+ // +optional
+ optional string fieldManager = 3;
+}
+
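For an apply patch (application/apply-patch), fieldManager is required and force is the explicit conflict override; both are plain fields on the PatchOptions struct added later in this diff. A short sketch, with an illustrative manager name:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    force := true
    opts := metav1.PatchOptions{
        FieldManager: "example-ci-cd", // required for apply requests
        Force:        &force,          // take over fields owned by other managers
    }
    fmt.Printf("manager=%q force=%v\n", opts.FieldManager, *opts.Force)
}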
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
message Preconditions {
// Specifies the target UID.
// +optional
optional string uid = 1;
+
+ // Specifies the target ResourceVersion
+ // +optional
+ optional string resourceVersion = 2;
}
// RootPaths lists the paths available at root.
@@ -704,7 +882,7 @@ message StatusCause {
// Arrays are zero-indexed. Fields may appear more than once in an array of
// causes due to fields having multiple errors.
// Optional.
- //
+ //
// Examples:
// "name" - the field "name" on the current resource
// "items[0].name" - the field "name" on the first array entry in "items"
@@ -752,10 +930,20 @@ message StatusDetails {
optional int32 retryAfterSeconds = 5;
}
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message TableOptions {
+ // includeObject decides whether to include each object along with its columnar information.
+ // Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+ // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+ // in version v1beta1 of the meta.k8s.io API group.
+ optional string includeObject = 1;
+}
+
// Time is a wrapper around time.Time which supports correct
// marshaling to YAML and JSON. Wrappers are provided for many
// of the factory methods that the time package offers.
-//
+//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
@@ -791,7 +979,7 @@ message Timestamp {
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
-//
+//
// +k8s:deepcopy-gen=false
message TypeMeta {
// Kind is a string value representing the REST resource this object represents.
@@ -810,8 +998,27 @@ message TypeMeta {
optional string apiVersion = 2;
}
+// UpdateOptions may be provided when updating an API object.
+// All fields in UpdateOptions should also be present in PatchOptions.
+message UpdateOptions {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ repeated string dryRun = 1;
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint.
+ // +optional
+ optional string fieldManager = 2;
+}
+
// Verbs masks the value so protobuf can generate
-//
+//
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Verbs {
@@ -821,7 +1028,7 @@ message Verbs {
}
// Event represents a single event to a watched resource.
-//
+//
// +protobuf=true
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index d845d7b0f..b4dc78b3e 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1
import (
+ "encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/fields"
@@ -227,8 +228,40 @@ func NewUIDPreconditions(uid string) *Preconditions {
return &Preconditions{UID: &u}
}
+// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set.
+func NewRVDeletionPrecondition(rv string) *DeleteOptions {
+ p := Preconditions{ResourceVersion: &rv}
+ return &DeleteOptions{Preconditions: &p}
+}
+
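A DeleteOptions built this way only succeeds if the object is still at the given resourceVersion; combined with dryRun it can be validated without persisting anything. A usage sketch with made-up values, assuming the vendored packages are importable:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // Delete only if the object is still at resourceVersion "4711".
    opts := metav1.NewRVDeletionPrecondition("4711")

    // Optionally make it a dry run first; DryRunAll is defined in types.go.
    opts.DryRun = []string{metav1.DryRunAll}

    fmt.Printf("rv precondition=%s dryRun=%v\n",
        *opts.Preconditions.ResourceVersion, opts.DryRun)
}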
// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
func HasObjectMetaSystemFieldValues(meta Object) bool {
return !meta.GetCreationTimestamp().Time.IsZero() ||
len(meta.GetUID()) != 0
}
+
+// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields
+// for a pre-existing object. This is opt-in for new objects with Status subresource.
+func ResetObjectMetaForStatus(meta, existingMeta Object) {
+ meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp())
+ meta.SetGeneration(existingMeta.GetGeneration())
+ meta.SetSelfLink(existingMeta.GetSelfLink())
+ meta.SetLabels(existingMeta.GetLabels())
+ meta.SetAnnotations(existingMeta.GetAnnotations())
+ meta.SetFinalizers(existingMeta.GetFinalizers())
+ meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
+ meta.SetManagedFields(existingMeta.GetManagedFields())
+}
+
+// MarshalJSON implements json.Marshaler
+func (f Fields) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&f.Map)
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (f *Fields) UnmarshalJSON(b []byte) error {
+ return json.Unmarshal(b, &f.Map)
+}
+
+var _ json.Marshaler = Fields{}
+var _ json.Unmarshaler = &Fields{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
index c13fe4af8..37141bd5d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
@@ -63,6 +63,8 @@ type Object interface {
SetOwnerReferences([]OwnerReference)
GetClusterName() string
SetClusterName(clusterName string)
+ GetManagedFields() []ManagedFieldsEntry
+ SetManagedFields(managedFields []ManagedFieldsEntry)
}
// ListMetaAccessor retrieves the list interface from an object
@@ -92,6 +94,8 @@ type ListInterface interface {
SetSelfLink(selfLink string)
GetContinue() string
SetContinue(c string)
+ GetRemainingItemCount() *int64
+ SetRemainingItemCount(c *int64)
}
// Type exposes the type and APIVersion of versioned or internal API objects.
@@ -103,12 +107,16 @@ type Type interface {
SetKind(kind string)
}
+var _ ListInterface = &ListMeta{}
+
func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion }
func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink }
func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
func (meta *ListMeta) GetContinue() string { return meta.Continue }
func (meta *ListMeta) SetContinue(c string) { meta.Continue = c }
+func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount }
+func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c }
func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
@@ -162,55 +170,13 @@ func (meta *ObjectMeta) GetInitializers() *Initializers { return m
func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers }
func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers }
func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers }
-
-func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference {
- if meta.OwnerReferences == nil {
- return nil
- }
- ret := make([]OwnerReference, len(meta.OwnerReferences))
- for i := 0; i < len(meta.OwnerReferences); i++ {
- ret[i].Kind = meta.OwnerReferences[i].Kind
- ret[i].Name = meta.OwnerReferences[i].Name
- ret[i].UID = meta.OwnerReferences[i].UID
- ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
- if meta.OwnerReferences[i].Controller != nil {
- value := *meta.OwnerReferences[i].Controller
- ret[i].Controller = &value
- }
- if meta.OwnerReferences[i].BlockOwnerDeletion != nil {
- value := *meta.OwnerReferences[i].BlockOwnerDeletion
- ret[i].BlockOwnerDeletion = &value
- }
- }
- return ret
-}
-
+func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences }
func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) {
- if references == nil {
- meta.OwnerReferences = nil
- return
- }
- newReferences := make([]OwnerReference, len(references))
- for i := 0; i < len(references); i++ {
- newReferences[i].Kind = references[i].Kind
- newReferences[i].Name = references[i].Name
- newReferences[i].UID = references[i].UID
- newReferences[i].APIVersion = references[i].APIVersion
- if references[i].Controller != nil {
- value := *references[i].Controller
- newReferences[i].Controller = &value
- }
- if references[i].BlockOwnerDeletion != nil {
- value := *references[i].BlockOwnerDeletion
- newReferences[i].BlockOwnerDeletion = &value
- }
- }
- meta.OwnerReferences = newReferences
-}
-
-func (meta *ObjectMeta) GetClusterName() string {
- return meta.ClusterName
+ meta.OwnerReferences = references
}
-func (meta *ObjectMeta) SetClusterName(clusterName string) {
- meta.ClusterName = clusterName
+func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName }
+func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName }
+func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields }
+func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) {
+ meta.ManagedFields = managedFields
}
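With the deep copies above removed, GetOwnerReferences and SetOwnerReferences now share the backing slice with the ObjectMeta. A small sketch of the resulting aliasing; callers that previously relied on the copy must copy the slice themselves (pointer fields inside each reference remain shared even then):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    obj := metav1.ObjectMeta{
        OwnerReferences: []metav1.OwnerReference{{Kind: "Deployment", Name: "web"}},
    }

    refs := obj.GetOwnerReferences() // same backing array as obj.OwnerReferences
    refs[0].Name = "renamed"
    fmt.Println(obj.OwnerReferences[0].Name) // renamed

    // An explicit copy restores the old isolation for the slice elements.
    safe := append([]metav1.OwnerReference(nil), obj.GetOwnerReferences()...)
    safe[0].Name = "isolated"
    fmt.Println(obj.OwnerReferences[0].Name) // still renamed
}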
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
index 7e5bc2d4e..cdd9a6a7a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -41,11 +41,6 @@ func (t *MicroTime) DeepCopyInto(out *MicroTime) {
*out = *t
}
-// String returns the representation of the time.
-func (t MicroTime) String() string {
- return t.Time.String()
-}
-
// NewMicroTime returns a wrapped instance of the provided time
func NewMicroTime(time time.Time) MicroTime {
return MicroTime{time}
@@ -72,22 +67,40 @@ func (t *MicroTime) IsZero() bool {
// Before reports whether the time instant t is before u.
func (t *MicroTime) Before(u *MicroTime) bool {
- return t.Time.Before(u.Time)
+ if t != nil && u != nil {
+ return t.Time.Before(u.Time)
+ }
+ return false
}
// Equal reports whether the time instant t is equal to u.
func (t *MicroTime) Equal(u *MicroTime) bool {
- return t.Time.Equal(u.Time)
+ if t == nil && u == nil {
+ return true
+ }
+ if t != nil && u != nil {
+ return t.Time.Equal(u.Time)
+ }
+ return false
}
// BeforeTime reports whether the time instant t is before second-level precision u.
func (t *MicroTime) BeforeTime(u *Time) bool {
- return t.Time.Before(u.Time)
+ if t != nil && u != nil {
+ return t.Time.Before(u.Time)
+ }
+ return false
}
// EqualTime reports whether the time instant t is equal to second-level precision u.
func (t *MicroTime) EqualTime(u *Time) bool {
- return t.Time.Equal(u.Time)
+ if t == nil && u == nil {
+ return true
+ }
+ if t != nil && u != nil {
+ return t.Time.Equal(u.Time)
+ }
+ return false
}
// UnixMicro returns the local time corresponding to the given Unix time
@@ -104,7 +117,10 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error {
}
var str string
- json.Unmarshal(b, &str)
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
pt, err := time.Parse(RFC3339Micro, str)
if err != nil {
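The comparison helpers above are now tolerant of nil receivers and arguments: two unset times compare equal, and nothing is ever "before" when either side is nil. A quick sketch of the semantics, assuming the vendored packages are importable:

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    var unset *metav1.MicroTime // e.g. an optional field that was never set
    now := metav1.NewMicroTime(time.Now())

    fmt.Println(unset.Equal(nil))   // true: nil equals nil
    fmt.Println(unset.Equal(&now))  // false: only one side is set
    fmt.Println(unset.Before(&now)) // false: nil is never "before"
    fmt.Println(now.Before(nil))    // false
}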
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
index b300d3701..368efe1ef 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
@@ -19,6 +19,7 @@ package v1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// GroupName is the group name for this API.
@@ -52,14 +53,16 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
&ExportOptions{},
&GetOptions{},
&DeleteOptions{},
+ &CreateOptions{},
+ &UpdateOptions{},
+ &PatchOptions{},
)
- scheme.AddConversionFuncs(
- Convert_versioned_Event_to_watch_Event,
- Convert_versioned_InternalEvent_to_versioned_Event,
- Convert_watch_Event_to_versioned_Event,
- Convert_versioned_Event_to_versioned_InternalEvent,
- )
-
+ utilruntime.Must(scheme.AddConversionFuncs(
+ Convert_v1_WatchEvent_To_watch_Event,
+ Convert_v1_InternalEvent_To_v1_WatchEvent,
+ Convert_watch_Event_To_v1_WatchEvent,
+ Convert_v1_WatchEvent_To_v1_InternalEvent,
+ ))
// Register Unversioned types under their own special group
scheme.AddUnversionedTypes(Unversioned,
&Status{},
@@ -70,8 +73,8 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
)
// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
- AddConversionFuncs(scheme)
- RegisterDefaults(scheme)
+ utilruntime.Must(AddConversionFuncs(scheme))
+ utilruntime.Must(RegisterDefaults(scheme))
}
// scheme is the registry for the common types that adhere to the meta v1 API spec.
@@ -86,8 +89,28 @@ func init() {
&ExportOptions{},
&GetOptions{},
&DeleteOptions{},
+ &CreateOptions{},
+ &UpdateOptions{},
+ &PatchOptions{},
)
+ if err := AddMetaToScheme(scheme); err != nil {
+ panic(err)
+ }
+
// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
- RegisterDefaults(scheme)
+ utilruntime.Must(RegisterDefaults(scheme))
+}
+
+func AddMetaToScheme(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Table{},
+ &TableOptions{},
+ &PartialObjectMetadata{},
+ &PartialObjectMetadataList{},
+ )
+
+ return scheme.AddConversionFuncs(
+ Convert_Slice_string_To_v1_IncludeObjectPolicy,
+ )
}
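These helpers are what downstream API groups call when building their schemes. A minimal sketch wiring them into a fresh runtime.Scheme, with an illustrative group name:

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    scheme := runtime.NewScheme()

    // Registers the shared meta option types (Get/Delete/Create/Update/PatchOptions,
    // ExportOptions, ...) plus conversions and defaults for the given group version.
    metav1.AddToGroupVersion(scheme, schema.GroupVersion{Group: "example.dev", Version: "v1"})

    // Registers Table, TableOptions and the PartialObjectMetadata kinds.
    if err := metav1.AddMetaToScheme(scheme); err != nil {
        panic(err)
    }
}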
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
index 5041954f7..fe510ed9e 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -20,7 +20,7 @@ import (
"encoding/json"
"time"
- "github.com/google/gofuzz"
+ fuzz "github.com/google/gofuzz"
)
// Time is a wrapper around time.Time which supports correct
@@ -41,11 +41,6 @@ func (t *Time) DeepCopyInto(out *Time) {
*out = *t
}
-// String returns the representation of the time.
-func (t Time) String() string {
- return t.Time.String()
-}
-
// NewTime returns a wrapped instance of the provided time
func NewTime(time time.Time) Time {
return Time{time}
@@ -72,7 +67,10 @@ func (t *Time) IsZero() bool {
// Before reports whether the time instant t is before u.
func (t *Time) Before(u *Time) bool {
- return t.Time.Before(u.Time)
+ if t != nil && u != nil {
+ return t.Time.Before(u.Time)
+ }
+ return false
}
// Equal reports whether the time instant t is equal to u.
@@ -106,7 +104,10 @@ func (t *Time) UnmarshalJSON(b []byte) error {
}
var str string
- json.Unmarshal(b, &str)
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
pt, err := time.Parse(time.RFC3339, str)
if err != nil {
@@ -144,8 +145,12 @@ func (t Time) MarshalJSON() ([]byte, error) {
// Encode unset/nil objects as JSON's "null".
return []byte("null"), nil
}
-
- return json.Marshal(t.UTC().Format(time.RFC3339))
+ buf := make([]byte, 0, len(time.RFC3339)+2)
+ buf = append(buf, '"')
+ // time cannot contain non escapable JSON characters
+ buf = t.UTC().AppendFormat(buf, time.RFC3339)
+ buf = append(buf, '"')
+ return buf, nil
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
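The rewritten MarshalJSON above writes the RFC3339 form directly between two quote bytes instead of formatting to a string and JSON-encoding it; the output is identical because RFC3339 timestamps contain no characters that need JSON escaping. A stdlib-only sketch of the equivalence:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

func main() {
    t := time.Date(2019, time.May, 14, 8, 30, 0, 0, time.UTC)

    // Former approach: format to a string, then JSON-encode it.
    viaMarshal, _ := json.Marshal(t.UTC().Format(time.RFC3339))

    // New approach: append the formatted time between literal quotes.
    buf := make([]byte, 0, len(time.RFC3339)+2)
    buf = append(buf, '"')
    buf = t.UTC().AppendFormat(buf, time.RFC3339)
    buf = append(buf, '"')

    fmt.Println(string(viaMarshal)) // "2019-05-14T08:30:00Z"
    fmt.Println(string(buf))        // "2019-05-14T08:30:00Z"
}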
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 917efb37f..46ef65f45 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -76,10 +76,26 @@ type ListMeta struct {
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
- // list may not be possible if the server configuration has changed or more than a few minutes have
- // passed. The resourceVersion field returned when using this continue value will be identical to
- // the value in the first response.
+ // consistent list may not be possible if the server configuration has changed or more than a few
+ // minutes have passed. The resourceVersion field returned when using this continue value will be
+ // identical to the value in the first response, unless you have received this token from an error
+ // message.
Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
+
+ // remainingItemCount is the number of subsequent items in the list which are not included in this
+ // list response. If the list request contained label or field selectors, then the number of
+ // remaining items is unknown and the field will be left unset and omitted during serialization.
+ // If the list is complete (either because it is not chunking or because this is the last chunk),
+ // then there are no more remaining items and this field will be left unset and omitted during
+ // serialization.
+ // Servers older than v1.15 do not set this field.
+ // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
+ // should not rely on the remainingItemCount to be set or to be exact.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"`
}
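Because remainingItemCount is a pointer that servers may legitimately leave unset, consumers should treat it as an optional estimate rather than an exact total. A small sketch of reading it defensively (values are illustrative):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// estimateTotal returns items already received plus whatever the server
// says is left, if it reported anything at all.
func estimateTotal(received int, meta metav1.ListMeta) (int64, bool) {
    if meta.RemainingItemCount == nil {
        return 0, false // no estimate available
    }
    return int64(received) + *meta.RemainingItemCount, true
}

func main() {
    remaining := int64(1200)
    meta := metav1.ListMeta{Continue: "opaque-token", RemainingItemCount: &remaining}

    if total, ok := estimateTotal(500, meta); ok {
        fmt.Printf("roughly %d objects in the collection\n", total)
    }
}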
// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here
@@ -234,6 +250,8 @@ type ObjectMeta struct {
// When an object is created, the system will populate this list with the current set of initializers.
// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
// by any user.
+ //
+ // DEPRECATED - initializers are an alpha field and will be removed in v1.15.
Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"`
// Must be empty before the object is deleted from the registry. Each entry
@@ -249,6 +267,19 @@ type ObjectMeta struct {
// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
// +optional
ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
+
+ // ManagedFields maps workflow-id and version to the set of fields
+ // that are managed by that workflow. This is mostly for internal
+ // housekeeping, and users typically shouldn't need to set or
+ // understand this field. A workflow can be the user's name, a
+ // controller's name, or the name of a specific apply path like
+ // "ci-cd". The set of fields is always in the version that the
+ // workflow used when modifying the object.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"`
}
// Initializers tracks the progress of initialization.
@@ -285,8 +316,8 @@ const (
)
// OwnerReference contains enough information to let you identify an owning
-// object. Currently, an owning object must be in the same namespace, so there
-// is no namespace field.
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
type OwnerReference struct {
// API version of the referent.
APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
@@ -326,13 +357,27 @@ type ListOptions struct {
// Defaults to everything.
// +optional
FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
- // If true, partially initialized resources are included in the response.
- // +optional
- IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
+
+ // +k8s:deprecated=includeUninitialized,protobuf=6
+
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
+ // allowWatchBookmarks requests watch events with type "BOOKMARK".
+ // Servers that do not implement bookmarks may ignore this flag and
+ // bookmarks are sent at the server's discretion. Clients should not
+ // assume bookmarks are returned at any specific interval, nor may they
+ // assume the server will send any BOOKMARK event during a session.
+ // If this is not a watch, this field is ignored.
+ // If the feature gate WatchBookmarks is not enabled in apiserver,
+ // this field is ignored.
+ //
+ // This field is alpha and can be changed or removed without notice.
+ //
+ // +optional
+ AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`
+
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
@@ -363,25 +408,34 @@ type ListOptions struct {
// updated during a chunked list the version of the object that was present at the time the first list
// result was calculated is returned.
Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"`
- // The continue option should be set when retrieving more results from the server. Since this value
- // is server defined, clients may only use the continue value from a previous query result with
- // identical query parameters (except for the value of continue) and the server may reject a continue
- // value it does not recognize. If the specified continue value is no longer valid whether due to
- // expiration (generally five to fifteen minutes) or a configuration change on the server the server
- // will respond with a 410 ResourceExpired error indicating the client must restart their list without
- // the continue field. This field is not supported when watch is true. Clients may start a watch from
- // the last resourceVersion value returned by the server and not miss any modifications.
+ // The continue option should be set when retrieving more results from the server. Since this value is
+ // server defined, clients may only use the continue value from a previous query result with identical
+ // query parameters (except for the value of continue) and the server may reject a continue value it
+ // does not recognize. If the specified continue value is no longer valid whether due to expiration
+ // (generally five to fifteen minutes) or a configuration change on the server, the server will
+ // respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+ // consistent list, it must restart their list without the continue field. Otherwise, the client may
+ // send another list request with the token received with the 410 error, the server will respond with
+ // a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+ // previous list results - objects that are created, modified, or deleted after the first list request
+ // will be included in the response, as long as their keys are after the "next key".
+ //
+ // This field is not supported when watch is true. Clients may start a watch from the last
+ // resourceVersion value returned by the server and not miss any modifications.
Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ExportOptions is the query options to the standard REST get call.
+// Deprecated. Planned for removal in 1.18.
type ExportOptions struct {
TypeMeta `json:",inline"`
// Should this value be exported. Export strips fields that a user can not specify.
+ // Deprecated. Planned for removal in 1.18.
Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
+ // Deprecated. Planned for removal in 1.18.
Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
}
@@ -395,9 +449,7 @@ type GetOptions struct {
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
- // If true, partially initialized resources are included in the response.
- // +optional
- IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"`
+ // +k8s:deprecated=includeUninitialized,protobuf=2
}
// DeletionPropagation decides if a deletion will propagate to the dependents of
@@ -418,6 +470,12 @@ const (
DeletePropagationForeground DeletionPropagation = "Foreground"
)
+const (
+ // DryRunAll means to complete all processing stages, but don't
+ // persist changes to storage.
+ DryRunAll = "All"
+)
+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeleteOptions may be provided when deleting an API object.
@@ -453,6 +511,92 @@ type DeleteOptions struct {
// foreground.
// +optional
PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"`
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CreateOptions may be provided when creating an API object.
+type CreateOptions struct {
+ TypeMeta `json:",inline"`
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+ // +k8s:deprecated=includeUninitialized,protobuf=2
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint.
+ // +optional
+ FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PatchOptions may be provided when patching an API object.
+// PatchOptions is meant to be a superset of UpdateOptions.
+type PatchOptions struct {
+ TypeMeta `json:",inline"`
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+
+ // Force is going to "force" Apply requests. It means the user will
+ // re-acquire conflicting fields owned by other people. Force
+ // flag must be unset for non-apply patch requests.
+ // +optional
+ Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"`
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint. This
+ // field is required for apply requests
+ // (application/apply-patch) but optional for non-apply patch
+ // types (JsonPatch, MergePatch, StrategicMergePatch).
+ // +optional
+ FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UpdateOptions may be provided when updating an API object.
+// All fields in UpdateOptions should also be present in PatchOptions.
+type UpdateOptions struct {
+ TypeMeta `json:",inline"`
+
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ // +optional
+ DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // equal to 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint.
+ // +optional
+ FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
@@ -460,6 +604,9 @@ type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+ // Specifies the target ResourceVersion
+ // +optional
+ ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -658,6 +805,10 @@ const (
// Status code 406
StatusReasonNotAcceptable StatusReason = "NotAcceptable"
+ // StatusReasonRequestEntityTooLarge means that the request entity is too large.
+ // Status code 413
+ StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge"
+
// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
// API calls that return UnsupportedMediaType can never succeed.
@@ -733,6 +884,9 @@ const (
// without the expected return type. The presence of this cause indicates the error may be
// due to an intervening proxy or the server software malfunctioning.
CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse"
+ // FieldManagerConflict is used to report when another client claims to manage this field.
+ // It should only be returned for a request using server-side apply.
+ CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -799,7 +953,8 @@ type APIGroup struct {
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
- ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
+ // +optional
+ ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
}
// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
@@ -846,6 +1001,15 @@ type APIResource struct {
ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"`
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"`
+ // The hash value of the storage version, the version this resource is
+ // converted to when written to the data store. Value must be treated
+ // as opaque by clients. Only equality comparison on the value is valid.
+ // This is an alpha feature and may change or be removed in the future.
+ // The field is populated by the apiserver only if the
+ // StorageVersionHash feature gate is enabled.
+ // This field will remain optional even if it graduates.
+ // +optional
+ StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"`
}
// Verbs masks the value so protobuf can generate
@@ -947,3 +1111,210 @@ const (
LabelSelectorOpExists LabelSelectorOperator = "Exists"
LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
)
+
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
+type ManagedFieldsEntry struct {
+ // Manager is an identifier of the workflow managing these fields.
+ Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"`
+ // Operation is the type of operation which led to this ManagedFieldsEntry being created.
+ // The only valid values for this field are 'Apply' and 'Update'.
+ Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"`
+ // APIVersion defines the version of this resource that this field set
+ // applies to. The format is "group/version" just like the top-level
+ // APIVersion field. It is necessary to track the version of a field
+ // set because it cannot be automatically converted.
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+ // Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.
+ // +optional
+ Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"`
+ // Fields identifies a set of fields.
+ // +optional
+ Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"`
+}
+
+// ManagedFieldsOperationType is the type of operation which led to a ManagedFieldsEntry being created.
+type ManagedFieldsOperationType string
+
+const (
+ ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply"
+ ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update"
+)
+
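ManagedFieldsEntry is primarily read, not written, by ordinary clients; together with the GetManagedFields accessor added to the Object interface it shows which manager last touched an object and in which version. A hedged sketch over an in-memory ObjectMeta with illustrative values:

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    now := metav1.NewTime(time.Now())
    obj := metav1.ObjectMeta{
        Name: "web",
        ManagedFields: []metav1.ManagedFieldsEntry{
            {Manager: "kubectl", Operation: metav1.ManagedFieldsOperationApply, APIVersion: "apps/v1"},
            {Manager: "example-controller", Operation: metav1.ManagedFieldsOperationUpdate, APIVersion: "apps/v1", Time: &now},
        },
    }

    for _, e := range obj.GetManagedFields() {
        fmt.Printf("%-20s %-7s %s\n", e.Manager, e.Operation, e.APIVersion)
    }
}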
+// Fields stores a set of fields in a data structure like a Trie.
+// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff
+type Fields struct {
+ // Map stores a set of fields in a data structure like a Trie.
+ //
+ // Each key is either a '.' representing the field itself, and will always map to an empty set,
+ // or a string representing a sub-field or item. The string will follow one of these four formats:
+ // 'f:<name>', where <name> is the name of a field in a struct, or key in a map
+ // 'v:<value>', where <value> is the exact json formatted value of a list item
+ // 'i:<index>', where <index> is the position of an item in a list
+ // 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
+ // If a key maps to an empty Fields value, the field that key represents is part of the set.
+ //
+ // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal
+ Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"`
+}
+
+// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf
+// generation to support a meta type that can accept any valid JSON. This can be introduced
+// in a v1 because clients a) receive an error if they try to access proto today, and b)
+// once introduced they would be able to gracefully switch over to using it.
+
+// Table is a tabular representation of a set of API resources. The server transforms the
+// object into a set of preferred columns for quickly reviewing the objects.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=false
+type Table struct {
+ TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ ListMeta `json:"metadata,omitempty"`
+
+ // columnDefinitions describes each column in the returned items array. The number of cells per row
+ // will always match the number of column definitions.
+ ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"`
+ // rows is the list of items in the table.
+ Rows []TableRow `json:"rows"`
+}
+
+// TableColumnDefinition contains information about a column returned in the Table.
+// +protobuf=false
+type TableColumnDefinition struct {
+ // name is a human readable name for the column.
+ Name string `json:"name"`
+ // type is an OpenAPI type definition for this column, such as number, integer, string, or
+ // array.
+ // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+ Type string `json:"type"`
+ // format is an optional OpenAPI type modifier for this column. A format modifies the type and
+ // imposes additional rules, like date or time formatting for a string. The 'name' format is applied
+ // to the primary identifier column, which has type 'string', to assist clients in identifying the
+ // column as the resource name.
+ // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+ Format string `json:"format"`
+ // description is a human readable description of this column.
+ Description string `json:"description"`
+ // priority is an integer defining the relative importance of this column compared to others. Lower
+ // numbers are considered higher priority. Columns that may be omitted in limited space scenarios
+ // should be given a higher priority.
+ Priority int32 `json:"priority"`
+}
+
+// TableRow is an individual row in a table.
+// +protobuf=false
+type TableRow struct {
+ // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or
+ // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a
+ // more detailed description.
+ Cells []interface{} `json:"cells"`
+ // conditions describe additional status of a row that are relevant for a human user. These conditions
+ // apply to the row, not to the object, and will be specific to table output. The only defined
+ // condition type is 'Completed', for a row that indicates a resource that has run to completion and
+ // can be given less visual priority.
+ // +optional
+ Conditions []TableRowCondition `json:"conditions,omitempty"`
+ // This field contains the requested additional information about each object based on the includeObject
+ // policy when requesting the Table. If "None", this field is empty; if "Object", this will be the
+ // default serialization of the object for the current API version; and if "Metadata" (the default), it
+ // will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.
+ // The media type of the object will always match the enclosing list - if this is a JSON table, these
+ // will be JSON encoded objects.
+ // +optional
+ Object runtime.RawExtension `json:"object,omitempty"`
+}
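A minimal sketch of how these types fit together (hypothetical values, not from the vendored source): one column definition with exactly one matching cell per row.

    t := Table{
        ColumnDefinitions: []TableColumnDefinition{
            {Name: "Name", Type: "string", Format: "name", Description: "Name of the resource", Priority: 0},
        },
        Rows: []TableRow{
            {Cells: []interface{}{"example-pod"}}, // one cell per column definition
        },
    }
    _ = t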
+
+// TableRowCondition allows a row to be marked with additional information.
+// +protobuf=false
+type TableRowCondition struct {
+ // Type of row condition. The only defined value is 'Completed' indicating that the
+ // object this row represents has reached a completed state and may be given less visual
+ // priority than other rows. Clients are not required to honor any conditions but should
+ // be consistent where possible about handling the conditions.
+ Type RowConditionType `json:"type"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status"`
+ // (brief) machine readable reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Human readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+type RowConditionType string
+
+// These are valid conditions of a row. This list is not exhaustive and new conditions may be
+// included by other resources.
+const (
+ // RowCompleted means the underlying resource has reached completion and may be given less
+ // visual priority than other resources.
+ RowCompleted RowConditionType = "Completed"
+)
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// IncludeObjectPolicy controls which portion of the object is returned with a Table.
+type IncludeObjectPolicy string
+
+const (
+ // IncludeNone returns no object.
+ IncludeNone IncludeObjectPolicy = "None"
+ // IncludeMetadata serializes the object containing only its metadata field.
+ IncludeMetadata IncludeObjectPolicy = "Metadata"
+ // IncludeObject contains the full object.
+ IncludeObject IncludeObjectPolicy = "Object"
+)
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type TableOptions struct {
+ TypeMeta `json:",inline"`
+
+ // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions
+ // and may be removed as a field in a future release.
+ NoHeaders bool `json:"-"`
+
+ // includeObject decides whether to include each object along with its columnar information.
+ // Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+ // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+ // in version v1beta1 of the meta.k8s.io API group.
+ IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"`
+}
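A small sketch of the option in use (hypothetical, not from the vendored source); a caller that only needs the columnar data can ask the server to omit the per-row object payload:

    opts := TableOptions{IncludeObject: IncludeNone} // rows come back with empty Object fields
    _ = opts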
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata struct {
+ TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadataList struct {
+ TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items contains each of the included items.
+ Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index caf929ee0..f35c22bf1 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2016 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ package v1
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
-// AUTO-GENERATED FUNCTIONS START HERE
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_APIGroup = map[string]string{
"": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
"name": "name is the name of the group.",
@@ -49,16 +49,17 @@ func (APIGroupList) SwaggerDoc() map[string]string {
}
var map_APIResource = map[string]string{
- "": "APIResource specifies the name of a resource and whether it is namespaced.",
- "name": "name is the plural name of the resource.",
- "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
- "namespaced": "namespaced indicates if a resource is namespaced or not.",
- "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
- "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
- "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
- "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
- "shortNames": "shortNames is a list of suggested short names of the resource.",
- "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+ "": "APIResource specifies the name of a resource and whether it is namespaced.",
+ "name": "name is the plural name of the resource.",
+ "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
+ "namespaced": "namespaced indicates if a resource is namespaced or not.",
+ "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
+ "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
+ "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+ "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
+ "shortNames": "shortNames is a list of suggested short names of the resource.",
+ "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+ "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
}
func (APIResource) SwaggerDoc() map[string]string {
@@ -85,12 +86,23 @@ func (APIVersions) SwaggerDoc() map[string]string {
return map_APIVersions
}
+var map_CreateOptions = map[string]string{
+ "": "CreateOptions may be provided when creating an API object.",
+ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+}
+
+func (CreateOptions) SwaggerDoc() map[string]string {
+ return map_CreateOptions
+}
+
var map_DeleteOptions = map[string]string{
"": "DeleteOptions may be provided when deleting an API object.",
"gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
"preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
"propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
}
func (DeleteOptions) SwaggerDoc() map[string]string {
@@ -98,19 +110,26 @@ func (DeleteOptions) SwaggerDoc() map[string]string {
}
var map_ExportOptions = map[string]string{
- "": "ExportOptions is the query options to the standard REST get call.",
- "export": "Should this value be exported. Export strips fields that a user can not specify.",
- "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.",
+ "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.",
+ "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.",
+ "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.",
}
func (ExportOptions) SwaggerDoc() map[string]string {
return map_ExportOptions
}
+var map_Fields = map[string]string{
+ "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff",
+}
+
+func (Fields) SwaggerDoc() map[string]string {
+ return map_Fields
+}
+
var map_GetOptions = map[string]string{
- "": "GetOptions is the standard query options to the standard REST get call.",
- "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
- "includeUninitialized": "If true, partially initialized resources are included in the response.",
+ "": "GetOptions is the standard query options to the standard REST get call.",
+ "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
}
func (GetOptions) SwaggerDoc() map[string]string {
@@ -178,10 +197,11 @@ func (List) SwaggerDoc() map[string]string {
}
var map_ListMeta = map[string]string{
- "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
- "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.",
- "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
- "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.",
+ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+ "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.",
+ "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
+ "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
+ "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.\n\nThis field is alpha and can be changed or removed without notice.",
}
func (ListMeta) SwaggerDoc() map[string]string {
@@ -189,21 +209,34 @@ func (ListMeta) SwaggerDoc() map[string]string {
}
var map_ListOptions = map[string]string{
- "": "ListOptions is the query options to a standard REST list call.",
- "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
- "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
- "includeUninitialized": "If true, partially initialized resources are included in the response.",
- "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
- "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
- "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
- "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
- "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "": "ListOptions is the query options to a standard REST list call.",
+ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is alpha and can be changed or removed without notice.",
+ "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
}
func (ListOptions) SwaggerDoc() map[string]string {
return map_ListOptions
}
+var map_ManagedFieldsEntry = map[string]string{
+ "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+ "manager": "Manager is an identifier of the workflow managing these fields.",
+ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+ "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+ "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
+ "fields": "Fields identifies a set of fields.",
+}
+
+func (ManagedFieldsEntry) SwaggerDoc() map[string]string {
+ return map_ManagedFieldsEntry
+}
+
var map_ObjectMeta = map[string]string{
"": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
"name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
@@ -219,9 +252,10 @@ var map_ObjectMeta = map[string]string{
"labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
"annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
"ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
- "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.",
+ "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.",
"finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
"clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
+ "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.",
}
func (ObjectMeta) SwaggerDoc() map[string]string {
@@ -229,7 +263,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string {
}
var map_OwnerReference = map[string]string{
- "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.",
+ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
"apiVersion": "API version of the referent.",
"kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
@@ -242,6 +276,25 @@ func (OwnerReference) SwaggerDoc() map[string]string {
return map_OwnerReference
}
+var map_PartialObjectMetadata = map[string]string{
+ "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+}
+
+func (PartialObjectMetadata) SwaggerDoc() map[string]string {
+ return map_PartialObjectMetadata
+}
+
+var map_PartialObjectMetadataList = map[string]string{
+ "": "PartialObjectMetadataList contains a list of objects containing only their metadata",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "items contains each of the included items.",
+}
+
+func (PartialObjectMetadataList) SwaggerDoc() map[string]string {
+ return map_PartialObjectMetadataList
+}
+
var map_Patch = map[string]string{
"": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
}
@@ -250,9 +303,21 @@ func (Patch) SwaggerDoc() map[string]string {
return map_Patch
}
+var map_PatchOptions = map[string]string{
+ "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.",
+ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+}
+
+func (PatchOptions) SwaggerDoc() map[string]string {
+ return map_PatchOptions
+}
+
var map_Preconditions = map[string]string{
- "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
- "uid": "Specifies the target UID.",
+ "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+ "uid": "Specifies the target UID.",
+ "resourceVersion": "Specifies the target ResourceVersion",
}
func (Preconditions) SwaggerDoc() map[string]string {
@@ -317,6 +382,62 @@ func (StatusDetails) SwaggerDoc() map[string]string {
return map_StatusDetails
}
+var map_Table = map[string]string{
+ "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
+ "rows": "rows is the list of items in the table.",
+}
+
+func (Table) SwaggerDoc() map[string]string {
+ return map_Table
+}
+
+var map_TableColumnDefinition = map[string]string{
+ "": "TableColumnDefinition contains information about a column returned in the Table.",
+ "name": "name is a human readable name for the column.",
+ "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+ "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+ "description": "description is a human readable description of this column.",
+ "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.",
+}
+
+func (TableColumnDefinition) SwaggerDoc() map[string]string {
+ return map_TableColumnDefinition
+}
+
+var map_TableOptions = map[string]string{
+ "": "TableOptions are used when a Table is requested by the caller.",
+ "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
+}
+
+func (TableOptions) SwaggerDoc() map[string]string {
+ return map_TableOptions
+}
+
+var map_TableRow = map[string]string{
+ "": "TableRow is an individual row in a table.",
+ "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
+ "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.",
+ "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.",
+}
+
+func (TableRow) SwaggerDoc() map[string]string {
+ return map_TableRow
+}
+
+var map_TableRowCondition = map[string]string{
+ "": "TableRowCondition allows a row to be marked with additional information.",
+ "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "reason": "(brief) machine readable reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (TableRowCondition) SwaggerDoc() map[string]string {
+ return map_TableRowCondition
+}
+
var map_TypeMeta = map[string]string{
"": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
"kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
@@ -327,4 +448,14 @@ func (TypeMeta) SwaggerDoc() map[string]string {
return map_TypeMeta
}
+var map_UpdateOptions = map[string]string{
+ "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.",
+ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+}
+
+func (UpdateOptions) SwaggerDoc() map[string]string {
+ return map_UpdateOptions
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
new file mode 100644
index 000000000..3b07e86db
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -0,0 +1,470 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+ gojson "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+)
+
+// NestedFieldCopy returns a deep copy of the value of a nested field.
+// Returns false if the value is missing.
+// No error is returned for a nil field.
+func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ return runtime.DeepCopyJSONValue(val), true, nil
+}
+
+// NestedFieldNoCopy returns a reference to a nested field.
+// Returns false if value is not found and an error if unable
+// to traverse obj.
+func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+ var val interface{} = obj
+
+ for i, field := range fields {
+ if val == nil {
+ return nil, false, nil
+ }
+ if m, ok := val.(map[string]interface{}); ok {
+ val, ok = m[field]
+ if !ok {
+ return nil, false, nil
+ }
+ } else {
+ return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val)
+ }
+ }
+ return val, true, nil
+}
+
+// NestedString returns the string value of a nested field.
+// Returns false if value is not found and an error if not a string.
+func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return "", found, err
+ }
+ s, ok := val.(string)
+ if !ok {
+ return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val)
+ }
+ return s, true, nil
+}
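A sketch of the accessor semantics above on a hand-built map (hypothetical names, not from the vendored source): a missing path reports found == false with a nil error, while a type mismatch reports an error.

    obj := map[string]interface{}{
        "metadata": map[string]interface{}{"name": "demo"},
        "spec":     map[string]interface{}{"replicas": int64(3)},
    }
    name, found, err := NestedString(obj, "metadata", "name")  // "demo", true, nil
    _, missing, _ := NestedFieldNoCopy(obj, "status", "phase") // missing == false, no error
    _, _, typeErr := NestedString(obj, "spec", "replicas")     // error: value is int64, not string
    _, _, _, _, _ = name, found, err, missing, typeErr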
+
+// NestedBool returns the bool value of a nested field.
+// Returns false if value is not found and an error if not a bool.
+func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return false, found, err
+ }
+ b, ok := val.(bool)
+ if !ok {
+ return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val)
+ }
+ return b, true, nil
+}
+
+// NestedFloat64 returns the float64 value of a nested field.
+// Returns false if value is not found and an error if not a float64.
+func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found, err
+ }
+ f, ok := val.(float64)
+ if !ok {
+ return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val)
+ }
+ return f, true, nil
+}
+
+// NestedInt64 returns the int64 value of a nested field.
+// Returns false if value is not found and an error if not an int64.
+func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found, err
+ }
+ i, ok := val.(int64)
+ if !ok {
+ return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val)
+ }
+ return i, true, nil
+}
+
+// NestedStringSlice returns a copy of []string value of a nested field.
+// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
+func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ m, ok := val.([]interface{})
+ if !ok {
+ return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+ }
+ strSlice := make([]string, 0, len(m))
+ for _, v := range m {
+ if str, ok := v.(string); ok {
+ strSlice = append(strSlice, str)
+ } else {
+ return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v)
+ }
+ }
+ return strSlice, true, nil
+}
+
+// NestedSlice returns a deep copy of []interface{} value of a nested field.
+// Returns false if value is not found and an error if not a []interface{}.
+func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ _, ok := val.([]interface{})
+ if !ok {
+ return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+ }
+ return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil
+}
+
+// NestedStringMap returns a copy of map[string]string value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
+func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
+ m, found, err := nestedMapNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ strMap := make(map[string]string, len(m))
+ for k, v := range m {
+ if str, ok := v.(string); ok {
+ strMap[k] = str
+ } else {
+ return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
+ }
+ }
+ return strMap, true, nil
+}
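A sketch of a typical NestedStringMap call, reading labels off an unstructured object (hypothetical content, not from the vendored source):

    u := map[string]interface{}{
        "metadata": map[string]interface{}{
            "labels": map[string]interface{}{"app": "demo", "tier": "web"},
        },
    }
    labels, found, err := NestedStringMap(u, "metadata", "labels")
    // labels == map[string]string{"app": "demo", "tier": "web"}, found == true, err == nil
    _, _, _ = labels, found, err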
+
+// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+ m, found, err := nestedMapNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ return runtime.DeepCopyJSON(m), true, nil
+}
+
+// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return nil, found, err
+ }
+ m, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
+ }
+ return m, true, nil
+}
+
+// SetNestedField sets the value of a nested field to a deep copy of the value provided.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error {
+ return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...)
+}
+
+func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error {
+ m := obj
+
+ for i, field := range fields[:len(fields)-1] {
+ if val, ok := m[field]; ok {
+ if valMap, ok := val.(map[string]interface{}); ok {
+ m = valMap
+ } else {
+ return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1]))
+ }
+ } else {
+ newVal := make(map[string]interface{})
+ m[field] = newVal
+ m = newVal
+ }
+ }
+ m[fields[len(fields)-1]] = value
+ return nil
+}
+
+// SetNestedStringSlice sets the string slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error {
+ m := make([]interface{}, 0, len(value)) // convert []string into []interface{}
+ for _, v := range value {
+ m = append(m, v)
+ }
+ return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedSlice sets the slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error {
+ return SetNestedField(obj, value, fields...)
+}
+
+// SetNestedStringMap sets the map[string]string value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error {
+ m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{}
+ for k, v := range value {
+ m[k] = v
+ }
+ return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedMap sets the map[string]interface{} value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error {
+ return SetNestedField(obj, value, fields...)
+}
+
+// RemoveNestedField removes the nested field from the obj.
+func RemoveNestedField(obj map[string]interface{}, fields ...string) {
+ m := obj
+ for _, field := range fields[:len(fields)-1] {
+ if x, ok := m[field].(map[string]interface{}); ok {
+ m = x
+ } else {
+ return
+ }
+ }
+ delete(m, fields[len(fields)-1])
+}
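A sketch of the setters and remover in use (hypothetical content, errors elided): SetNestedField creates intermediate maps on demand, and RemoveNestedField is a no-op on missing paths.

    u := map[string]interface{}{}
    _ = SetNestedField(u, int64(3), "spec", "replicas") // creates "spec" on the way down
    _ = SetNestedStringMap(u, map[string]string{"app": "demo"}, "metadata", "labels")
    RemoveNestedField(u, "metadata", "labels", "app")   // deletes the leaf if present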
+
+func getNestedString(obj map[string]interface{}, fields ...string) string {
+ val, found, err := NestedString(obj, fields...)
+ if !found || err != nil {
+ return ""
+ }
+ return val
+}
+
+func getNestedInt64(obj map[string]interface{}, fields ...string) int64 {
+ val, found, err := NestedInt64(obj, fields...)
+ if !found || err != nil {
+ return 0
+ }
+ return val
+}
+
+func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
+ val, found, err := NestedInt64(obj, fields...)
+ if !found || err != nil {
+ return nil
+ }
+ return &val
+}
+
+func jsonPath(fields []string) string {
+ return "." + strings.Join(fields, ".")
+}
+
+func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference {
+ // Though this field is a *bool, when decoded from JSON it is
+ // unmarshalled as a bool.
+ var controllerPtr *bool
+ if controller, found, err := NestedBool(v, "controller"); err == nil && found {
+ controllerPtr = &controller
+ }
+ var blockOwnerDeletionPtr *bool
+ if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found {
+ blockOwnerDeletionPtr = &blockOwnerDeletion
+ }
+ return metav1.OwnerReference{
+ Kind: getNestedString(v, "kind"),
+ Name: getNestedString(v, "name"),
+ APIVersion: getNestedString(v, "apiVersion"),
+ UID: types.UID(getNestedString(v, "uid")),
+ Controller: controllerPtr,
+ BlockOwnerDeletion: blockOwnerDeletionPtr,
+ }
+}
+
+// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
+// type, which can be used for generic access to objects without a predefined scheme.
+// TODO: move into serializer/json.
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
+
+type unstructuredJSONScheme struct{}
+
+func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+ var err error
+ if obj != nil {
+ err = s.decodeInto(data, obj)
+ } else {
+ obj, err = s.decode(data)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if len(gvk.Kind) == 0 {
+ return nil, &gvk, runtime.NewMissingKindErr(string(data))
+ }
+
+ return obj, &gvk, nil
+}
+
+func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
+ switch t := obj.(type) {
+ case *Unstructured:
+ return json.NewEncoder(w).Encode(t.Object)
+ case *UnstructuredList:
+ items := make([]interface{}, 0, len(t.Items))
+ for _, i := range t.Items {
+ items = append(items, i.Object)
+ }
+ listObj := make(map[string]interface{}, len(t.Object)+1)
+ for k, v := range t.Object { // Make a shallow copy
+ listObj[k] = v
+ }
+ listObj["items"] = items
+ return json.NewEncoder(w).Encode(listObj)
+ case *runtime.Unknown:
+ // TODO: Unstructured needs to deal with ContentType.
+ _, err := w.Write(t.Raw)
+ return err
+ default:
+ return json.NewEncoder(w).Encode(t)
+ }
+}
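A sketch of decoding through UnstructuredJSONScheme (hypothetical JSON, not from the vendored source): a top-level "items" array yields an *UnstructuredList, anything else an *Unstructured, and a missing kind produces an error.

    data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"}}`)
    obj, gvk, err := UnstructuredJSONScheme.Decode(data, nil, nil)
    // obj is a *Unstructured; gvk reports {Version: "v1", Kind: "ConfigMap"}
    _, _, _ = obj, gvk, err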
+
+func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
+ type detector struct {
+ Items gojson.RawMessage
+ }
+ var det detector
+ if err := json.Unmarshal(data, &det); err != nil {
+ return nil, err
+ }
+
+ if det.Items != nil {
+ list := &UnstructuredList{}
+ err := s.decodeToList(data, list)
+ return list, err
+ }
+
+ // No Items field, so it wasn't a list.
+ unstruct := &Unstructured{}
+ err := s.decodeToUnstructured(data, unstruct)
+ return unstruct, err
+}
+
+func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error {
+ switch x := obj.(type) {
+ case *Unstructured:
+ return s.decodeToUnstructured(data, x)
+ case *UnstructuredList:
+ return s.decodeToList(data, x)
+ case *runtime.VersionedObjects:
+ o, err := s.decode(data)
+ if err == nil {
+ x.Objects = []runtime.Object{o}
+ }
+ return err
+ default:
+ return json.Unmarshal(data, x)
+ }
+}
+
+func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
+ m := make(map[string]interface{})
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ unstruct.Object = m
+
+ return nil
+}
+
+func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
+ type decodeList struct {
+ Items []gojson.RawMessage
+ }
+
+ var dList decodeList
+ if err := json.Unmarshal(data, &dList); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(data, &list.Object); err != nil {
+ return err
+ }
+
+ // For typed lists, e.g., a PodList, API server doesn't set each item's
+ // APIVersion and Kind. We need to set it.
+ listAPIVersion := list.GetAPIVersion()
+ listKind := list.GetKind()
+ itemKind := strings.TrimSuffix(listKind, "List")
+
+ delete(list.Object, "items")
+ list.Items = make([]Unstructured, 0, len(dList.Items))
+ for _, i := range dList.Items {
+ unstruct := &Unstructured{}
+ if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
+ return err
+ }
+ // This is hacky. Set the item's Kind and APIVersion to those inferred
+ // from the List.
+ if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
+ unstruct.SetKind(itemKind)
+ unstruct.SetAPIVersion(listAPIVersion)
+ }
+ list.Items = append(list.Items, *unstruct)
+ }
+ return nil
+}
+
+type JSONFallbackEncoder struct {
+ runtime.Encoder
+}
+
+func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
+ err := c.Encoder.Encode(obj, w)
+ if runtime.IsNotRegisteredError(err) {
+ switch obj.(type) {
+ case *Unstructured, *UnstructuredList:
+ return UnstructuredJSONScheme.Encode(obj, w)
+ }
+ }
+ return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
new file mode 100644
index 000000000..0ba18d45d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -0,0 +1,521 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Unstructured allows objects that do not have Golang structs registered to be manipulated
+// generically. This can be used to deal with the API objects from a plug-in. Unstructured
+// objects still have functioning TypeMeta features-- kind, version, etc.
+//
+// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this
+// type if you are dealing with objects that are not in the server meta v1 schema.
+//
+// TODO: make the serialization part of this type distinct from the field accessors.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type Unstructured struct {
+ // Object is a JSON compatible map with string, float, int, bool, []interface{}, or
+ // map[string]interface{}
+ // children.
+ Object map[string]interface{}
+}
+
+var _ metav1.Object = &Unstructured{}
+var _ runtime.Unstructured = &Unstructured{}
+var _ metav1.ListInterface = &Unstructured{}
+
+func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj }
+
+func (obj *Unstructured) IsList() bool {
+ field, ok := obj.Object["items"]
+ if !ok {
+ return false
+ }
+ _, ok = field.([]interface{})
+ return ok
+}
+func (obj *Unstructured) ToList() (*UnstructuredList, error) {
+ if !obj.IsList() {
+ // return an empty list back
+ return &UnstructuredList{Object: obj.Object}, nil
+ }
+
+ ret := &UnstructuredList{}
+ ret.Object = obj.Object
+
+ err := obj.EachListItem(func(item runtime.Object) error {
+ castItem := item.(*Unstructured)
+ ret.Items = append(ret.Items, *castItem)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
+ field, ok := obj.Object["items"]
+ if !ok {
+ return errors.New("content is not a list")
+ }
+ items, ok := field.([]interface{})
+ if !ok {
+ return fmt.Errorf("content is not a list: %T", field)
+ }
+ for _, item := range items {
+ child, ok := item.(map[string]interface{})
+ if !ok {
+			return fmt.Errorf("items member is not an object: %T", item)
+ }
+ if err := fn(&Unstructured{Object: child}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
+ if obj.Object == nil {
+ return make(map[string]interface{})
+ }
+ return obj.Object
+}
+
+func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) {
+ obj.Object = content
+}
+
+// MarshalJSON ensures that the unstructured object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ err := UnstructuredJSONScheme.Encode(u, &buf)
+ return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured object properly decodes
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) UnmarshalJSON(b []byte) error {
+ _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+ return err
+}
+
+// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+func (in *Unstructured) NewEmptyInstance() runtime.Unstructured {
+ out := new(Unstructured)
+ if in != nil {
+ out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind())
+ }
+ return out
+}
+
+func (in *Unstructured) DeepCopy() *Unstructured {
+ if in == nil {
+ return nil
+ }
+ out := new(Unstructured)
+ *out = *in
+ out.Object = runtime.DeepCopyJSON(in.Object)
+ return out
+}
+
+func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ SetNestedField(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ SetNestedStringSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ SetNestedSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ SetNestedStringMap(u.Object, value, fields...)
+}
+
+func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference {
+ field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences")
+ if !found || err != nil {
+ return nil
+ }
+ original, ok := field.([]interface{})
+ if !ok {
+ return nil
+ }
+ ret := make([]metav1.OwnerReference, 0, len(original))
+ for _, obj := range original {
+ o, ok := obj.(map[string]interface{})
+ if !ok {
+ // expected map[string]interface{}, got something else
+ return nil
+ }
+ ret = append(ret, extractOwnerReference(o))
+ }
+ return ret
+}
+
+func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) {
+ if references == nil {
+ RemoveNestedField(u.Object, "metadata", "ownerReferences")
+ return
+ }
+
+ newReferences := make([]interface{}, 0, len(references))
+ for _, reference := range references {
+ out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err))
+ continue
+ }
+ newReferences = append(newReferences, out)
+ }
+ u.setNestedField(newReferences, "metadata", "ownerReferences")
+}
+
+func (u *Unstructured) GetAPIVersion() string {
+ return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *Unstructured) SetAPIVersion(version string) {
+ u.setNestedField(version, "apiVersion")
+}
+
+func (u *Unstructured) GetKind() string {
+ return getNestedString(u.Object, "kind")
+}
+
+func (u *Unstructured) SetKind(kind string) {
+ u.setNestedField(kind, "kind")
+}
+
+func (u *Unstructured) GetNamespace() string {
+ return getNestedString(u.Object, "metadata", "namespace")
+}
+
+func (u *Unstructured) SetNamespace(namespace string) {
+ if len(namespace) == 0 {
+ RemoveNestedField(u.Object, "metadata", "namespace")
+ return
+ }
+ u.setNestedField(namespace, "metadata", "namespace")
+}
+
+func (u *Unstructured) GetName() string {
+ return getNestedString(u.Object, "metadata", "name")
+}
+
+func (u *Unstructured) SetName(name string) {
+ if len(name) == 0 {
+ RemoveNestedField(u.Object, "metadata", "name")
+ return
+ }
+ u.setNestedField(name, "metadata", "name")
+}
+
+func (u *Unstructured) GetGenerateName() string {
+ return getNestedString(u.Object, "metadata", "generateName")
+}
+
+func (u *Unstructured) SetGenerateName(generateName string) {
+ if len(generateName) == 0 {
+ RemoveNestedField(u.Object, "metadata", "generateName")
+ return
+ }
+ u.setNestedField(generateName, "metadata", "generateName")
+}
+
+func (u *Unstructured) GetUID() types.UID {
+ return types.UID(getNestedString(u.Object, "metadata", "uid"))
+}
+
+func (u *Unstructured) SetUID(uid types.UID) {
+ if len(string(uid)) == 0 {
+ RemoveNestedField(u.Object, "metadata", "uid")
+ return
+ }
+ u.setNestedField(string(uid), "metadata", "uid")
+}
+
+func (u *Unstructured) GetResourceVersion() string {
+ return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) SetResourceVersion(resourceVersion string) {
+ if len(resourceVersion) == 0 {
+ RemoveNestedField(u.Object, "metadata", "resourceVersion")
+ return
+ }
+ u.setNestedField(resourceVersion, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) GetGeneration() int64 {
+ val, found, err := NestedInt64(u.Object, "metadata", "generation")
+ if !found || err != nil {
+ return 0
+ }
+ return val
+}
+
+func (u *Unstructured) SetGeneration(generation int64) {
+ if generation == 0 {
+ RemoveNestedField(u.Object, "metadata", "generation")
+ return
+ }
+ u.setNestedField(generation, "metadata", "generation")
+}
+
+func (u *Unstructured) GetSelfLink() string {
+ return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *Unstructured) SetSelfLink(selfLink string) {
+ if len(selfLink) == 0 {
+ RemoveNestedField(u.Object, "metadata", "selfLink")
+ return
+ }
+ u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *Unstructured) GetContinue() string {
+ return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *Unstructured) SetContinue(c string) {
+ if len(c) == 0 {
+ RemoveNestedField(u.Object, "metadata", "continue")
+ return
+ }
+ u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *Unstructured) GetRemainingItemCount() *int64 {
+ return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
+}
+
+func (u *Unstructured) SetRemainingItemCount(c *int64) {
+ if c == nil {
+ RemoveNestedField(u.Object, "metadata", "remainingItemCount")
+ } else {
+ u.setNestedField(*c, "metadata", "remainingItemCount")
+ }
+}
+
+func (u *Unstructured) GetCreationTimestamp() metav1.Time {
+ var timestamp metav1.Time
+ timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
+ return timestamp
+}
+
+func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) {
+ ts, _ := timestamp.MarshalQueryParameter()
+ if len(ts) == 0 || timestamp.Time.IsZero() {
+ RemoveNestedField(u.Object, "metadata", "creationTimestamp")
+ return
+ }
+ u.setNestedField(ts, "metadata", "creationTimestamp")
+}
+
+func (u *Unstructured) GetDeletionTimestamp() *metav1.Time {
+ var timestamp metav1.Time
+ timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
+ if timestamp.IsZero() {
+ return nil
+ }
+ return &timestamp
+}
+
+func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) {
+ if timestamp == nil {
+ RemoveNestedField(u.Object, "metadata", "deletionTimestamp")
+ return
+ }
+ ts, _ := timestamp.MarshalQueryParameter()
+ u.setNestedField(ts, "metadata", "deletionTimestamp")
+}
+
+func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 {
+ val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds")
+ if !found || err != nil {
+ return nil
+ }
+ return &val
+}
+
+func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
+ if deletionGracePeriodSeconds == nil {
+ RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds")
+ return
+ }
+ u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds")
+}
+
+func (u *Unstructured) GetLabels() map[string]string {
+ m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
+ return m
+}
+
+func (u *Unstructured) SetLabels(labels map[string]string) {
+ if labels == nil {
+ RemoveNestedField(u.Object, "metadata", "labels")
+ return
+ }
+ u.setNestedMap(labels, "metadata", "labels")
+}
+
+func (u *Unstructured) GetAnnotations() map[string]string {
+ m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
+ return m
+}
+
+func (u *Unstructured) SetAnnotations(annotations map[string]string) {
+ if annotations == nil {
+ RemoveNestedField(u.Object, "metadata", "annotations")
+ return
+ }
+ u.setNestedMap(annotations, "metadata", "annotations")
+}
+
+func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+ u.SetAPIVersion(gvk.GroupVersion().String())
+ u.SetKind(gvk.Kind)
+}
+
+func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind {
+ gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+ if err != nil {
+ return schema.GroupVersionKind{}
+ }
+ gvk := gv.WithKind(u.GetKind())
+ return gvk
+}
+
+func (u *Unstructured) GetInitializers() *metav1.Initializers {
+ m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers")
+ if !found || err != nil {
+ return nil
+ }
+ out := &metav1.Initializers{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil {
+ utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err))
+ return nil
+ }
+ return out
+}
+
+func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) {
+ if initializers == nil {
+ RemoveNestedField(u.Object, "metadata", "initializers")
+ return
+ }
+ out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err))
+ }
+ u.setNestedField(out, "metadata", "initializers")
+}
+
+func (u *Unstructured) GetFinalizers() []string {
+ val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers")
+ return val
+}
+
+func (u *Unstructured) SetFinalizers(finalizers []string) {
+ if finalizers == nil {
+ RemoveNestedField(u.Object, "metadata", "finalizers")
+ return
+ }
+ u.setNestedStringSlice(finalizers, "metadata", "finalizers")
+}
+
+func (u *Unstructured) GetClusterName() string {
+ return getNestedString(u.Object, "metadata", "clusterName")
+}
+
+func (u *Unstructured) SetClusterName(clusterName string) {
+ if len(clusterName) == 0 {
+ RemoveNestedField(u.Object, "metadata", "clusterName")
+ return
+ }
+ u.setNestedField(clusterName, "metadata", "clusterName")
+}
+
+func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
+ items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+ if !found || err != nil {
+ return nil
+ }
+ managedFields := []metav1.ManagedFieldsEntry{}
+ for _, item := range items {
+ m, ok := item.(map[string]interface{})
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item))
+ return nil
+ }
+ out := metav1.ManagedFieldsEntry{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil {
+ utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err))
+ return nil
+ }
+ managedFields = append(managedFields, out)
+ }
+ return managedFields
+}
+
+func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) {
+ if managedFields == nil {
+ RemoveNestedField(u.Object, "metadata", "managedFields")
+ return
+ }
+ items := []interface{}{}
+ for _, managedFieldsEntry := range managedFields {
+ out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err))
+ return
+ }
+ items = append(items, out)
+ }
+ u.setNestedSlice(items, "metadata", "managedFields")
+}
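
As a usage sketch (not part of the vendored file), the accessors above read and write through the nested Object map, and setters given an empty value remove the corresponding field rather than storing an empty string:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	u.SetNamespace("default")
	u.SetName("demo")
	u.SetLabels(map[string]string{"app": "demo"})

	// Getters walk the nested map that the setters populated.
	fmt.Println(u.GetAPIVersion(), u.GetKind(), u.GetName()) // apps/v1 Deployment demo

	// Setting an empty name removes metadata.name instead of writing "".
	u.SetName("")
	fmt.Println(u.GetName() == "") // true
}
```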
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
new file mode 100644
index 000000000..5028f5fb5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+ "bytes"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.Unstructured = &UnstructuredList{}
+var _ metav1.ListInterface = &UnstructuredList{}
+
+// UnstructuredList allows lists that do not have Golang structs
+// registered to be manipulated generically. This can be used to deal
+// with the API lists from a plug-in.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type UnstructuredList struct {
+ Object map[string]interface{}
+
+ // Items is a list of unstructured objects.
+ Items []Unstructured `json:"items"`
+}
+
+func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u }
+
+func (u *UnstructuredList) IsList() bool { return true }
+
+func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
+ for i := range u.Items {
+ if err := fn(&u.Items[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured {
+ out := new(UnstructuredList)
+ if u != nil {
+ out.SetGroupVersionKind(u.GroupVersionKind())
+ }
+ return out
+}
+
+// UnstructuredContent returns a map containing an overlay of the Items field onto
+// the Object field. The Items field always overwrites the "items" key in the overlay.
+func (u *UnstructuredList) UnstructuredContent() map[string]interface{} {
+ out := make(map[string]interface{}, len(u.Object)+1)
+
+ // shallow copy every property
+ for k, v := range u.Object {
+ out[k] = v
+ }
+
+ items := make([]interface{}, len(u.Items))
+ for i, item := range u.Items {
+ items[i] = item.UnstructuredContent()
+ }
+ out["items"] = items
+ return out
+}
+
+// SetUnstructuredContent obeys the conventions of List and keeps Items and the items
+// array in sync. If items is not an array of objects in the incoming map, then any
+// mismatched item will be removed.
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) {
+ obj.Object = content
+ if content == nil {
+ obj.Items = nil
+ return
+ }
+ items, ok := obj.Object["items"].([]interface{})
+ if !ok || items == nil {
+ items = []interface{}{}
+ }
+ unstructuredItems := make([]Unstructured, 0, len(items))
+ newItems := make([]interface{}, 0, len(items))
+ for _, item := range items {
+ o, ok := item.(map[string]interface{})
+ if !ok {
+ continue
+ }
+ unstructuredItems = append(unstructuredItems, Unstructured{Object: o})
+ newItems = append(newItems, o)
+ }
+ obj.Items = unstructuredItems
+ obj.Object["items"] = newItems
+}
+
+func (u *UnstructuredList) DeepCopy() *UnstructuredList {
+ if u == nil {
+ return nil
+ }
+ out := new(UnstructuredList)
+ *out = *u
+ out.Object = runtime.DeepCopyJSON(u.Object)
+ out.Items = make([]Unstructured, len(u.Items))
+ for i := range u.Items {
+ u.Items[i].DeepCopyInto(&out.Items[i])
+ }
+ return out
+}
+
+// MarshalJSON ensures that the unstructured list object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ err := UnstructuredJSONScheme.Encode(u, &buf)
+ return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured list object properly
+// decodes JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) UnmarshalJSON(b []byte) error {
+ _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+ return err
+}
+
+func (u *UnstructuredList) GetAPIVersion() string {
+ return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *UnstructuredList) SetAPIVersion(version string) {
+ u.setNestedField(version, "apiVersion")
+}
+
+func (u *UnstructuredList) GetKind() string {
+ return getNestedString(u.Object, "kind")
+}
+
+func (u *UnstructuredList) SetKind(kind string) {
+ u.setNestedField(kind, "kind")
+}
+
+func (u *UnstructuredList) GetResourceVersion() string {
+ return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) SetResourceVersion(version string) {
+ u.setNestedField(version, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) GetSelfLink() string {
+ return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) SetSelfLink(selfLink string) {
+ u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) GetContinue() string {
+ return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *UnstructuredList) SetContinue(c string) {
+ u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *UnstructuredList) GetRemainingItemCount() *int64 {
+ return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
+}
+
+func (u *UnstructuredList) SetRemainingItemCount(c *int64) {
+ if c == nil {
+ RemoveNestedField(u.Object, "metadata", "remainingItemCount")
+ } else {
+ u.setNestedField(*c, "metadata", "remainingItemCount")
+ }
+}
+
+func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+ u.SetAPIVersion(gvk.GroupVersion().String())
+ u.SetKind(gvk.Kind)
+}
+
+func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind {
+ gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+ if err != nil {
+ return schema.GroupVersionKind{}
+ }
+ gvk := gv.WithKind(u.GetKind())
+ return gvk
+}
+
+func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ SetNestedField(u.Object, value, fields...)
+}
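
A short sketch of the list conventions described above (not part of the vendored change): SetUnstructuredContent keeps Items and the "items" array in sync and drops mismatched entries, while UnstructuredContent overlays Items back onto the map:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	list := &unstructured.UnstructuredList{}
	list.SetUnstructuredContent(map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMapList",
		"items": []interface{}{
			map[string]interface{}{"metadata": map[string]interface{}{"name": "a"}},
			"not-an-object", // mismatched entries are dropped from Items
		},
	})

	fmt.Println(len(list.Items))         // 1
	fmt.Println(list.Items[0].GetName()) // a

	// UnstructuredContent rebuilds the "items" key from the Items slice.
	content := list.UnstructuredContent()
	fmt.Println(len(content["items"].([]interface{}))) // 1
}
```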
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
new file mode 100644
index 000000000..9a9f25e8f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
@@ -0,0 +1,55 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package unstructured
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unstructured) DeepCopyInto(out *Unstructured) {
+ clone := in.DeepCopy()
+ *out = *clone
+ return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Unstructured) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) {
+ clone := in.DeepCopy()
+ *out = *clone
+ return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UnstructuredList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
index b7ec50318..58f077380 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
@@ -39,7 +39,7 @@ type WatchEvent struct {
Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"`
}
-func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *WatchEvent, s conversion.Scope) error {
+func Convert_watch_Event_To_v1_WatchEvent(in *watch.Event, out *WatchEvent, s conversion.Scope) error {
out.Type = string(in.Type)
switch t := in.Object.(type) {
case *runtime.Unknown:
@@ -52,11 +52,11 @@ func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *WatchEvent, s
return nil
}
-func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *WatchEvent, s conversion.Scope) error {
- return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s)
+func Convert_v1_InternalEvent_To_v1_WatchEvent(in *InternalEvent, out *WatchEvent, s conversion.Scope) error {
+ return Convert_watch_Event_To_v1_WatchEvent((*watch.Event)(in), out, s)
}
-func Convert_versioned_Event_to_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error {
+func Convert_v1_WatchEvent_To_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error {
out.Type = watch.EventType(in.Type)
if in.Object.Object != nil {
out.Object = in.Object.Object
@@ -70,8 +70,8 @@ func Convert_versioned_Event_to_watch_Event(in *WatchEvent, out *watch.Event, s
return nil
}
-func Convert_versioned_Event_to_versioned_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error {
- return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s)
+func Convert_v1_WatchEvent_To_v1_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error {
+ return Convert_v1_WatchEvent_To_watch_Event(in, (*watch.Event)(out), s)
}
// InternalEvent makes watch.Event versioned
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index 73308d86e..fa179ac7b 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -192,44 +192,63 @@ func (in *APIVersions) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CreateOptions) DeepCopyInto(out *CreateOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.DryRun != nil {
+ in, out := &in.DryRun, &out.DryRun
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateOptions.
+func (in *CreateOptions) DeepCopy() *CreateOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(CreateOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CreateOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.Preconditions != nil {
in, out := &in.Preconditions, &out.Preconditions
- if *in == nil {
- *out = nil
- } else {
- *out = new(Preconditions)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Preconditions)
+ (*in).DeepCopyInto(*out)
}
if in.OrphanDependents != nil {
in, out := &in.OrphanDependents, &out.OrphanDependents
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.PropagationPolicy != nil {
in, out := &in.PropagationPolicy, &out.PropagationPolicy
- if *in == nil {
- *out = nil
- } else {
- *out = new(DeletionPropagation)
- **out = **in
- }
+ *out = new(DeletionPropagation)
+ **out = **in
+ }
+ if in.DryRun != nil {
+ in, out := &in.DryRun, &out.DryRun
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
return
}
@@ -294,6 +313,29 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Fields) DeepCopyInto(out *Fields) {
+ *out = *in
+ if in.Map != nil {
+ in, out := &in.Map, &out.Map
+ *out = make(map[string]Fields, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields.
+func (in *Fields) DeepCopy() *Fields {
+ if in == nil {
+ return nil
+ }
+ out := new(Fields)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GetOptions) DeepCopyInto(out *GetOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
@@ -440,12 +482,8 @@ func (in *Initializers) DeepCopyInto(out *Initializers) {
}
if in.Result != nil {
in, out := &in.Result, &out.Result
- if *in == nil {
- *out = nil
- } else {
- *out = new(Status)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Status)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -463,9 +501,7 @@ func (in *Initializers) DeepCopy() *Initializers {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InternalEvent) DeepCopyInto(out *InternalEvent) {
*out = *in
- if in.Object == nil {
- out.Object = nil
- } else {
+ if in.Object != nil {
out.Object = in.Object.DeepCopyObject()
}
return
@@ -536,7 +572,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement {
func (in *List) DeepCopyInto(out *List) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.RawExtension, len(*in))
@@ -568,6 +604,11 @@ func (in *List) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListMeta) DeepCopyInto(out *ListMeta) {
*out = *in
+ if in.RemainingItemCount != nil {
+ in, out := &in.RemainingItemCount, &out.RemainingItemCount
+ *out = new(int64)
+ **out = **in
+ }
return
}
@@ -587,12 +628,8 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) {
out.TypeMeta = in.TypeMeta
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
return
}
@@ -615,6 +652,31 @@ func (in *ListOptions) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) {
+ *out = *in
+ if in.Time != nil {
+ in, out := &in.Time, &out.Time
+ *out = (*in).DeepCopy()
+ }
+ if in.Fields != nil {
+ in, out := &in.Fields, &out.Fields
+ *out = new(Fields)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry.
+func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedFieldsEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime.
func (in *MicroTime) DeepCopy() *MicroTime {
if in == nil {
@@ -631,20 +693,12 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
if in.DeletionTimestamp != nil {
in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
if in.DeletionGracePeriodSeconds != nil {
in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
- if *in == nil {
- *out = nil
- } else {
- *out = new(int64)
- **out = **in
- }
+ *out = new(int64)
+ **out = **in
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
@@ -669,18 +723,21 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
}
if in.Initializers != nil {
in, out := &in.Initializers, &out.Initializers
- if *in == nil {
- *out = nil
- } else {
- *out = new(Initializers)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Initializers)
+ (*in).DeepCopyInto(*out)
}
if in.Finalizers != nil {
in, out := &in.Finalizers, &out.Finalizers
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.ManagedFields != nil {
+ in, out := &in.ManagedFields, &out.ManagedFields
+ *out = make([]ManagedFieldsEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -699,21 +756,13 @@ func (in *OwnerReference) DeepCopyInto(out *OwnerReference) {
*out = *in
if in.Controller != nil {
in, out := &in.Controller, &out.Controller
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
if in.BlockOwnerDeletion != nil {
in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion
- if *in == nil {
- *out = nil
- } else {
- *out = new(bool)
- **out = **in
- }
+ *out = new(bool)
+ **out = **in
}
return
}
@@ -729,6 +778,65 @@ func (in *OwnerReference) DeepCopy() *OwnerReference {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata.
+func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata {
+ if in == nil {
+ return nil
+ }
+ out := new(PartialObjectMetadata)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PartialObjectMetadata, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
+ if in == nil {
+ return nil
+ }
+ out := new(PartialObjectMetadataList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Patch) DeepCopyInto(out *Patch) {
*out = *in
return
@@ -745,16 +853,52 @@ func (in *Patch) DeepCopy() *Patch {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PatchOptions) DeepCopyInto(out *PatchOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.DryRun != nil {
+ in, out := &in.DryRun, &out.DryRun
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Force != nil {
+ in, out := &in.Force, &out.Force
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions.
+func (in *PatchOptions) DeepCopy() *PatchOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(PatchOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PatchOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Preconditions) DeepCopyInto(out *Preconditions) {
*out = *in
if in.UID != nil {
in, out := &in.UID, &out.UID
- if *in == nil {
- *out = nil
- } else {
- *out = new(types.UID)
- **out = **in
- }
+ *out = new(types.UID)
+ **out = **in
+ }
+ if in.ResourceVersion != nil {
+ in, out := &in.ResourceVersion, &out.ResourceVersion
+ *out = new(string)
+ **out = **in
}
return
}
@@ -810,15 +954,11 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR {
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Details != nil {
in, out := &in.Details, &out.Details
- if *in == nil {
- *out = nil
- } else {
- *out = new(StatusDetails)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(StatusDetails)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -878,6 +1018,108 @@ func (in *StatusDetails) DeepCopy() *StatusDetails {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Table) DeepCopyInto(out *Table) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.ColumnDefinitions != nil {
+ in, out := &in.ColumnDefinitions, &out.ColumnDefinitions
+ *out = make([]TableColumnDefinition, len(*in))
+ copy(*out, *in)
+ }
+ if in.Rows != nil {
+ in, out := &in.Rows, &out.Rows
+ *out = make([]TableRow, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table.
+func (in *Table) DeepCopy() *Table {
+ if in == nil {
+ return nil
+ }
+ out := new(Table)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Table) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition.
+func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition {
+ if in == nil {
+ return nil
+ }
+ out := new(TableColumnDefinition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableOptions) DeepCopyInto(out *TableOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions.
+func (in *TableOptions) DeepCopy() *TableOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(TableOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TableOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRow) DeepCopyInto(out *TableRow) {
+ clone := in.DeepCopy()
+ *out = *clone
+ return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition.
+func (in *TableRowCondition) DeepCopy() *TableRowCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(TableRowCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
func (in *Time) DeepCopy() *Time {
if in == nil {
@@ -905,6 +1147,36 @@ func (in *Timestamp) DeepCopy() *Timestamp {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateOptions) DeepCopyInto(out *UpdateOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.DryRun != nil {
+ in, out := &in.DryRun, &out.DryRun
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateOptions.
+func (in *UpdateOptions) DeepCopy() *UpdateOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UpdateOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Verbs) DeepCopyInto(out *Verbs) {
{
in := &in
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
index 40d9ab00e..cce2e603a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
index 7854c207c..bc615dc3a 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
@@ -40,7 +40,11 @@ type NameFunc func(t reflect.Type) string
var DefaultNameFunc = func(t reflect.Type) string { return t.Name() }
-type GenericConversionFunc func(a, b interface{}, scope Scope) (bool, error)
+// ConversionFunc converts the object a into the object b, reusing arrays or objects
+// or pointers if necessary. It should return an error if the object cannot be converted
+// or if some data is invalid. If you do not wish a and b to share fields or nested
+// objects, you must copy a before calling this function.
+type ConversionFunc func(a, b interface{}, scope Scope) error
// Converter knows how to convert one type to another.
type Converter struct {
@@ -49,11 +53,6 @@ type Converter struct {
conversionFuncs ConversionFuncs
generatedConversionFuncs ConversionFuncs
- // genericConversions are called during normal conversion to offer a "fast-path"
- // that avoids all reflection. These methods are not called outside of the .Convert()
- // method.
- genericConversions []GenericConversionFunc
-
// Set of conversions that should be treated as a no-op
ignoredConversions map[typePair]struct{}
@@ -98,14 +97,6 @@ func NewConverter(nameFn NameFunc) *Converter {
return c
}
-// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern
-// (for two conversion types) to the converter. These functions are checked first during
-// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering
-// typed conversions.
-func (c *Converter) AddGenericConversionFunc(fn GenericConversionFunc) {
- c.genericConversions = append(c.genericConversions, fn)
-}
-
// WithConversions returns a Converter that is a copy of c but with the additional
// fns merged on top.
func (c *Converter) WithConversions(fns ConversionFuncs) *Converter {
@@ -161,11 +152,15 @@ type Scope interface {
type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string)
func NewConversionFuncs() ConversionFuncs {
- return ConversionFuncs{fns: make(map[typePair]reflect.Value)}
+ return ConversionFuncs{
+ fns: make(map[typePair]reflect.Value),
+ untyped: make(map[typePair]ConversionFunc),
+ }
}
type ConversionFuncs struct {
- fns map[typePair]reflect.Value
+ fns map[typePair]reflect.Value
+ untyped map[typePair]ConversionFunc
}
// Add adds the provided conversion functions to the lookup table - they must have the signature
@@ -183,6 +178,21 @@ func (c ConversionFuncs) Add(fns ...interface{}) error {
return nil
}
+// AddUntyped adds the provided conversion function to the lookup table for the types that are
+// supplied as a and b. a and b must be pointers or an error is returned. This method overwrites
+// previously defined functions.
+func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error {
+ tA, tB := reflect.TypeOf(a), reflect.TypeOf(b)
+ if tA.Kind() != reflect.Ptr {
+ return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a)
+ }
+ if tB.Kind() != reflect.Ptr {
+ return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b)
+ }
+ c.untyped[typePair{tA, tB}] = fn
+ return nil
+}
+
// Merge returns a new ConversionFuncs that contains all conversions from
// both other and c, with other conversions taking precedence.
func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
@@ -193,6 +203,12 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
for k, v := range other.fns {
merged.fns[k] = v
}
+ for k, v := range c.untyped {
+ merged.untyped[k] = v
+ }
+ for k, v := range other.untyped {
+ merged.untyped[k] = v
+ }
return merged
}
@@ -355,16 +371,32 @@ func verifyConversionFunctionSignature(ft reflect.Type) error {
// // conversion logic...
// return nil
// })
+// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc
func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error {
return c.conversionFuncs.Add(conversionFunc)
}
// Similar to RegisterConversionFunc, but registers conversion function that were
// automatically generated.
+// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc
func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error {
return c.generatedConversionFuncs.Add(conversionFunc)
}
+// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+ return c.conversionFuncs.AddUntyped(a, b, fn)
+}
+
+// RegisterGeneratedUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+ return c.generatedConversionFuncs.AddUntyped(a, b, fn)
+}
+
// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested
// conversion between from and to is ignored.
func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
@@ -380,39 +412,6 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
return nil
}
-// IsConversionIgnored returns true if the specified objects should be dropped during
-// conversion.
-func (c *Converter) IsConversionIgnored(inType, outType reflect.Type) bool {
- _, found := c.ignoredConversions[typePair{inType, outType}]
- return found
-}
-
-func (c *Converter) HasConversionFunc(inType, outType reflect.Type) bool {
- _, found := c.conversionFuncs.fns[typePair{inType, outType}]
- return found
-}
-
-func (c *Converter) ConversionFuncValue(inType, outType reflect.Type) (reflect.Value, bool) {
- value, found := c.conversionFuncs.fns[typePair{inType, outType}]
- return value, found
-}
-
-// SetStructFieldCopy registers a correspondence. Whenever a struct field is encountered
-// which has a type and name matching srcFieldType and srcFieldName, it wil be copied
-// into the field in the destination struct matching destFieldType & Name, if such a
-// field exists.
-// May be called multiple times, even for the same source field & type--all applicable
-// copies will be performed.
-func (c *Converter) SetStructFieldCopy(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error {
- st := reflect.TypeOf(srcFieldType)
- dt := reflect.TypeOf(destFieldType)
- srcKey := typeNamePair{st, srcFieldName}
- destKey := typeNamePair{dt, destFieldName}
- c.structFieldDests[srcKey] = append(c.structFieldDests[srcKey], destKey)
- c.structFieldSources[destKey] = append(c.structFieldSources[destKey], srcKey)
- return nil
-}
-
// RegisterInputDefaults registers a field name mapping function, used when converting
// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping
// applied automatically if the input matches in. A set of default flags for the input conversion
@@ -468,15 +467,6 @@ func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool {
// it is not used by Convert() other than storing it in the scope.
// Not safe for objects with cyclic references!
func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
- if len(c.genericConversions) > 0 {
- // TODO: avoid scope allocation
- s := &scope{converter: c, flags: flags, meta: meta}
- for _, fn := range c.genericConversions {
- if ok, err := fn(src, dest, s); ok {
- return err
- }
- }
- }
return c.doConversion(src, dest, flags, meta, c.convert)
}
@@ -495,6 +485,21 @@ func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFla
type conversionFunc func(sv, dv reflect.Value, scope *scope) error
func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
+ pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)}
+ scope := &scope{
+ converter: c,
+ flags: flags,
+ meta: meta,
+ }
+ if fn, ok := c.conversionFuncs.untyped[pair]; ok {
+ return fn(src, dest, scope)
+ }
+ if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok {
+ return fn(src, dest, scope)
+ }
+ // TODO: consider everything past this point deprecated - we want to support only point to point top level
+ // conversions
+
dv, err := EnforcePtr(dest)
if err != nil {
return err
@@ -506,15 +511,10 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
if err != nil {
return err
}
- s := &scope{
- converter: c,
- flags: flags,
- meta: meta,
- }
// Leave something on the stack, so that calls to struct tag getters never fail.
- s.srcStack.push(scopeStackElem{})
- s.destStack.push(scopeStackElem{})
- return f(sv, dv, s)
+ scope.srcStack.push(scopeStackElem{})
+ scope.destStack.push(scopeStackElem{})
+ return f(sv, dv, scope)
}
// callCustom calls 'custom' with sv & dv. custom must be a conversion function.
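
A hedged sketch of the new untyped registration path added above, using two hypothetical local types (internalWidget, v1Widget) that are not part of apimachinery; doConversion looks the function up by the pointer-type pair before falling back to reflection:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

// Hypothetical types, defined only for this example.
type internalWidget struct{ Name string }
type v1Widget struct{ Name string }

func main() {
	c := conversion.NewConverter(conversion.DefaultNameFunc)

	// Register a point-to-point conversion; it is keyed on (*internalWidget, *v1Widget).
	err := c.RegisterUntypedConversionFunc(&internalWidget{}, &v1Widget{},
		func(a, b interface{}, scope conversion.Scope) error {
			in, out := a.(*internalWidget), b.(*v1Widget)
			out.Name = in.Name // plain field copy, no reflection
			return nil
		})
	if err != nil {
		panic(err)
	}

	src := &internalWidget{Name: "demo"}
	dst := &v1Widget{}
	if err := c.Convert(src, dst, 0, nil); err != nil {
		panic(err)
	}
	fmt.Println(dst.Name) // demo
}
```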
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
index 17b366617..2f0dd0074 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
@@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) {
return tag, omitempty
}
-func formatValue(value interface{}) string {
- return fmt.Sprintf("%v", value)
-}
-
func isPointerKind(kind reflect.Kind) bool {
return kind == reflect.Ptr
}
@@ -174,6 +170,9 @@ func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) {
kind = ft.Kind()
if !field.IsNil() {
field = reflect.Indirect(field)
+ // If the field is non-nil, it should be added to params
+				// and omitempty should be overridden to false
+ omitempty = false
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
index 3785d8c2f..e3e4453b6 100644
--- a/vendor/k8s.io/apimachinery/pkg/fields/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
@@ -55,6 +55,21 @@ type Selector interface {
DeepCopySelector() Selector
}
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Fields) bool { return false }
+func (n nothingSelector) Empty() bool { return false }
+func (n nothingSelector) String() string { return "" }
+func (n nothingSelector) Requirements() Requirements { return nil }
+func (n nothingSelector) DeepCopySelector() Selector { return n }
+func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false }
+func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil }
+
+// Nothing returns a selector that matches no fields
+func Nothing() Selector {
+ return nothingSelector{}
+}
+
// Everything returns a selector that matches all fields.
func Everything() Selector {
return andTerm{}
@@ -449,6 +464,12 @@ func OneTermEqualSelector(k, v string) Selector {
return &hasTerm{field: k, value: v}
}
+// OneTermNotEqualSelector returns an object that matches objects where the given field does not equal the given value.
+// Cannot return an error.
+func OneTermNotEqualSelector(k, v string) Selector {
+ return &notHasTerm{field: k, value: v}
+}
+
// AndSelectors creates a selector that is the logical AND of all the given selectors
func AndSelectors(selectors ...Selector) Selector {
return andTerm(selectors)
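
A small usage sketch for the two selectors added above (not part of the vendored change), using the package's exported Set type as the Fields implementation:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Nothing() is the counterpart of Everything(): it matches no field set.
	fmt.Println(fields.Nothing().Matches(fields.Set{"metadata.name": "demo"})) // false

	// OneTermNotEqualSelector matches when the field's value differs.
	sel := fields.OneTermNotEqualSelector("status.phase", "Running")
	fmt.Println(sel.Matches(fields.Set{"status.phase": "Pending"})) // true
	fmt.Println(sel.Matches(fields.Set{"status.phase": "Running"})) // false
}
```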
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
index 32db4d96f..abf3ace6f 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) {
return labelsMap, err
}
value := strings.TrimSpace(l[1])
- if err := validateLabelValue(value); err != nil {
+ if err := validateLabelValue(key, value); err != nil {
return labelsMap, err
}
labelsMap[key] = value
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index b301b4284..9be9e57d3 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -23,10 +23,10 @@ import (
"strconv"
"strings"
- "github.com/golang/glog"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/klog"
)
// Requirements is AND of all requirements.
@@ -162,11 +162,10 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem
}
for i := range vals {
- if err := validateLabelValue(vals[i]); err != nil {
+ if err := validateLabelValue(key, vals[i]); err != nil {
return nil, err
}
}
- sort.Strings(vals)
return &Requirement{key: key, operator: op, strValues: vals}, nil
}
@@ -212,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool {
}
lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
if err != nil {
- glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+ klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
return false
}
// There should be only one strValue in r.strValues, and it can be converted to an integer.
if len(r.strValues) != 1 {
- glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+ klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
}
@@ -226,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool {
for i := range r.strValues {
rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
if err != nil {
- glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
+ klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
return false
}
}
@@ -299,7 +298,9 @@ func (r *Requirement) String() string {
if len(r.strValues) == 1 {
buffer.WriteString(r.strValues[0])
} else { // only > 1 since == 0 prohibited by NewRequirement
- buffer.WriteString(strings.Join(r.strValues, ","))
+ // normalizes value order on output, without mutating the in-memory selector representation
+ // also avoids normalization when it is not required, and ensures we do not mutate shared data
+ buffer.WriteString(strings.Join(safeSort(r.strValues), ","))
}
switch r.operator {
@@ -309,6 +310,17 @@ func (r *Requirement) String() string {
return buffer.String()
}
+// safeSort sorts the input strings without modifying the original slice.
+func safeSort(in []string) []string {
+ if sort.StringsAreSorted(in) {
+ return in
+ }
+ out := make([]string, len(in))
+ copy(out, in)
+ sort.Strings(out)
+ return out
+}
+
// Add adds requirements to the selector. It copies the current selector returning a new one
func (lsel internalSelector) Add(reqs ...Requirement) Selector {
var sel internalSelector
@@ -825,9 +837,9 @@ func validateLabelKey(k string) error {
return nil
}
-func validateLabelValue(v string) error {
+func validateLabelValue(k, v string) error {
if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
- return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; "))
}
return nil
}
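The dropped sort.Strings(vals) plus the new safeSort mean a caller's value slice is no longer mutated, while String() output stays sorted. A minimal sketch (key and values are made up, not from the vendored sources):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	vals := []string{"zeta", "alpha"}
	// NewRequirement no longer sorts the caller's slice in place.
	req, err := labels.NewRequirement("tier", selection.In, vals)
	if err != nil {
		panic(err)
	}
	// String() still prints the values in sorted order via safeSort,
	// but the slice passed in above is left untouched.
	fmt.Println(req.String()) // tier in (alpha,zeta)
	fmt.Println(vals)         // [zeta alpha]
}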
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
index a536f9ec9..4d482947f 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
index 10dc12cca..284e32bc3 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -76,24 +76,6 @@ func EncodeOrDie(e Encoder, obj Object) string {
return string(bytes)
}
-// DefaultingSerializer invokes defaulting after decoding.
-type DefaultingSerializer struct {
- Defaulter ObjectDefaulter
- Decoder Decoder
- // Encoder is optional to allow this type to be used as both a Decoder and an Encoder
- Encoder
-}
-
-// Decode performs a decode and then allows the defaulter to act on the provided object.
-func (d DefaultingSerializer) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
- obj, gvk, err := d.Decoder.Decode(data, defaultGVK, into)
- if err != nil {
- return obj, gvk, err
- }
- d.Defaulter.Default(obj)
- return obj, gvk, nil
-}
-
// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or
// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object.
func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {
@@ -301,6 +283,7 @@ var _ GroupVersioner = multiGroupVersioner{}
type multiGroupVersioner struct {
target schema.GroupVersion
acceptedGroupKinds []schema.GroupKind
+ coerce bool
}
// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.
@@ -312,6 +295,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi
return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}
}
+// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind.
+// Incoming kinds that match the provided groupKinds are preferred.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+// Examples:
+// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
+// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
+//
+// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
+//
+// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
+func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+ return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true}
+}
+
// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will
// use the originating kind where possible.
func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
@@ -326,5 +325,8 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio
return v.target.WithKind(src.Kind), true
}
}
+ if v.coerce && len(kinds) > 0 {
+ return v.target.WithKind(kinds[0].Kind), true
+ }
return schema.GroupVersionKind{}, false
}
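A rough sketch of how the new coercing versioner differs from the existing strict one; the group and kind names below are invented for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	target := schema.GroupVersion{Group: "mygroup", Version: runtime.APIVersionInternal}

	strict := runtime.NewMultiGroupVersioner(target, schema.GroupKind{Group: "mygroup"})
	coerce := runtime.NewCoercingMultiGroupVersioner(target, schema.GroupKind{Group: "mygroup"})

	// An incoming kind from a group that is not in the accepted list.
	kinds := []schema.GroupVersionKind{{Group: "othergroup", Version: "v1", Kind: "Bar"}}

	_, ok := strict.KindForGroupVersionKinds(kinds)
	fmt.Println(ok) // false: no accepted group/kind matched

	gvk, ok := coerce.KindForGroupVersionKinds(kinds)
	fmt.Println(gvk, ok) // mygroup/__internal, Kind=Bar true: coerced to the first incoming kind
}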
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
index afe4fab15..08d2abfe6 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Defines conversions between generic types and structs to map query strings
+// Package runtime defines conversions between generic types and structs to map query strings
// to struct objects.
package runtime
@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/conversion"
)
-// DefaultFieldSelectorConversion auto-accepts metav1 values for name and namespace.
+// DefaultMetaV1FieldSelectorConversion auto-accepts metav1 values for name and namespace.
// A cluster scoped resource specifying namespace empty works fine and specifying a particular
// namespace will return no results, as expected.
func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) {
@@ -82,7 +82,7 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope)
return nil
}
-// Conver_Slice_string_To_bool will convert a string parameter to boolean.
+// Convert_Slice_string_To_bool will convert a string parameter to boolean.
// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
// Any other value (including empty string) resolves to true.
func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
index f6f7c10de..80343081f 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
// UnstructuredConverter is an interface for converting between interface{}
@@ -73,7 +73,6 @@ var (
mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
stringType = reflect.TypeOf(string(""))
int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
boolType = reflect.TypeOf(bool(false))
fieldCache = newFieldsCache()
@@ -134,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i
newObj := reflect.New(t.Elem()).Interface()
newErr := fromUnstructuredViaJSON(u, newObj)
if (err != nil) != (newErr != nil) {
- glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
+ klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
}
if err == nil && !c.comparison.DeepEqual(obj, newObj) {
- glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
+ klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
}
}
return err
@@ -411,8 +410,7 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte
var u map[string]interface{}
var err error
if unstr, ok := obj.(Unstructured); ok {
- // UnstructuredContent() mutates the object so we need to make a copy first
- u = unstr.DeepCopyObject().(Unstructured).UnstructuredContent()
+ u = unstr.UnstructuredContent()
} else {
t := reflect.TypeOf(obj)
value := reflect.ValueOf(obj)
@@ -426,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte
newUnstr := map[string]interface{}{}
newErr := toUnstructuredViaJSON(obj, &newUnstr)
if (err != nil) != (newErr != nil) {
- glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
+ klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
}
if err == nil && !c.comparison.DeepEqual(u, newUnstr) {
- glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
+ klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
}
}
if err != nil {
@@ -439,22 +437,32 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte
}
// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains
-// types produced by json.Unmarshal().
+// types produced by json.Unmarshal() and also int64:
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil.
func DeepCopyJSON(x map[string]interface{}) map[string]interface{} {
return DeepCopyJSONValue(x).(map[string]interface{})
}
// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains
-// types produced by json.Unmarshal().
+// types produced by json.Unmarshal() and also int64:
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil.
func DeepCopyJSONValue(x interface{}) interface{} {
switch x := x.(type) {
case map[string]interface{}:
+ if x == nil {
+ // Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil
+ return x
+ }
clone := make(map[string]interface{}, len(x))
for k, v := range x {
clone[k] = DeepCopyJSONValue(v)
}
return clone
case []interface{}:
+ if x == nil {
+ // Typed nil - an interface{} that contains a type []interface{} with a value of nil
+ return x
+ }
clone := make([]interface{}, len(x))
for i, v := range x {
clone[i] = DeepCopyJSONValue(v)
@@ -584,10 +592,14 @@ func toUnstructured(sv, dv reflect.Value) error {
dv.Set(reflect.ValueOf(sv.Int()))
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ uVal := sv.Uint()
+ if uVal > math.MaxInt64 {
+ return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal)
+ }
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
- dv.Set(reflect.New(uint64Type))
+ dv.Set(reflect.New(int64Type))
}
- dv.Set(reflect.ValueOf(sv.Uint()))
+ dv.Set(reflect.ValueOf(int64(uVal)))
return nil
case reflect.Float32, reflect.Float64:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
@@ -734,7 +746,7 @@ func isZero(v reflect.Value) bool {
func structToUnstructured(sv, dv reflect.Value) error {
st, dt := sv.Type(), dv.Type()
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
- dv.Set(reflect.MakeMap(mapStringInterfaceType))
+ dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField()))
dv = dv.Elem()
dt = dv.Type()
}
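The typed-nil guards added above change what DeepCopyJSONValue returns for nil maps and slices; an illustrative sketch, not part of the vendored sources:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A typed nil slice stays a typed nil after the copy instead of
	// being turned into an empty, non-nil slice.
	var in []interface{}
	out := runtime.DeepCopyJSONValue(in)
	fmt.Println(out.([]interface{}) == nil) // true

	// Typed nil maps are preserved the same way.
	var m map[string]interface{}
	fmt.Println(runtime.DeepCopyJSONValue(m).(map[string]interface{}) == nil) // true
}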
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
index 06b45df66..89feb4010 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -18,20 +18,27 @@ limitations under the License.
// that follow the kubernetes API object conventions, which are:
//
// 0. Your API objects have a common metadata struct member, TypeMeta.
+//
// 1. Your code refers to an internal set of API objects.
+//
// 2. In a separate package, you have an external set of API objects.
+//
// 3. The external set is considered to be versioned, and no breaking
-// changes are ever made to it (fields may be added but not changed
-// or removed).
+// changes are ever made to it (fields may be added but not changed
+// or removed).
+//
// 4. As your api evolves, you'll make an additional versioned package
-// with every major change.
+// with every major change.
+//
// 5. Versioned packages have conversion functions which convert to
-// and from the internal version.
+// and from the internal version.
+//
// 6. You'll continue to support older versions according to your
-// deprecation policy, and you can easily provide a program/library
-// to update old versions into new versions because of 5.
+// deprecation policy, and you can easily provide a program/library
+// to update old versions into new versions because of 5.
+//
// 7. All of your serializations and deserializations are handled in a
-// centralized place.
+// centralized place.
//
// Package runtime provides a conversion helper to make 5 easy, and the
// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
@@ -41,5 +48,4 @@ limitations under the License.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
-
package runtime // import "k8s.io/apimachinery/pkg/runtime"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
index 2cdac9e14..db11eb8bc 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
@@ -31,7 +31,7 @@ type encodable struct {
func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() }
func (e encodable) DeepCopyObject() Object {
- var out encodable = e
+ out := e
out.obj = e.obj.DeepCopyObject()
copy(out.versions, e.versions)
return out
@@ -46,14 +46,14 @@ func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object
return encodable{e, obj, versions}
}
-func (re encodable) UnmarshalJSON(in []byte) error {
+func (e encodable) UnmarshalJSON(in []byte) error {
return errors.New("runtime.encodable cannot be unmarshalled from JSON")
}
// Marshal may get called on pointers or values, so implement MarshalJSON on value.
// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
-func (re encodable) MarshalJSON() ([]byte, error) {
- return Encode(re.E, re.obj)
+func (e encodable) MarshalJSON() ([]byte, error) {
+ return Encode(e.E, e.obj)
}
// NewEncodableList creates an object that will be encoded with the provided codec on demand.
@@ -70,28 +70,28 @@ func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersi
return out
}
-func (re *Unknown) UnmarshalJSON(in []byte) error {
- if re == nil {
+func (e *Unknown) UnmarshalJSON(in []byte) error {
+ if e == nil {
return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer")
}
- re.TypeMeta = TypeMeta{}
- re.Raw = append(re.Raw[0:0], in...)
- re.ContentEncoding = ""
- re.ContentType = ContentTypeJSON
+ e.TypeMeta = TypeMeta{}
+ e.Raw = append(e.Raw[0:0], in...)
+ e.ContentEncoding = ""
+ e.ContentType = ContentTypeJSON
return nil
}
// Marshal may get called on pointers or values, so implement MarshalJSON on value.
// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
-func (re Unknown) MarshalJSON() ([]byte, error) {
+func (e Unknown) MarshalJSON() ([]byte, error) {
// If ContentType is unset, we assume this is JSON.
- if re.ContentType != "" && re.ContentType != ContentTypeJSON {
+ if e.ContentType != "" && e.ContentType != ContentTypeJSON {
return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data")
}
- if re.Raw == nil {
+ if e.Raw == nil {
return []byte("null"), nil
}
- return re.Raw, nil
+ return e.Raw, nil
}
func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
index 86b24840f..be0c5edc8 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
@@ -24,38 +24,47 @@ import (
)
type notRegisteredErr struct {
- gvk schema.GroupVersionKind
- target GroupVersioner
- t reflect.Type
+ schemeName string
+ gvk schema.GroupVersionKind
+ target GroupVersioner
+ t reflect.Type
}
-func NewNotRegisteredErrForKind(gvk schema.GroupVersionKind) error {
- return &notRegisteredErr{gvk: gvk}
+func NewNotRegisteredErrForKind(schemeName string, gvk schema.GroupVersionKind) error {
+ return &notRegisteredErr{schemeName: schemeName, gvk: gvk}
}
-func NewNotRegisteredErrForType(t reflect.Type) error {
- return &notRegisteredErr{t: t}
+func NewNotRegisteredErrForType(schemeName string, t reflect.Type) error {
+ return &notRegisteredErr{schemeName: schemeName, t: t}
}
-func NewNotRegisteredErrForTarget(t reflect.Type, target GroupVersioner) error {
- return &notRegisteredErr{t: t, target: target}
+func NewNotRegisteredErrForTarget(schemeName string, t reflect.Type, target GroupVersioner) error {
+ return &notRegisteredErr{schemeName: schemeName, t: t, target: target}
+}
+
+func NewNotRegisteredGVKErrForTarget(schemeName string, gvk schema.GroupVersionKind, target GroupVersioner) error {
+ return &notRegisteredErr{schemeName: schemeName, gvk: gvk, target: target}
}
func (k *notRegisteredErr) Error() string {
if k.t != nil && k.target != nil {
- return fmt.Sprintf("%v is not suitable for converting to %q", k.t, k.target)
+ return fmt.Sprintf("%v is not suitable for converting to %q in scheme %q", k.t, k.target, k.schemeName)
+ }
+ nullGVK := schema.GroupVersionKind{}
+ if k.gvk != nullGVK && k.target != nil {
+ return fmt.Sprintf("%q is not suitable for converting to %q in scheme %q", k.gvk.GroupVersion(), k.target, k.schemeName)
}
if k.t != nil {
- return fmt.Sprintf("no kind is registered for the type %v", k.t)
+ return fmt.Sprintf("no kind is registered for the type %v in scheme %q", k.t, k.schemeName)
}
if len(k.gvk.Kind) == 0 {
- return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion())
+ return fmt.Sprintf("no version %q has been registered in scheme %q", k.gvk.GroupVersion(), k.schemeName)
}
if k.gvk.Version == APIVersionInternal {
- return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group)
+ return fmt.Sprintf("no kind %q is registered for the internal version of group %q in scheme %q", k.gvk.Kind, k.gvk.Group, k.schemeName)
}
- return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion())
+ return fmt.Sprintf("no kind %q is registered for version %q in scheme %q", k.gvk.Kind, k.gvk.GroupVersion(), k.schemeName)
}
// IsNotRegisteredError returns true if the error indicates the provided
@@ -111,3 +120,32 @@ func IsMissingVersion(err error) bool {
_, ok := err.(*missingVersionErr)
return ok
}
+
+// strictDecodingError is a base error type that is returned by a strict Decoder such
+// as UniversalStrictDecoder.
+type strictDecodingError struct {
+ message string
+ data string
+}
+
+// NewStrictDecodingError creates a new strictDecodingError object.
+func NewStrictDecodingError(message string, data string) error {
+ return &strictDecodingError{
+ message: message,
+ data: data,
+ }
+}
+
+func (e *strictDecodingError) Error() string {
+ return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message)
+}
+
+// IsStrictDecodingError returns true if the error indicates that the provided object
+// contains strictness violations.
+func IsStrictDecodingError(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*strictDecodingError)
+ return ok
+}
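A sketch of the new strict-decoding error helpers; the message and data strings are placeholders rather than output from any real decoder:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A strict decoder would construct this when it finds, for example,
	// an unknown or duplicate field in the input.
	err := runtime.NewStrictDecodingError(`unknown field "replicaas"`, "mydata")
	fmt.Println(err)                                // strict decoder error for mydata: unknown field "replicaas"
	fmt.Println(runtime.IsStrictDecodingError(err)) // true
	fmt.Println(runtime.IsStrictDecodingError(nil)) // false
}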
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
index 737e2e9ff..9056397fa 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
@@ -32,7 +32,7 @@ func (re *RawExtension) UnmarshalJSON(in []byte) error {
return nil
}
-// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value.
// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
func (re RawExtension) MarshalJSON() ([]byte, error) {
if re.Raw == nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
index f561fd476..9b15989c8 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
-// DO NOT EDIT!
/*
Package runtime is a generated protocol buffer package.
@@ -158,24 +157,6 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
-func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
-}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -744,30 +725,29 @@ func init() {
}
var fileDescriptorGenerated = []byte{
- // 395 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x6f, 0xd3, 0x30,
- 0x18, 0xc6, 0xe3, 0xb5, 0x52, 0x87, 0x5b, 0x69, 0xc8, 0x1c, 0x08, 0x3b, 0x38, 0x53, 0x4f, 0xec,
- 0x30, 0x5b, 0x1a, 0x42, 0xe2, 0xba, 0x4c, 0x93, 0x40, 0x08, 0x09, 0x59, 0xfc, 0x91, 0x38, 0xe1,
- 0x26, 0x26, 0xb3, 0x42, 0x5f, 0x47, 0x8e, 0x43, 0xd8, 0x8d, 0x8f, 0xc0, 0xc7, 0xea, 0x71, 0xc7,
- 0x9e, 0x2a, 0x1a, 0x3e, 0x04, 0x57, 0x54, 0xd7, 0x2d, 0xa5, 0x08, 0xed, 0x16, 0xbf, 0xcf, 0xf3,
- 0x7b, 0xde, 0xe7, 0x0d, 0x7e, 0x5e, 0x3e, 0xab, 0x99, 0x36, 0xbc, 0x6c, 0x26, 0xca, 0x82, 0x72,
- 0xaa, 0xe6, 0x5f, 0x14, 0xe4, 0xc6, 0xf2, 0x20, 0xc8, 0x4a, 0x4f, 0x65, 0x76, 0xad, 0x41, 0xd9,
- 0x1b, 0x5e, 0x95, 0x05, 0xb7, 0x0d, 0x38, 0x3d, 0x55, 0xbc, 0x50, 0xa0, 0xac, 0x74, 0x2a, 0x67,
- 0x95, 0x35, 0xce, 0x90, 0x64, 0x0d, 0xb0, 0x5d, 0x80, 0x55, 0x65, 0xc1, 0x02, 0x70, 0x7c, 0x56,
- 0x68, 0x77, 0xdd, 0x4c, 0x58, 0x66, 0xa6, 0xbc, 0x30, 0x85, 0xe1, 0x9e, 0x9b, 0x34, 0x9f, 0xfc,
- 0xcb, 0x3f, 0xfc, 0xd7, 0x3a, 0xef, 0xf8, 0xc9, 0xff, 0x0a, 0x34, 0x4e, 0x7f, 0xe6, 0x1a, 0x5c,
- 0xed, 0xec, 0x7e, 0x89, 0xf1, 0x29, 0x1e, 0x09, 0xd9, 0x5e, 0x7d, 0x75, 0x0a, 0x6a, 0x6d, 0x80,
- 0x3c, 0xc2, 0x3d, 0x2b, 0xdb, 0x18, 0x9d, 0xa0, 0xc7, 0xa3, 0x74, 0xd0, 0x2d, 0x92, 0x9e, 0x90,
- 0xad, 0x58, 0xcd, 0xc6, 0x1f, 0xf1, 0xe1, 0x9b, 0x9b, 0x4a, 0xbd, 0x52, 0x4e, 0x92, 0x73, 0x8c,
- 0x65, 0xa5, 0xdf, 0x29, 0xbb, 0x82, 0xbc, 0xfb, 0x5e, 0x4a, 0x66, 0x8b, 0x24, 0xea, 0x16, 0x09,
- 0xbe, 0x78, 0xfd, 0x22, 0x28, 0x62, 0xc7, 0x45, 0x4e, 0x70, 0xbf, 0xd4, 0x90, 0xc7, 0x07, 0xde,
- 0x3d, 0x0a, 0xee, 0xfe, 0x4b, 0x0d, 0xb9, 0xf0, 0xca, 0xf8, 0x17, 0xc2, 0x83, 0xb7, 0x50, 0x82,
- 0x69, 0x81, 0xbc, 0xc7, 0x87, 0x2e, 0x6c, 0xf3, 0xf9, 0xc3, 0xf3, 0x53, 0x76, 0xc7, 0x0f, 0x63,
- 0x9b, 0x7a, 0xe9, 0xfd, 0x10, 0xbe, 0x2d, 0x2c, 0xb6, 0x61, 0x9b, 0x0b, 0x0f, 0xfe, 0xbd, 0x90,
- 0x5c, 0xe0, 0xa3, 0xcc, 0x80, 0x53, 0xe0, 0xae, 0x20, 0x33, 0xb9, 0x86, 0x22, 0xee, 0xf9, 0xb2,
- 0x0f, 0x43, 0xde, 0xd1, 0xe5, 0xdf, 0xb2, 0xd8, 0xf7, 0x93, 0xa7, 0x78, 0x18, 0x46, 0xab, 0xd5,
- 0x71, 0xdf, 0xe3, 0x0f, 0x02, 0x3e, 0xbc, 0xfc, 0x23, 0x89, 0x5d, 0x5f, 0x7a, 0x36, 0x5b, 0xd2,
- 0xe8, 0x76, 0x49, 0xa3, 0xf9, 0x92, 0x46, 0xdf, 0x3a, 0x8a, 0x66, 0x1d, 0x45, 0xb7, 0x1d, 0x45,
- 0xf3, 0x8e, 0xa2, 0x1f, 0x1d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0xc3, 0x20, 0x1c, 0xfa, 0x3b, 0x00,
- 0x00, 0xff, 0xff, 0x3f, 0x1e, 0x24, 0x09, 0x85, 0x02, 0x00, 0x00,
+ // 378 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
+ 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
+ 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
+ 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
+ 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
+ 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
+ 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
+ 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
+ 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
+ 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
+ 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
+ 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
+ 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
+ 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
+ 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
+ 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
+ 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
+ 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
+ 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
+ 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
+ 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
+ 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
+ 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
+ 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
index 02e388e90..0e212ec94 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,17 +21,15 @@ syntax = 'proto2';
package k8s.io.apimachinery.pkg.runtime;
-import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
-
// Package-wide variables from generator "generated".
option go_package = "runtime";
// RawExtension is used to hold extensions in external versions.
-//
+//
// To use this, make a field which has RawExtension as its type in your external, versioned
// struct, and Object in your internal struct. You also need to register your
// various plugin types.
-//
+//
// // Internal package:
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
@@ -40,7 +38,7 @@ option go_package = "runtime";
// type PluginA struct {
// AOption string `json:"aOption"`
// }
-//
+//
// // External package:
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
@@ -49,7 +47,7 @@ option go_package = "runtime";
// type PluginA struct {
// AOption string `json:"aOption"`
// }
-//
+//
// // On the wire, the JSON will look something like this:
// {
// "kind":"MyAPIObject",
@@ -59,7 +57,7 @@ option go_package = "runtime";
// "aOption":"foo",
// },
// }
-//
+//
// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
@@ -67,13 +65,13 @@ option go_package = "runtime";
// JSON stored in RawExtension, turning it into the correct object type, and storing it
// in the Object. (TODO: In the case where the object is of an unknown type, a
// runtime.Unknown object will be created and stored.)
-//
+//
// +k8s:deepcopy-gen=true
// +protobuf=true
// +k8s:openapi-gen=true
message RawExtension {
// Raw is the underlying serialization of this object.
- //
+ //
// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
optional bytes raw = 1;
}
@@ -85,10 +83,10 @@ message RawExtension {
// ... // other fields
// }
// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
-//
+//
// TypeMeta is provided here for convenience. You may use it directly from this package or define
// your own with the same fields.
-//
+//
// +k8s:deepcopy-gen=false
// +protobuf=true
// +k8s:openapi-gen=true
@@ -105,7 +103,7 @@ message TypeMeta {
// TypeMeta features-- kind, version, etc.
// TODO: Make this object have easy access to field based accessors and settors for
// metadata and field mutatation.
-//
+//
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +protobuf=true
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
index a6c1a8d34..7bd1a3a6a 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
func SetField(src interface{}, v reflect.Value, fieldName string) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
- return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
srcValue := reflect.ValueOf(src)
if srcValue.Type().AssignableTo(field.Type()) {
@@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error {
func Field(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
- return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
destValue, err := conversion.EnforcePtr(dest)
if err != nil {
@@ -87,13 +87,13 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error {
return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type())
}
-// fieldPtr puts the address of fieldName, which must be a member of v,
+// FieldPtr puts the address of fieldName, which must be a member of v,
// into dest, which must be an address of a variable to which this field's
// address can be assigned.
func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
- return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
v, err := conversion.EnforcePtr(dest)
if err != nil {
@@ -210,3 +210,50 @@ type defaultFramer struct{}
func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r }
func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w }
+
+// WithVersionEncoder serializes an object and ensures the GVK is set.
+type WithVersionEncoder struct {
+ Version GroupVersioner
+ Encoder
+ ObjectTyper
+}
+
+// Encode does not do conversion. It sets the gvk during serialization.
+func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error {
+ gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
+ if err != nil {
+ if IsNotRegisteredError(err) {
+ return e.Encoder.Encode(obj, stream)
+ }
+ return err
+ }
+ kind := obj.GetObjectKind()
+ oldGVK := kind.GroupVersionKind()
+ gvk := gvks[0]
+ if e.Version != nil {
+ preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
+ if ok {
+ gvk = preferredGVK
+ }
+ }
+ kind.SetGroupVersionKind(gvk)
+ err = e.Encoder.Encode(obj, stream)
+ kind.SetGroupVersionKind(oldGVK)
+ return err
+}
+
+// WithoutVersionDecoder clears the group version kind of a deserialized object.
+type WithoutVersionDecoder struct {
+ Decoder
+}
+
+// Decode does not do conversion. It removes the gvk during deserialization.
+func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+ obj, gvk, err := d.Decoder.Decode(data, defaults, into)
+ if obj != nil {
+ kind := obj.GetObjectKind()
+ // clearing the gvk is just a convention of a codec
+ kind.SetGroupVersionKind(schema.GroupVersionKind{})
+ }
+ return obj, gvk, err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index 9d00f1650..bded5bf15 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -39,14 +39,14 @@ type GroupVersioner interface {
KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool)
}
-// Encoders write objects to a serialized form
+// Encoder writes objects to a serialized form
type Encoder interface {
// Encode writes an object to a stream. Implementations may return errors if the versions are
// incompatible, or if no conversion is defined.
Encode(obj Object, w io.Writer) error
}
-// Decoders attempt to load an object from data.
+// Decoder attempts to load an object from data.
type Decoder interface {
// Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
// default kind, group, and version provided. It returns a decoded object as well as the kind, group, and
@@ -91,6 +91,10 @@ type Framer interface {
type SerializerInfo struct {
// MediaType is the value that represents this serializer over the wire.
MediaType string
+ // MediaTypeType is the first part of the MediaType ("application" in "application/json").
+ MediaTypeType string
+ // MediaTypeSubType is the second part of the MediaType ("json" in "application/json").
+ MediaTypeSubType string
// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
EncodesAsText bool
// Serializer is the individual object serializer for this media type.
@@ -174,15 +178,18 @@ type ObjectVersioner interface {
// ObjectConvertor converts an object to a different version.
type ObjectConvertor interface {
- // Convert attempts to convert one object into another, or returns an error. This method does
- // not guarantee the in object is not mutated. The context argument will be passed to
- // all nested conversions.
+ // Convert attempts to convert one object into another, or returns an error. This
+ // method does not mutate the in object, but the in and out object might share data structures,
+ // i.e. the out object cannot be mutated without mutating the in object as well.
+ // The context argument will be passed to all nested conversions.
Convert(in, out, context interface{}) error
// ConvertToVersion takes the provided object and converts it the provided version. This
- // method does not guarantee that the in object is not mutated. This method is similar to
- // Convert() but handles specific details of choosing the correct output version.
+ // method does not mutate the in object, but the in and out object might share data structures,
+ // i.e. the out object cannot be mutated without mutating the in object as well.
+ // This method is similar to Convert() but handles specific details of choosing the correct
+ // output version.
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
- ConvertFieldLabel(version, kind, label, value string) (string, string, error)
+ ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error)
}
// ObjectTyper contains methods for extracting the APIVersion and Kind
@@ -203,6 +210,25 @@ type ObjectCreater interface {
New(kind schema.GroupVersionKind) (out Object, err error)
}
+// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource
+type EquivalentResourceMapper interface {
+ // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource.
+ // If subresource is specified, only equivalent resources which also have the same subresource are included.
+ // The specified resource can be included in the returned list.
+ EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource
+ // KindFor returns the kind expected by the specified resource[/subresource].
+ // A zero value is returned if the kind is unknown.
+ KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind
+}
+
+// EquivalentResourceRegistry provides an EquivalentResourceMapper interface,
+// and allows registering known resource[/subresource] -> kind
+type EquivalentResourceRegistry interface {
+ EquivalentResourceMapper
+ // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind.
+ RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind)
+}
+
// ResourceVersioner provides methods for setting and retrieving
// the resource version from an API object.
type ResourceVersioner interface {
@@ -221,7 +247,7 @@ type SelfLinker interface {
Namespace(obj Object) (string, error)
}
-// All API types registered with Scheme must support the Object interface. Since objects in a scheme are
+// Object interface must be supported by all API types registered with Scheme. Since objects in a scheme are
// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows
// serializers to set the kind, version, and group the object is represented as. An Object may choose
// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized.
@@ -234,9 +260,12 @@ type Object interface {
// to JSON allowed.
type Unstructured interface {
Object
- // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be
+ // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+ // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+ NewEmptyInstance() Unstructured
+ // UnstructuredContent returns a non-nil map with this object's contents. Values may be
// []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to
- // and from JSON.
+ // and from JSON. SetUnstructuredContent should be used to mutate the contents.
UnstructuredContent() map[string]interface{}
// SetUnstructuredContent updates the object content to match the provided map.
SetUnstructuredContent(map[string]interface{})
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
new file mode 100644
index 000000000..3ff84611a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "sync"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type equivalentResourceRegistry struct {
+ // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups).
+ // if null, or if "" is returned, resource.String() is used as the key
+ keyFunc func(resource schema.GroupResource) string
+ // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources).
+ // main resources are stored with subresource="".
+ resources map[string]map[string][]schema.GroupVersionResource
+ // kinds maps resource -> subresource -> kind
+ kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind
+ // keys caches the computed key for each GroupResource
+ keys map[schema.GroupResource]string
+
+ mutex sync.RWMutex
+}
+
+var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil)
+var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil)
+
+// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent.
+func NewEquivalentResourceRegistry() EquivalentResourceRegistry {
+ return &equivalentResourceRegistry{}
+}
+
+// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function.
+// If "" is returned by the function, GroupResource#String is used as the identity.
+// GroupResources with the same identity string are considered equivalent.
+func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry {
+ return &equivalentResourceRegistry{keyFunc: keyFunc}
+}
+
+func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.resources[r.keys[resource.GroupResource()]][subresource]
+}
+func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.kinds[resource][subresource]
+}
+func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if r.kinds == nil {
+ r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{}
+ }
+ if r.kinds[resource] == nil {
+ r.kinds[resource] = map[string]schema.GroupVersionKind{}
+ }
+ r.kinds[resource][subresource] = kind
+
+ // get the shared key of the parent resource
+ key := ""
+ gr := resource.GroupResource()
+ if r.keyFunc != nil {
+ key = r.keyFunc(gr)
+ }
+ if key == "" {
+ key = gr.String()
+ }
+
+ if r.keys == nil {
+ r.keys = map[schema.GroupResource]string{}
+ }
+ r.keys[gr] = key
+
+ if r.resources == nil {
+ r.resources = map[string]map[string][]schema.GroupVersionResource{}
+ }
+ if r.resources[key] == nil {
+ r.resources[key] = map[string][]schema.GroupVersionResource{}
+ }
+ r.resources[key][subresource] = append(r.resources[key][subresource], resource)
+}
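Possible usage of the new EquivalentResourceRegistry; the apps/deployments resources below are only examples, not anything the patch itself registers:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	reg := runtime.NewEquivalentResourceRegistry()

	// With the default identity, all versions of a GroupResource are equivalent.
	reg.RegisterKindFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, "",
		schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	reg.RegisterKindFor(schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"}, "",
		schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"})

	// Prints both registered versions of deployments.apps.
	fmt.Println(reg.EquivalentResourcesFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, ""))
	// Kind lookup for a specific resource/subresource pair.
	fmt.Println(reg.KindFor(schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"}, ""))
}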
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
index 5357628ad..28a61d5fb 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,17 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
-// DO NOT EDIT!
/*
- Package schema is a generated protocol buffer package.
+Package schema is a generated protocol buffer package.
- It is generated from these files:
- k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
+It is generated from these files:
+ k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
- It has these top-level messages:
+It has these top-level messages:
*/
package schema
@@ -48,18 +47,17 @@ func init() {
}
var fileDescriptorGenerated = []byte{
- // 202 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xce, 0xaf, 0x4e, 0x04, 0x31,
- 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x13, 0x23, 0x51, 0xd0, 0x11, 0x18, 0x34, 0x2f, 0x80,
- 0xc7, 0x75, 0xf7, 0x86, 0x6e, 0x53, 0xfa, 0x27, 0xed, 0x94, 0x04, 0xc7, 0x23, 0xf0, 0x58, 0x27,
- 0x4f, 0xae, 0x64, 0xcb, 0x8b, 0x90, 0xb4, 0x2b, 0x08, 0xc9, 0xb9, 0xfe, 0xd2, 0x7c, 0x26, 0xdf,
- 0xeb, 0x67, 0xf7, 0x58, 0x94, 0x8d, 0xe8, 0xea, 0x44, 0x39, 0x10, 0x53, 0xc1, 0x77, 0x0a, 0xc7,
- 0x98, 0x71, 0xff, 0xd0, 0xc9, 0x7a, 0x3d, 0x2f, 0x36, 0x50, 0xfe, 0xc0, 0xe4, 0x0c, 0xe6, 0x1a,
- 0xd8, 0x7a, 0xc2, 0x32, 0x2f, 0xe4, 0x35, 0x1a, 0x0a, 0x94, 0x35, 0xd3, 0x51, 0xa5, 0x1c, 0x39,
- 0xde, 0xdc, 0x0e, 0xa7, 0xfe, 0x3a, 0x95, 0x9c, 0x51, 0xbb, 0x53, 0xc3, 0x1d, 0xee, 0x8d, 0xe5,
- 0xa5, 0x4e, 0x6a, 0x8e, 0x1e, 0x4d, 0x34, 0x11, 0x3b, 0x9f, 0xea, 0x6b, 0x5f, 0x7d, 0xf4, 0xd7,
- 0x38, 0x7b, 0x78, 0xb8, 0x94, 0x53, 0xd9, 0xbe, 0xa1, 0x0d, 0x5c, 0x38, 0xff, 0x6f, 0x79, 0xba,
- 0x3b, 0x6d, 0x20, 0xce, 0x1b, 0x88, 0x75, 0x03, 0xf1, 0xd9, 0x40, 0x9e, 0x1a, 0xc8, 0x73, 0x03,
- 0xb9, 0x36, 0x90, 0xdf, 0x0d, 0xe4, 0xd7, 0x0f, 0x88, 0x97, 0xab, 0x51, 0xf4, 0x1b, 0x00, 0x00,
- 0xff, 0xff, 0xfd, 0x59, 0x57, 0x93, 0x0b, 0x01, 0x00, 0x00,
+ // 185 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30,
+ 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50,
+ 0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a,
+ 0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68,
+ 0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc,
+ 0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b,
+ 0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e,
+ 0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77,
+ 0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87,
+ 0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00,
+ 0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff,
+ 0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00,
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
index 50c2f2a63..5aeeaa100 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,8 +21,6 @@ syntax = 'proto2';
package k8s.io.apimachinery.pkg.runtime.schema;
-import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
-
// Package-wide variables from generator "generated".
option go_package = "schema";
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
index da642fa73..4c67ed598 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool {
return len(gr.Group) == 0 && len(gr.Resource) == 0
}
-func (gr *GroupResource) String() string {
+func (gr GroupResource) String() string {
if len(gr.Group) == 0 {
return gr.Resource
}
@@ -85,11 +85,10 @@ func ParseGroupKind(gk string) GroupKind {
// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed
// for each field.
func ParseGroupResource(gr string) GroupResource {
- if i := strings.Index(gr, "."); i == -1 {
- return GroupResource{Resource: gr}
- } else {
+ if i := strings.Index(gr, "."); i >= 0 {
return GroupResource{Group: gr[i+1:], Resource: gr[:i]}
}
+ return GroupResource{Resource: gr}
}
// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
@@ -112,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion {
return GroupVersion{Group: gvr.Group, Version: gvr.Version}
}
-func (gvr *GroupVersionResource) String() string {
+func (gvr GroupVersionResource) String() string {
return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
}
@@ -131,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind {
return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
}
-func (gk *GroupKind) String() string {
+func (gk GroupKind) String() string {
if len(gk.Group) == 0 {
return gk.Kind
}
@@ -282,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio
// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that
// do not use TypeMeta.
-func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) {
- if gvk == nil {
+func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) {
+ if gvk.Empty() {
return "", ""
}
return gvk.GroupVersion().String(), gvk.Kind
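A quick sketch of the ParseGroupResource rewrite and the value-receiver String()/ToAPIVersionAndKind methods; illustrative only, with made-up group/resource names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Everything after the first dot is the group; the part before it is the resource.
	fmt.Println(schema.ParseGroupResource("deployments.apps"))       // deployments.apps
	fmt.Println(schema.ParseGroupResource("deployments.apps").Group) // apps
	fmt.Println(schema.ParseGroupResource("pods"))                   // pods (empty group)

	// String() and ToAPIVersionAndKind are now declared on value receivers,
	// so they also work on non-addressable values like the ones returned above.
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	apiVersion, kind := gvk.ToAPIVersionAndKind()
	fmt.Println(apiVersion, kind) // apps/v1 Deployment
}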
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index 3d94a3041..fd37e293a 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -20,9 +20,13 @@ import (
"fmt"
"net/url"
"reflect"
+ "strings"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/naming"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@@ -59,18 +63,29 @@ type Scheme struct {
// Map from version and resource to the corresponding func to convert
// resource field labels in that version to internal version.
- fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc
+ fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
// defaulterFuncs is an array of interfaces to be called with an object to provide defaulting
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})
// converter stores all registered conversion functions. It also has
- // default coverting behavior.
+ // default converting behavior.
converter *conversion.Converter
+
+ // versionPriority is a map of groups to ordered lists of versions for those groups indicating the
+ // default priorities of these versions as registered in the scheme
+ versionPriority map[string][]string
+
+ // observedVersions keeps track of the order we've seen versions during type registration
+ observedVersions []schema.GroupVersion
+
+ // schemeName is the name of this scheme. If you don't specify a name, the stack of the NewScheme caller will be used.
+ // This is useful for error reporting to indicate the origin of the scheme.
+ schemeName string
}
-// Function to convert a field selector to internal representation.
+// FieldLabelConversionFunc converts a field selector to internal representation.
type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
// NewScheme creates a new Scheme. This scheme is pluggable by default.
@@ -80,23 +95,19 @@ func NewScheme() *Scheme {
typeToGVK: map[reflect.Type][]schema.GroupVersionKind{},
unversionedTypes: map[reflect.Type]schema.GroupVersionKind{},
unversionedKinds: map[string]reflect.Type{},
- fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
+ fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
defaulterFuncs: map[reflect.Type]func(interface{}){},
+ versionPriority: map[string][]string{},
+ schemeName: naming.GetNameFromCallsite(internalPackages...),
}
s.converter = conversion.NewConverter(s.nameFunc)
- s.AddConversionFuncs(DefaultEmbeddedConversions()...)
+ utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...))
// Enable map[string][]string conversions by default
- if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil {
- panic(err)
- }
- if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
- panic(err)
- }
- if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
- panic(err)
- }
+ utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...))
+ utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
+ utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
return s
}
@@ -111,7 +122,7 @@ func (s *Scheme) nameFunc(t reflect.Type) string {
for _, gvk := range gvks {
internalGV := gvk.GroupVersion()
- internalGV.Version = "__internal" // this is hacky and maybe should be passed in
+ internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in
internalGVK := internalGV.WithKind(gvk.Kind)
if internalType, exists := s.gvkToType[internalGVK]; exists {
@@ -141,13 +152,14 @@ func (s *Scheme) Converter() *conversion.Converter {
// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
// every version with particular schemas. Resolve this method at that point.
func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
+ s.addObservedVersion(version)
s.AddKnownTypes(version, types...)
for _, obj := range types {
t := reflect.TypeOf(obj).Elem()
gvk := version.WithKind(t.Name())
s.unversionedTypes[t] = gvk
if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old {
- panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique", old.PkgPath(), old.Name(), gvk))
+ panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique in scheme %q", old.PkgPath(), old.Name(), gvk, s.schemeName))
}
s.unversionedKinds[gvk.Kind] = t
}
@@ -158,6 +170,7 @@ func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Objec
// the struct becomes the "kind" field when encoding. Version may not be empty - use the
// APIVersionInternal constant if you have a type that does not have a formal version.
func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) {
+ s.addObservedVersion(gv)
for _, obj := range types {
t := reflect.TypeOf(obj)
if t.Kind() != reflect.Ptr {
@@ -173,6 +186,7 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) {
// your structs. Version may not be empty - use the APIVersionInternal constant if you have a
// type that does not have a formal version.
func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
+ s.addObservedVersion(gvk.GroupVersion())
t := reflect.TypeOf(obj)
if len(gvk.Version) == 0 {
panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
@@ -186,7 +200,7 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
}
if oldT, found := s.gvkToType[gvk]; found && oldT != t {
- panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name()))
+ panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v in scheme %q", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name(), s.schemeName))
}
s.gvkToType[gvk] = t
@@ -241,7 +255,7 @@ func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error
gvks, ok := s.typeToGVK[t]
if !ok {
- return nil, false, NewNotRegisteredErrForType(t)
+ return nil, false, NewNotRegisteredErrForType(s.schemeName, t)
}
_, unversionedType := s.unversionedTypes[t]
@@ -279,15 +293,7 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) {
if t, exists := s.unversionedKinds[kind.Kind]; exists {
return reflect.New(t).Interface().(Object), nil
}
- return nil, NewNotRegisteredErrForKind(kind)
-}
-
-// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern
-// (for two conversion types) to the converter. These functions are checked first during
-// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering
-// typed conversions.
-func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) {
- s.converter.AddGenericConversionFunc(fn)
+ return nil, NewNotRegisteredErrForKind(s.schemeName, kind)
}
// Log sets a logger on the scheme. For test purposes only
@@ -341,36 +347,27 @@ func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
return nil
}
-// AddGeneratedConversionFuncs registers conversion functions that were
-// automatically generated.
-func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error {
- for _, f := range conversionFuncs {
- if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil {
- return err
- }
- }
- return nil
+// AddConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+ return s.converter.RegisterUntypedConversionFunc(a, b, fn)
+}
+
+// AddGeneratedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddGeneratedConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+ return s.converter.RegisterGeneratedUntypedConversionFunc(a, b, fn)
}
// AddFieldLabelConversionFunc adds a conversion function to convert field selectors
// of the given kind from the given version to internal version representation.
-func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error {
- if s.fieldLabelConversionFuncs[version] == nil {
- s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{}
- }
-
- s.fieldLabelConversionFuncs[version][kind] = conversionFunc
+func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error {
+ s.fieldLabelConversionFuncs[gvk] = conversionFunc
return nil
}
-// AddStructFieldConversion allows you to specify a mechanical copy for a moved
-// or renamed struct field without writing an entire conversion function. See
-// the comment in conversion.Converter.SetStructFieldCopy for parameter details.
-// Call as many times as needed, even on the same fields.
-func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error {
- return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName)
-}
-
// RegisterInputDefaults sets the provided field mapping function and field matching
// as the defaults for the provided input type. The fn may be nil, in which case no
// mapping will happen by default. Use this method to register a mechanism for handling
@@ -379,7 +376,7 @@ func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappin
return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
}
-// AddTypeDefaultingFuncs registers a function that is passed a pointer to an
+// AddTypeDefaultingFunc registers a function that is passed a pointer to an
// object and can default fields on the object. These functions will be invoked
// when Default() is called. The function will never be called unless the
// defaulted object matches srcType. If this function is invoked twice with the
@@ -472,11 +469,8 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error {
// ConvertFieldLabel alters the given field label and value for a kind field selector from
// versioned representation to an unversioned one or returns an error.
-func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
- if s.fieldLabelConversionFuncs[version] == nil {
- return DefaultMetaV1FieldSelectorConversion(label, value)
- }
- conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind]
+func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) {
+ conversionFunc, ok := s.fieldLabelConversionFuncs[gvk]
if !ok {
return DefaultMetaV1FieldSelectorConversion(label, value)
}
@@ -527,7 +521,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
kinds, ok := s.typeToGVK[t]
if !ok || len(kinds) == 0 {
- return nil, NewNotRegisteredErrForType(t)
+ return nil, NewNotRegisteredErrForType(s.schemeName, t)
}
gvk, ok := target.KindForGroupVersionKinds(kinds)
@@ -540,7 +534,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
}
return copyAndSetTargetKind(copy, in, unversionedKind)
}
- return nil, NewNotRegisteredErrForTarget(t, target)
+ return nil, NewNotRegisteredErrForTarget(s.schemeName, t, target)
}
// target wants to use the existing type, set kind and return (no conversion necessary)
@@ -620,3 +614,141 @@ func setTargetKind(obj Object, kind schema.GroupVersionKind) {
}
obj.GetObjectKind().SetGroupVersionKind(kind)
}
+
+// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group,
+// and the specified order overwrites any previously specified order for this group
+func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error {
+ groups := sets.String{}
+ order := []string{}
+ for _, version := range versions {
+ if len(version.Version) == 0 || version.Version == APIVersionInternal {
+ return fmt.Errorf("internal versions cannot be prioritized: %v", version)
+ }
+
+ groups.Insert(version.Group)
+ order = append(order, version.Version)
+ }
+ if len(groups) != 1 {
+ return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", "))
+ }
+
+ s.versionPriority[groups.List()[0]] = order
+ return nil
+}
+
+// PrioritizedVersionsForGroup returns versions for a single group in priority order
+func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion {
+ ret := []schema.GroupVersion{}
+ for _, version := range s.versionPriority[group] {
+ ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+ }
+ for _, observedVersion := range s.observedVersions {
+ if observedVersion.Group != group {
+ continue
+ }
+ found := false
+ for _, existing := range ret {
+ if existing == observedVersion {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, observedVersion)
+ }
+ }
+
+ return ret
+}
+
+// PrioritizedVersionsAllGroups returns all known versions in their priority order. Group ordering is random, but
+// versions within a single group are returned in priority order.
+func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion {
+ ret := []schema.GroupVersion{}
+ for group, versions := range s.versionPriority {
+ for _, version := range versions {
+ ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+ }
+ }
+ for _, observedVersion := range s.observedVersions {
+ found := false
+ for _, existing := range ret {
+ if existing == observedVersion {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, observedVersion)
+ }
+ }
+ return ret
+}
+
+// PreferredVersionAllGroups returns the most preferred version for every group.
+// Group ordering is random.
+func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion {
+ ret := []schema.GroupVersion{}
+ for group, versions := range s.versionPriority {
+ for _, version := range versions {
+ ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+ break
+ }
+ }
+ for _, observedVersion := range s.observedVersions {
+ found := false
+ for _, existing := range ret {
+ if existing.Group == observedVersion.Group {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, observedVersion)
+ }
+ }
+
+ return ret
+}
+
+// IsGroupRegistered returns true if types for the group have been registered with the scheme
+func (s *Scheme) IsGroupRegistered(group string) bool {
+ for _, observedVersion := range s.observedVersions {
+ if observedVersion.Group == group {
+ return true
+ }
+ }
+ return false
+}
+
+// IsVersionRegistered returns true if types for the version have been registered with the scheme
+func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool {
+ for _, observedVersion := range s.observedVersions {
+ if observedVersion == version {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s *Scheme) addObservedVersion(version schema.GroupVersion) {
+ if len(version.Version) == 0 || version.Version == APIVersionInternal {
+ return
+ }
+ for _, observedVersion := range s.observedVersions {
+ if observedVersion == version {
+ return
+ }
+ }
+
+ s.observedVersions = append(s.observedVersions, version)
+}
+
+func (s *Scheme) Name() string {
+ return s.schemeName
+}
+
+// internalPackages are packages that are ignored when creating the default scheme name. These packages are in the
+// common call chains to NewScheme, so they'd be low-entropy names for schemes.
+var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
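
The new version-priority API above is small enough to show in isolation. The following is a minimal, hypothetical sketch (the group name "example.io" and the main wrapper are illustrative, not part of this patch); it relies only on NewScheme, SetVersionPriority, and PrioritizedVersionsForGroup as declared in the hunk above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Declare that v1 is preferred over v1beta1 for the hypothetical group
	// "example.io". Setting a priority does not register any types by itself.
	s := runtime.NewScheme()
	v1 := schema.GroupVersion{Group: "example.io", Version: "v1"}
	v1beta1 := schema.GroupVersion{Group: "example.io", Version: "v1beta1"}
	if err := s.SetVersionPriority(v1, v1beta1); err != nil {
		panic(err)
	}

	// Versions come back in the order they were prioritized; any additional
	// versions observed during type registration are appended after them.
	fmt.Println(s.PrioritizedVersionsForGroup("example.io"))
}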
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index 65f451124..01f56c987 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -17,9 +17,13 @@ limitations under the License.
package serializer
import (
+ "mime"
+ "strings"
+
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)
@@ -48,6 +52,8 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []seri
jsonSerializer := json.NewSerializer(mf, scheme, scheme, false)
jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true)
yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme)
+ serializer := protobuf.NewSerializer(scheme, scheme)
+ raw := protobuf.NewRawSerializer(scheme, scheme)
serializers := []serializerType{
{
@@ -68,6 +74,15 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []seri
EncodesAsText: true,
Serializer: yamlSerializer,
},
+ {
+ AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
+ ContentType: runtime.ContentTypeProtobuf,
+ FileExtensions: []string{"pb"},
+ Serializer: serializer,
+
+ Framer: protobuf.LengthDelimitedFramer,
+ StreamSerializer: raw,
+ },
}
for _, fn := range serializerExtensions {
@@ -120,6 +135,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
Serializer: d.Serializer,
PrettySerializer: d.PrettySerializer,
}
+
+ mediaType, _, err := mime.ParseMediaType(info.MediaType)
+ if err != nil {
+ panic(err)
+ }
+ parts := strings.SplitN(mediaType, "/", 2)
+ info.MediaTypeType = parts[0]
+ info.MediaTypeSubType = parts[1]
+
if d.StreamSerializer != nil {
info.StreamSerializer = &runtime.StreamSerializerInfo{
Serializer: d.StreamSerializer,
@@ -148,6 +172,12 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
}
}
+// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
+// caller requests it.
+func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
+ return WithoutConversionCodecFactory{f}
+}
+
// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
return f.accepts
@@ -215,23 +245,30 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou
return f.CodecForVersions(encoder, nil, gv, nil)
}
-// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion.
-type DirectCodecFactory struct {
+// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
+// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
+// will be unnecessary when we change the signature of NegotiatedSerializer.
+type WithoutConversionCodecFactory struct {
CodecFactory
}
-// EncoderForVersion returns an encoder that does not do conversion.
-func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
- return versioning.DirectEncoder{
+// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
+// when serialized.
+func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
+ return runtime.WithVersionEncoder{
Version: version,
Encoder: serializer,
ObjectTyper: f.CodecFactory.scheme,
}
}
-// DecoderToVersion returns an decoder that does not do conversion. gv is ignored.
-func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
- return versioning.DirectDecoder{
+// DecoderToVersion returns a decoder that does not do conversion.
+func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+ return runtime.WithoutVersionDecoder{
Decoder: serializer,
}
}
+
+// DirectCodecFactory was renamed to WithoutConversionCodecFactory in 1.15.
+// TODO: remove in 1.16.
+type DirectCodecFactory = WithoutConversionCodecFactory
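
A minimal sketch of how a caller might obtain the no-conversion serializer after this rename; it assumes the package's existing NewCodecFactory constructor, which is not part of this hunk, and the helper name is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// negotiatedWithoutConversion returns a NegotiatedSerializer whose encoders and
// decoders ignore requests to convert between versions, mirroring the old
// DirectCodecFactory behaviour.
func negotiatedWithoutConversion(scheme *runtime.Scheme) runtime.NegotiatedSerializer {
	return serializer.NewCodecFactory(scheme).WithoutConversion()
}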
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
index 111f056c8..69ada8ecf 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -22,8 +22,9 @@ import (
"strconv"
"unsafe"
- "github.com/ghodss/yaml"
jsoniter "github.com/json-iterator/go"
+ "github.com/modern-go/reflect2"
+ "sigs.k8s.io/yaml"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -34,83 +35,134 @@ import (
// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
// is not nil, the object has the group, version, and kind fields set.
+// Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
- return &Serializer{
- meta: meta,
- creater: creater,
- typer: typer,
- yaml: false,
- pretty: pretty,
- }
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
}
// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
// matches JSON, and will error if constructs are used that do not serialize to JSON.
+// Deprecated: use NewSerializerWithOptions instead.
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+ return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
+}
+
+// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
+// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
+// and are immutable.
+func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
return &Serializer{
meta: meta,
creater: creater,
typer: typer,
- yaml: true,
+ options: options,
}
}
+// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
+// example:
+// (1) To configure a JSON serializer, set `Yaml` to `false`.
+// (2) To configure a YAML serializer, set `Yaml` to `true`.
+// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
+type SerializerOptions struct {
+ // Yaml: configures the Serializer to work with JSON(false) or YAML(true).
+ // When `Yaml` is enabled, this serializer only supports the subset of YAML that
+ // matches JSON, and will error if constructs are used that do not serialize to JSON.
+ Yaml bool
+
+ // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output.
+ // This option is silently ignored when `Yaml` is `true`.
+ Pretty bool
+
+ // Strict: configures the Serializer to return strictDecodingErrors when duplicate fields are present while decoding JSON or YAML.
+ // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
+ Strict bool
+}
+
type Serializer struct {
meta MetaFactory
+ options SerializerOptions
creater runtime.ObjectCreater
typer runtime.ObjectTyper
- yaml bool
- pretty bool
}
// Serializer implements Serializer
var _ runtime.Serializer = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}
-func init() {
- // Force jsoniter to decode number to interface{} via ints, if possible.
- decodeNumberAsInt64IfPossible := func(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- var number json.Number
- iter.ReadVal(&number)
- u64, err := strconv.ParseUint(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = u64
- return
- }
- i64, err := strconv.ParseInt(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = i64
- return
- }
- f64, err := strconv.ParseFloat(string(number), 64)
- if err == nil {
- *(*interface{})(ptr) = f64
- return
- }
- // Not much we can do here.
- default:
- *(*interface{})(ptr) = iter.Read()
+type customNumberExtension struct {
+ jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+ if typ.String() == "interface {}" {
+ return customNumberDecoder{}
+ }
+ return nil
+}
+
+type customNumberDecoder struct {
+}
+
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ switch iter.WhatIsNext() {
+ case jsoniter.NumberValue:
+ var number jsoniter.Number
+ iter.ReadVal(&number)
+ i64, err := strconv.ParseInt(string(number), 10, 64)
+ if err == nil {
+ *(*interface{})(ptr) = i64
+ return
+ }
+ f64, err := strconv.ParseFloat(string(number), 64)
+ if err == nil {
+ *(*interface{})(ptr) = f64
+ return
}
+ iter.ReportError("DecodeNumber", err.Error())
+ default:
+ *(*interface{})(ptr) = iter.Read()
}
- jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible)
}
// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
// case-sensitive when unmarshalling, and otherwise compatible with
// the encoding/json standard library.
func CaseSensitiveJsonIterator() jsoniter.API {
- return jsoniter.Config{
+ config := jsoniter.Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ CaseSensitive: true,
+ }.Froze()
+ // Force jsoniter to decode number to interface{} via int64/float64, if possible.
+ config.RegisterExtension(&customNumberExtension{})
+ return config
+}
+
+// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
+// the encoding/json standard library.
+func StrictCaseSensitiveJsonIterator() jsoniter.API {
+ config := jsoniter.Config{
EscapeHTML: true,
SortMapKeys: true,
ValidateJsonRawMessage: true,
CaseSensitive: true,
+ DisallowUnknownFields: true,
}.Froze()
+ // Force jsoniter to decode number to interface{} via int64/float64, if possible.
+ config.RegisterExtension(&customNumberExtension{})
+ return config
}
+// Private copies of jsoniter to try to shield against possible mutations
+// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
+// in some other library will mess with every usage of the jsoniter library in the whole program.
+// See https://github.com/json-iterator/go/issues/265
var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
+var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator()
// gvkWithDefaults returns group kind and version defaulting from provided default
func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
@@ -147,7 +199,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
}
data := originalData
- if s.yaml {
+ if s.options.Yaml {
altered, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, nil, err
@@ -203,12 +255,38 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
return nil, actual, err
}
+
+ // If the deserializer is non-strict, return successfully here.
+ if !s.options.Strict {
+ return obj, actual, nil
+ }
+
+ // In strict mode, pass the data through the YAMLToJSONStrict converter.
+ // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
+ // the output would equal the input, unless there is a parsing error such as duplicate fields.
+ // As we know this was successful in the non-strict case, the only error that may be returned here
+ // is because of the newly-added strictness. Hence we know we can return the typed strictDecodingError;
+ // the actual error is that the object contains duplicate fields.
+ altered, err := yaml.YAMLToJSONStrict(originalData)
+ if err != nil {
+ return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+ }
+ // As performance is not an issue for now for the strict deserializer (one has to do the
+ // unmarshal twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
+ // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is
+ // due to a field that doesn't exist in the object. Hence we can return a typed strictDecodingError;
+ // the actual error is that the object contains an unknown field.
+ strictObj := obj.DeepCopyObject()
+ if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil {
+ return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+ }
+ // Always return the same object as the non-strict serializer to avoid any deviations.
return obj, actual, nil
}
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
- if s.yaml {
+ if s.options.Yaml {
json, err := caseSensitiveJsonIterator.Marshal(obj)
if err != nil {
return err
@@ -221,7 +299,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
return err
}
- if s.pretty {
+ if s.options.Pretty {
data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ")
if err != nil {
return err
@@ -235,7 +313,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
// RecognizesData implements the RecognizingDecoder interface.
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
- if s.yaml {
+ if s.options.Yaml {
// we could potentially look for '---'
return false, true, nil
}
@@ -260,7 +338,7 @@ func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
return framer.NewJSONFramedReader(r)
}
-// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
+// YAMLFramer is the default YAML framing behavior, with '---' delimiting individual documents.
var YAMLFramer = yamlFramer{}
type yamlFramer struct{}
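
Since NewSerializer and NewYAMLSerializer are now deprecated in favour of NewSerializerWithOptions, here is a hedged sketch of building a strict YAML serializer. It assumes the package's DefaultMetaFactory value (defined elsewhere in this package, not in this hunk) and uses the scheme as both ObjectCreater and ObjectTyper; the function name is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

// newStrictYAMLSerializer builds a YAML serializer whose Decode reports
// duplicate and unknown fields as strict decoding errors. Pretty is ignored
// whenever Yaml is true.
func newStrictYAMLSerializer(scheme *runtime.Scheme) *runtimejson.Serializer {
	return runtimejson.NewSerializerWithOptions(
		runtimejson.DefaultMetaFactory, scheme, scheme,
		runtimejson.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
	)
}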
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
index 8d4ea7118..8af889d35 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -20,10 +20,12 @@ import (
"bytes"
"fmt"
"io"
+ "net/http"
"reflect"
"github.com/gogo/protobuf/proto"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
@@ -50,6 +52,15 @@ func (e errNotMarshalable) Error() string {
return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
}
+func (e errNotMarshalable) Status() metav1.Status {
+ return metav1.Status{
+ Status: metav1.StatusFailure,
+ Code: http.StatusNotAcceptable,
+ Reason: metav1.StatusReason("NotAcceptable"),
+ Message: e.Error(),
+ }
+}
+
func IsNotMarshalable(err error) bool {
_, ok := err.(errNotMarshalable)
return err != nil && ok
@@ -58,22 +69,18 @@ func IsNotMarshalable(err error) bool {
// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
-//
-// This encoding scheme is experimental, and is subject to change at any time.
-func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer {
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
return &Serializer{
- prefix: protoEncodingPrefix,
- creater: creater,
- typer: typer,
- contentType: defaultContentType,
+ prefix: protoEncodingPrefix,
+ creater: creater,
+ typer: typer,
}
}
type Serializer struct {
- prefix []byte
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
- contentType string
+ prefix []byte
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
}
var _ runtime.Serializer = &Serializer{}
@@ -127,7 +134,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
*intoUnknown = unk
if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
- intoUnknown.ContentType = s.contentType
+ intoUnknown.ContentType = runtime.ContentTypeProtobuf
}
return intoUnknown, &actual, nil
}
@@ -292,20 +299,18 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
//
// This encoding scheme is experimental, and is subject to change at any time.
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer {
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
return &RawSerializer{
- creater: creater,
- typer: typer,
- contentType: defaultContentType,
+ creater: creater,
+ typer: typer,
}
}
// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
// type).
type RawSerializer struct {
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
- contentType string
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
}
var _ runtime.Serializer = &RawSerializer{}
@@ -347,7 +352,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
intoUnknown.Raw = data
intoUnknown.ContentEncoding = ""
- intoUnknown.ContentType = s.contentType
+ intoUnknown.ContentType = runtime.ContentTypeProtobuf
intoUnknown.SetGroupVersionKind(*actual)
return intoUnknown, actual, nil
}
@@ -400,6 +405,9 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
if err := proto.Unmarshal(data, pb); err != nil {
return nil, actual, err
}
+ if actual != nil {
+ obj.GetObjectKind().SetGroupVersionKind(*actual)
+ }
return obj, actual, nil
}
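
With the content type now fixed to runtime.ContentTypeProtobuf, the constructors take only a creater and a typer. A small sketch of the new call sites; the wiring mirrors what codec_factory.go now does, and the helper name is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

// protobufSerializers builds the normal and raw protobuf serializers for a
// scheme using the new two-argument constructors; the content type they report
// is always runtime.ContentTypeProtobuf.
func protobufSerializers(scheme *runtime.Scheme) (*protobuf.Serializer, *protobuf.RawSerializer) {
	return protobuf.NewSerializer(scheme, scheme), protobuf.NewRawSerializer(scheme, scheme)
}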
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
deleted file mode 100644
index 545cf78df..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serializer
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
-)
-
-const (
- // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from
- // depending on it unintentionally.
- // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the
- // CodecFactory on initialization.
- contentTypeProtobuf = "application/vnd.kubernetes.protobuf"
-)
-
-func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) {
- serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf)
- raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf)
- return serializerType{
- AcceptContentTypes: []string{contentTypeProtobuf},
- ContentType: contentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: serializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: raw,
- }, true
-}
-
-func init() {
- serializerExtensions = append(serializerExtensions, protobufSerializer)
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
index 91fd4ed4f..a60a7c041 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
reader: r,
decoder: d,
buf: make([]byte, 1024),
- maxBytes: 1024 * 1024,
+ maxBytes: 16 * 1024 * 1024,
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
index b717fe8fe..a04a2e98b 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -18,23 +18,13 @@ package versioning
import (
"io"
+ "reflect"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
-// NewCodecForScheme is a convenience method for callers that are using a scheme.
-func NewCodecForScheme(
- // TODO: I should be a scheme interface?
- scheme *runtime.Scheme,
- encoder runtime.Encoder,
- decoder runtime.Decoder,
- encodeVersion runtime.GroupVersioner,
- decodeVersion runtime.GroupVersioner,
-) runtime.Codec {
- return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, nil, encodeVersion, decodeVersion)
-}
-
// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
func NewDefaultingCodecForScheme(
// TODO: I should be a scheme interface?
@@ -44,7 +34,7 @@ func NewDefaultingCodecForScheme(
encodeVersion runtime.GroupVersioner,
decodeVersion runtime.GroupVersioner,
) runtime.Codec {
- return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion)
+ return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
}
// NewCodec takes objects in their internal versions and converts them to external versions before
@@ -59,6 +49,7 @@ func NewCodec(
defaulter runtime.ObjectDefaulter,
encodeVersion runtime.GroupVersioner,
decodeVersion runtime.GroupVersioner,
+ originalSchemeName string,
) runtime.Codec {
internal := &codec{
encoder: encoder,
@@ -70,6 +61,8 @@ func NewCodec(
encodeVersion: encodeVersion,
decodeVersion: decodeVersion,
+
+ originalSchemeName: originalSchemeName,
}
return internal
}
@@ -84,6 +77,9 @@ type codec struct {
encodeVersion runtime.GroupVersioner
decodeVersion runtime.GroupVersioner
+
+ // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
+ originalSchemeName string
}
// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
@@ -95,26 +91,28 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
into = versioned.Last()
}
- obj, gvk, err := c.decoder.Decode(data, defaultGVK, into)
+ // If the into object is unstructured and expresses an opinion about its group/version,
+ // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
+ decodeInto := into
+ if into != nil {
+ if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
+ decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
+ }
+ }
+
+ obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
if err != nil {
return nil, gvk, err
}
if d, ok := obj.(runtime.NestedObjectDecoder); ok {
- if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil {
+ if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
return nil, gvk, err
}
}
// if we specify a target, use generic conversion.
if into != nil {
- if into == obj {
- if isVersioned {
- return versioned, gvk, nil
- }
- return into, gvk, nil
- }
-
// perform defaulting if requested
if c.defaulter != nil {
// create a copy to ensure defaulting is not applied to the original versioned objects
@@ -128,6 +126,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
}
}
+ // Short-circuit conversion if the into object is the same as the decoded object
+ if into == obj {
+ if isVersioned {
+ return versioned, gvk, nil
+ }
+ return into, gvk, nil
+ }
+
if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
return nil, gvk, err
}
@@ -166,9 +172,27 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
// Encode ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
- switch obj.(type) {
- case *runtime.Unknown, runtime.Unstructured:
+ switch obj := obj.(type) {
+ case *runtime.Unknown:
return c.encoder.Encode(obj, w)
+ case runtime.Unstructured:
+ // An unstructured list can contain objects of multiple group version kinds. Don't short-circuit just
+ // because the top-level type matches our desired destination type; actually send the object to the converter
+ // to give it a chance to convert the list items if needed.
+ if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+ // Avoid a conversion round trip if the GVK is already correct or is empty (yes, this is a hack, but it's the old behaviour kubectl relies on)
+ objGVK := obj.GetObjectKind().GroupVersionKind()
+ if len(objGVK.Version) == 0 {
+ return c.encoder.Encode(obj, w)
+ }
+ targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
+ if !ok {
+ return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
+ }
+ if targetGVK == objGVK {
+ return c.encoder.Encode(obj, w)
+ }
+ }
}
gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
@@ -176,84 +200,41 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
return err
}
+ objectKind := obj.GetObjectKind()
+ old := objectKind.GroupVersionKind()
+ // restore the old GVK after encoding
+ defer objectKind.SetGroupVersionKind(old)
+
if c.encodeVersion == nil || isUnversioned {
if e, ok := obj.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+ if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
return err
}
}
- objectKind := obj.GetObjectKind()
- old := objectKind.GroupVersionKind()
objectKind.SetGroupVersionKind(gvks[0])
- err = c.encoder.Encode(obj, w)
- objectKind.SetGroupVersionKind(old)
- return err
+ return c.encoder.Encode(obj, w)
}
// Perform a conversion if necessary
- objectKind := obj.GetObjectKind()
- old := objectKind.GroupVersionKind()
out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
if err != nil {
return err
}
if e, ok := out.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+ if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
return err
}
}
// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
- err = c.encoder.Encode(out, w)
- // restore the old GVK, in case conversion returned the same object
- objectKind.SetGroupVersionKind(old)
- return err
+ return c.encoder.Encode(out, w)
}
-// DirectEncoder serializes an object and ensures the GVK is set.
-type DirectEncoder struct {
- Version runtime.GroupVersioner
- runtime.Encoder
- runtime.ObjectTyper
-}
+// DirectEncoder was moved and renamed to runtime.WithVersionEncoder in 1.15.
+// TODO: remove in 1.16.
+type DirectEncoder = runtime.WithVersionEncoder
-// Encode does not do conversion. It sets the gvk during serialization.
-func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error {
- gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
- if err != nil {
- if runtime.IsNotRegisteredError(err) {
- return e.Encoder.Encode(obj, stream)
- }
- return err
- }
- kind := obj.GetObjectKind()
- oldGVK := kind.GroupVersionKind()
- gvk := gvks[0]
- if e.Version != nil {
- preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
- if ok {
- gvk = preferredGVK
- }
- }
- kind.SetGroupVersionKind(gvk)
- err = e.Encoder.Encode(obj, stream)
- kind.SetGroupVersionKind(oldGVK)
- return err
-}
-
-// DirectDecoder clears the group version kind of a deserialized object.
-type DirectDecoder struct {
- runtime.Decoder
-}
-
-// Decode does not do conversion. It removes the gvk during deserialization.
-func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- obj, gvk, err := d.Decoder.Decode(data, defaults, into)
- if obj != nil {
- kind := obj.GetObjectKind()
- // clearing the gvk is just a convention of a codec
- kind.SetGroupVersionKind(schema.GroupVersionKind{})
- }
- return obj, gvk, err
-}
+// DirectDecoder was moved and renamed to runtime.WithoutVersionDecoder in 1.15.
+// TODO: remove in 1.16.
+type DirectDecoder = runtime.WithoutVersionDecoder
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index e4515d8ed..3d3ebe5f9 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -41,7 +41,9 @@ type TypeMeta struct {
}
const (
- ContentTypeJSON string = "application/json"
+ ContentTypeJSON string = "application/json"
+ ContentTypeYAML string = "application/yaml"
+ ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
)
// RawExtension is used to hold extensions in external versions.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
index ba89cd235..8b9182f35 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -28,9 +28,7 @@ func (in *RawExtension) DeepCopyInto(out *RawExtension) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
- if in.Object == nil {
- out.Object = nil
- } else {
+ if in.Object != nil {
out.Object = in.Object.DeepCopyObject()
}
return
@@ -83,9 +81,7 @@ func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) {
in, out := &in.Objects, &out.Objects
*out = make([]Object, len(*in))
for i := range *in {
- if (*in)[i] == nil {
- (*out)[i] = nil
- } else {
+ if (*in)[i] != nil {
(*out)[i] = (*in)[i].DeepCopyObject()
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
index 1e2130da0..88f0de36d 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
@@ -18,7 +18,6 @@ package types
import (
"fmt"
- "strings"
)
// NamespacedName comprises a resource name, with a mandatory namespace,
@@ -42,19 +41,3 @@ const (
func (n NamespacedName) String() string {
return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name)
}
-
-// NewNamespacedNameFromString parses the provided string and returns a NamespacedName.
-// The expected format is as per String() above.
-// If the input string is invalid, the returned NamespacedName has all empty string field values.
-// This allows a single-value return from this function, while still allowing error checks in the caller.
-// Note that an input string which does not include exactly one Separator is not a valid input (as it could never
-// have been returned by String() )
-func NewNamespacedNameFromString(s string) NamespacedName {
- nn := NamespacedName{}
- result := strings.Split(s, string(Separator))
- if len(result) == 2 {
- nn.Namespace = result[0]
- nn.Name = result[1]
- }
- return nn
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
index d522d1dbd..fe8ecaaff 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -25,4 +25,5 @@ const (
JSONPatchType PatchType = "application/json-patch+json"
MergePatchType PatchType = "application/merge-patch+json"
StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
+ ApplyPatchType PatchType = "application/apply-patch+yaml"
)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
index c303a212a..9567f9006 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
@@ -26,18 +26,12 @@ import (
type Clock interface {
Now() time.Time
Since(time.Time) time.Duration
- After(d time.Duration) <-chan time.Time
- NewTimer(d time.Duration) Timer
- Sleep(d time.Duration)
- Tick(d time.Duration) <-chan time.Time
+ After(time.Duration) <-chan time.Time
+ NewTimer(time.Duration) Timer
+ Sleep(time.Duration)
+ NewTicker(time.Duration) Ticker
}
-var (
- _ = Clock(RealClock{})
- _ = Clock(&FakeClock{})
- _ = Clock(&IntervalClock{})
-)
-
// RealClock really calls time.Now()
type RealClock struct{}
@@ -62,8 +56,10 @@ func (RealClock) NewTimer(d time.Duration) Timer {
}
}
-func (RealClock) Tick(d time.Duration) <-chan time.Time {
- return time.Tick(d)
+func (RealClock) NewTicker(d time.Duration) Ticker {
+ return &realTicker{
+ ticker: time.NewTicker(d),
+ }
}
func (RealClock) Sleep(d time.Duration) {
@@ -137,7 +133,7 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer {
return timer
}
-func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
+func (f *FakeClock) NewTicker(d time.Duration) Ticker {
f.lock.Lock()
defer f.lock.Unlock()
tickTime := f.time.Add(d)
@@ -149,7 +145,9 @@ func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
destChan: ch,
})
- return ch
+ return &fakeTicker{
+ c: ch,
+ }
}
// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
@@ -242,8 +240,8 @@ func (*IntervalClock) NewTimer(d time.Duration) Timer {
// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement Tick")
+func (*IntervalClock) NewTicker(d time.Duration) Ticker {
+ panic("IntervalClock doesn't implement NewTicker")
}
func (*IntervalClock) Sleep(d time.Duration) {
@@ -258,11 +256,6 @@ type Timer interface {
Reset(d time.Duration) bool
}
-var (
- _ = Timer(&realTimer{})
- _ = Timer(&fakeTimer{})
-)
-
// realTimer is backed by an actual time.Timer.
type realTimer struct {
timer *time.Timer
@@ -325,3 +318,31 @@ func (f *fakeTimer) Reset(d time.Duration) bool {
return active
}
+
+type Ticker interface {
+ C() <-chan time.Time
+ Stop()
+}
+
+type realTicker struct {
+ ticker *time.Ticker
+}
+
+func (t *realTicker) C() <-chan time.Time {
+ return t.ticker.C
+}
+
+func (t *realTicker) Stop() {
+ t.ticker.Stop()
+}
+
+type fakeTicker struct {
+ c <-chan time.Time
+}
+
+func (t *fakeTicker) C() <-chan time.Time {
+ return t.c
+}
+
+func (t *fakeTicker) Stop() {
+}
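
The Tick method is replaced by NewTicker, which returns the new Ticker interface. A brief sketch of code written against that interface (the helper name is illustrative); in tests a fake clock could be substituted for RealClock.

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

// waitOneTick blocks until the ticker fires once and then releases its
// resources via Stop, which the old channel-returning Tick never allowed.
func waitOneTick(c clock.Clock, period time.Duration) {
	t := c.NewTicker(period)
	defer t.Stop()
	<-t.C()
}

// Example call site using the real clock:
// waitOneTick(clock.RealClock{}, 10*time.Millisecond)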
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
index 88e937679..62a73f34e 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -19,6 +19,8 @@ package errors
import (
"errors"
"fmt"
+
+ "k8s.io/apimachinery/pkg/util/sets"
)
// MessageCountMap contains occurrence for each error message.
@@ -67,12 +69,38 @@ func (agg aggregate) Error() string {
if len(agg) == 1 {
return agg[0].Error()
}
- result := fmt.Sprintf("[%s", agg[0].Error())
- for i := 1; i < len(agg); i++ {
- result += fmt.Sprintf(", %s", agg[i].Error())
+ seenerrs := sets.NewString()
+ result := ""
+ agg.visit(func(err error) {
+ msg := err.Error()
+ if seenerrs.Has(msg) {
+ return
+ }
+ seenerrs.Insert(msg)
+ if len(seenerrs) > 1 {
+ result += ", "
+ }
+ result += msg
+ })
+ if len(seenerrs) == 1 {
+ return result
+ }
+ return "[" + result + "]"
+}
+
+func (agg aggregate) visit(f func(err error)) {
+ for _, err := range agg {
+ switch err := err.(type) {
+ case aggregate:
+ err.visit(f)
+ case Aggregate:
+ for _, nestedErr := range err.Errors() {
+ f(nestedErr)
+ }
+ default:
+ f(err)
+ }
}
- result += "]"
- return result
}
// Errors is part of the Aggregate interface.
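
The rewritten Error() above flattens nested aggregates and de-duplicates repeated messages. A hedged illustration, assuming the package's existing NewAggregate helper, which is not shown in this hunk.

package example

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func printAggregate() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("timed out"),
		errors.New("timed out"), // duplicate message, reported only once
		errors.New("not found"),
	})
	// Prints "[timed out, not found]" instead of repeating the duplicate.
	fmt.Println(agg.Error())
}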
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
index 7c9b791d4..50d9a366f 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -136,12 +136,12 @@ func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []strin
negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
if len(negotiatedProtocol) == 0 {
- w.WriteHeader(http.StatusForbidden)
for i := range serverProtocols {
w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
}
- fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
- return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols)
+ err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return "", err
}
w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
index 3dc8e23ae..9d222faa8 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
@@ -23,8 +23,8 @@ import (
"time"
"github.com/docker/spdystream"
- "github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/klog"
)
// connection maintains state about a spdystream.Connection and its associated
@@ -128,7 +128,7 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) {
err := c.newStreamHandler(stream, replySent)
rejectStream := (err != nil)
if rejectStream {
- glog.Warningf("Stream rejected: %v", err)
+ klog.Warningf("Stream rejected: %v", err)
stream.Reset()
return
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
index d2d3ad8cb..2699597e7 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -19,6 +19,7 @@ package spdy
import (
"bufio"
"bytes"
+ "context"
"crypto/tls"
"encoding/base64"
"fmt"
@@ -66,6 +67,9 @@ type SpdyRoundTripper struct {
// followRedirects indicates if the round tripper should examine responses for redirects and
// follow them.
followRedirects bool
+ // requireSameHostRedirects restricts redirect following to only follow redirects to the same host
+ // as the original request.
+ requireSameHostRedirects bool
}
var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
@@ -74,14 +78,18 @@ var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig.
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper {
- return NewSpdyRoundTripper(tlsConfig, followRedirects)
+func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
+ return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
}
// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig. This function is mostly meant for unit tests.
-func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper {
- return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects}
+func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
+ return &SpdyRoundTripper{
+ tlsConfig: tlsConfig,
+ followRedirects: followRedirects,
+ requireSameHostRedirects: requireSameHostRedirects,
+ }
}
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
@@ -118,7 +126,7 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
}
if proxyURL == nil {
- return s.dialWithoutProxy(req.URL)
+ return s.dialWithoutProxy(req.Context(), req.URL)
}
// ensure we use a canonical host with proxyReq
@@ -136,7 +144,7 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
proxyReq.Header.Set("Proxy-Authorization", pa)
}
- proxyDialConn, err := s.dialWithoutProxy(proxyURL)
+ proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
if err != nil {
return nil, err
}
@@ -187,14 +195,15 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
}
// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
-func (s *SpdyRoundTripper) dialWithoutProxy(url *url.URL) (net.Conn, error) {
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
dialAddr := netutil.CanonicalAddr(url)
if url.Scheme == "http" {
if s.Dialer == nil {
- return net.Dial("tcp", dialAddr)
+ var d net.Dialer
+ return d.DialContext(ctx, "tcp", dialAddr)
} else {
- return s.Dialer.Dial("tcp", dialAddr)
+ return s.Dialer.DialContext(ctx, "tcp", dialAddr)
}
}
@@ -255,7 +264,7 @@ func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
)
if s.followRedirects {
- conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s)
+ conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
} else {
clone := utilnet.CloneRequest(req)
clone.Header = header
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
index 13353988f..045d214d2 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -74,15 +74,15 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque
connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- w.WriteHeader(http.StatusBadRequest)
- fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+ errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+ http.Error(w, errorMsg, http.StatusBadRequest)
return nil
}
hijacker, ok := w.(http.Hijacker)
if !ok {
- w.WriteHeader(http.StatusInternalServerError)
- fmt.Fprintf(w, "unable to upgrade: unable to hijack response")
+ errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
+ http.Error(w, errorMsg, http.StatusInternalServerError)
return nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
index 161e9a6f8..48dd7d9c5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
-// DO NOT EDIT!
/*
Package intstr is a generated protocol buffer package.
@@ -81,24 +80,6 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
-func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
-}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
index 6819d468d..e79fb9e57 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ option go_package = "intstr";
// inner type. This allows you to have, for example, a JSON field that can
// accept a name or number.
// TODO: Rename to Int32OrString
-//
+//
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 231498ca0..5b26ed262 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -18,14 +18,15 @@ package intstr
import (
"encoding/json"
+ "errors"
"fmt"
"math"
"runtime/debug"
"strconv"
"strings"
- "github.com/golang/glog"
"github.com/google/gofuzz"
+ "k8s.io/klog"
)
// IntOrString is a type that can hold an int32 or a string. When used in
@@ -57,7 +58,7 @@ const (
// TODO: convert to (val int32)
func FromInt(val int) IntOrString {
if val > math.MaxInt32 || val < math.MinInt32 {
- glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
+ klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
}
return IntOrString{Type: Int, IntVal: int32(val)}
}
@@ -142,7 +143,17 @@ func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
}
}
+func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
+ if intOrPercent == nil {
+ return &defaultValue
+ }
+ return intOrPercent
+}
+
func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+ if intOrPercent == nil {
+ return 0, errors.New("nil value for IntOrString")
+ }
value, isPercent, err := getIntOrPercentValue(intOrPercent)
if err != nil {
return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
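Taken together, the nil guard and the new ValueOrDefault helper let callers pass optional IntOrString fields through without pre-checking for nil. A small usage sketch (FromString is one of the package's existing constructors):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// A nil *IntOrString now yields an explicit error instead of a panic.
	if _, err := intstr.GetValueFromIntOrPercent(nil, 10, true); err != nil {
		fmt.Println("nil input:", err) // nil value for IntOrString
	}

	// ValueOrDefault substitutes a default when the pointer is nil.
	def := intstr.FromString("25%")
	v := intstr.ValueOrDefault(nil, def)
	n, err := intstr.GetValueFromIntOrPercent(v, 8, true) // 25% of 8, rounded up
	fmt.Println(n, err)                                   // 2 <nil>
}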
diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
new file mode 100644
index 000000000..2965d5a8b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package naming
+
+import (
+ "fmt"
+ "regexp"
+ goruntime "runtime"
+ "runtime/debug"
+ "strconv"
+ "strings"
+)
+
+// GetNameFromCallsite walks back through the call stack until it finds a caller from outside of the ignoredPackages.
+// It returns a shortpath/filename:line to aid in identification of this reflector when it starts logging.
+func GetNameFromCallsite(ignoredPackages ...string) string {
+ name := "????"
+ const maxStack = 10
+ for i := 1; i < maxStack; i++ {
+ _, file, line, ok := goruntime.Caller(i)
+ if !ok {
+ file, line, ok = extractStackCreator()
+ if !ok {
+ break
+ }
+ i += maxStack
+ }
+ if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) {
+ continue
+ }
+
+ file = trimPackagePrefix(file)
+ name = fmt.Sprintf("%s:%d", file, line)
+ break
+ }
+ return name
+}
+
+// hasPackage returns true if the file is in one of the ignored packages.
+func hasPackage(file string, ignoredPackages []string) bool {
+ for _, ignoredPackage := range ignoredPackages {
+ if strings.Contains(file, ignoredPackage) {
+ return true
+ }
+ }
+ return false
+}
+
+// trimPackagePrefix trims the leading path up to the last vendor/, src/, or pkg/ segment, leaving a short package-relative path.
+func trimPackagePrefix(file string) string {
+ if l := strings.LastIndex(file, "/vendor/"); l >= 0 {
+ return file[l+len("/vendor/"):]
+ }
+ if l := strings.LastIndex(file, "/src/"); l >= 0 {
+ return file[l+5:]
+ }
+ if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
+ return file[l+1:]
+ }
+ return file
+}
+
+var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
+
+// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
+// if the creator cannot be located.
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
+func extractStackCreator() (string, int, bool) {
+ stack := debug.Stack()
+ matches := stackCreator.FindStringSubmatch(string(stack))
+ if matches == nil || len(matches) != 4 {
+ return "", 0, false
+ }
+ line, err := strconv.Atoi(matches[3])
+ if err != nil {
+ return "", 0, false
+ }
+ return matches[2], line, true
+}
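A short sketch of how a reflector-style caller might use the new helper; the ignored package path passed here is purely illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/naming"
)

func main() {
	// Walks up the stack, skipping frames from the listed packages (plus
	// /runtime/asm_), and reports the first outside caller as "file.go:line".
	name := naming.GetNameFromCallsite("k8s.io/apimachinery/pkg/util/naming")
	fmt.Println(name)
}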
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
index 76eb8b4fa..078f00d9b 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -19,6 +19,7 @@ package net
import (
"bufio"
"bytes"
+ "context"
"crypto/tls"
"fmt"
"io"
@@ -30,8 +31,8 @@ import (
"strconv"
"strings"
- "github.com/golang/glog"
"golang.org/x/net/http2"
+ "k8s.io/klog"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -67,14 +68,17 @@ func IsProbableEOF(err error) bool {
if uerr, ok := err.(*url.Error); ok {
err = uerr.Err
}
+ msg := err.Error()
switch {
case err == io.EOF:
return true
- case err.Error() == "http: can't write HTTP request on broken connection":
+ case msg == "http: can't write HTTP request on broken connection":
return true
- case strings.Contains(err.Error(), "connection reset by peer"):
+ case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"):
return true
- case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"):
+ case strings.Contains(msg, "connection reset by peer"):
+ return true
+ case strings.Contains(strings.ToLower(msg), "use of closed network connection"):
return true
}
return false
@@ -90,8 +94,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport {
// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
- if t.Dial == nil {
- t.Dial = defaultTransport.Dial
+ // If no custom dialer is set, use the default context dialer
+ if t.DialContext == nil && t.Dial == nil {
+ t.DialContext = defaultTransport.DialContext
}
if t.TLSHandshakeTimeout == 0 {
t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
@@ -105,10 +110,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport {
t = SetOldTransportDefaults(t)
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
- glog.Infof("HTTP2 has been explicitly disabled")
+ klog.Infof("HTTP2 has been explicitly disabled")
} else {
if err := http2.ConfigureTransport(t); err != nil {
- glog.Warningf("Transport failed http2 configuration: %v", err)
+ klog.Warningf("Transport failed http2 configuration: %v", err)
}
}
return t
@@ -119,7 +124,7 @@ type RoundTripperWrapper interface {
WrappedRoundTripper() http.RoundTripper
}
-type DialFunc func(net, addr string) (net.Conn, error)
+type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error)
func DialerFor(transport http.RoundTripper) (DialFunc, error) {
if transport == nil {
@@ -128,7 +133,18 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) {
switch transport := transport.(type) {
case *http.Transport:
- return transport.Dial, nil
+ // transport.DialContext takes precedence over transport.Dial
+ if transport.DialContext != nil {
+ return transport.DialContext, nil
+ }
+ // adapt transport.Dial to the context-aware DialFunc signature
+ if transport.Dial != nil {
+ return func(ctx context.Context, net, addr string) (net.Conn, error) {
+ return transport.Dial(net, addr)
+ }, nil
+ }
+ // otherwise return nil
+ return nil, nil
case RoundTripperWrapper:
return DialerFor(transport.WrappedRoundTripper())
default:
@@ -166,10 +182,8 @@ func FormatURL(scheme string, host string, port int, path string) *url.URL {
}
func GetHTTPClient(req *http.Request) string {
- if userAgent, ok := req.Header["User-Agent"]; ok {
- if len(userAgent) > 0 {
- return userAgent[0]
- }
+ if ua := req.UserAgent(); len(ua) != 0 {
+ return ua
}
return "unknown"
}
@@ -310,9 +324,10 @@ type Dialer interface {
// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
// originalLocation). It returns the opened net.Conn and the raw response bytes.
-func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) {
+// If requireSameHostRedirects is true, only redirects to the same host are permitted.
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) {
const (
- maxRedirects = 10
+ maxRedirects = 9 // Fail on the 10th redirect
maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
)
@@ -356,7 +371,7 @@ redirectLoop:
resp, err := http.ReadResponse(respReader, nil)
if err != nil {
// Unable to read the backend response; let the client handle it.
- glog.Warningf("Error reading backend response: %v", err)
+ klog.Warningf("Error reading backend response: %v", err)
break redirectLoop
}
@@ -376,10 +391,6 @@ redirectLoop:
resp.Body.Close() // not used
- // Reset the connection.
- intermediateConn.Close()
- intermediateConn = nil
-
// Prepare to follow the redirect.
redirectStr := resp.Header.Get("Location")
if redirectStr == "" {
@@ -393,6 +404,15 @@ redirectLoop:
if err != nil {
return nil, nil, fmt.Errorf("malformed Location header: %v", err)
}
+
+ // Only follow redirects to the same host. Otherwise, propagate the redirect response back.
+ if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
+ break redirectLoop
+ }
+
+ // Reset the connection.
+ intermediateConn.Close()
+ intermediateConn = nil
}
connToReturn := intermediateConn
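Since DialFunc is now context-aware, DialerFor prefers Transport.DialContext and wraps a legacy Dial when that is all the transport provides. A minimal sketch, assuming the utilnet alias used elsewhere in this diff:

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Context-aware dialer: returned as-is.
	modern := &http.Transport{
		DialContext: (&net.Dialer{Timeout: 5 * time.Second}).DialContext,
	}
	dial, err := utilnet.DialerFor(modern)
	fmt.Println(dial != nil, err) // true <nil>

	// Legacy Dial-only transport: wrapped so the caller still gets the
	// dial(ctx, "tcp", "host:port") shape; the context is simply ignored.
	legacy := &http.Transport{Dial: (&net.Dialer{}).Dial}
	dial, err = utilnet.DialerFor(legacy)
	fmt.Println(dial != nil, err) // true <nil>
}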
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
index 42816bd70..daf5d2496 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
@@ -26,7 +26,7 @@ import (
"strings"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
type AddressFamily uint
@@ -53,6 +53,28 @@ type RouteFile struct {
parse func(input io.Reader) ([]Route, error)
}
+// noRoutesError can be returned by ChooseBindAddress() in case of no routes
+type noRoutesError struct {
+ message string
+}
+
+func (e noRoutesError) Error() string {
+ return e.message
+}
+
+// IsNoRoutesError checks if an error is of type noRoutesError
+func IsNoRoutesError(err error) bool {
+ if err == nil {
+ return false
+ }
+ switch err.(type) {
+ case noRoutesError:
+ return true
+ default:
+ return false
+ }
+}
+
var (
v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes}
v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes}
@@ -171,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool {
return false
}
if intf.Flags&net.FlagUp != 0 {
- glog.V(4).Infof("Interface %v is up", intf.Name)
+ klog.V(4).Infof("Interface %v is up", intf.Name)
return true
}
return false
@@ -186,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool {
func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) {
if len(addrs) > 0 {
for i := range addrs {
- glog.V(4).Infof("Checking addr %s.", addrs[i].String())
+ klog.V(4).Infof("Checking addr %s.", addrs[i].String())
ip, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return nil, err
}
if memberOf(ip, family) {
if ip.IsGlobalUnicast() {
- glog.V(4).Infof("IP found %v", ip)
+ klog.V(4).Infof("IP found %v", ip)
return ip, nil
} else {
- glog.V(4).Infof("Non-global unicast address found %v", ip)
+ klog.V(4).Infof("Non-global unicast address found %v", ip)
}
} else {
- glog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
+ klog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
}
}
@@ -219,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte
if err != nil {
return nil, err
}
- glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
+ klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
matchingIP, err := getMatchingGlobalIP(addrs, forFamily)
if err != nil {
return nil, err
}
if matchingIP != nil {
- glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
+ klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
return matchingIP, nil
}
}
@@ -253,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("no interfaces found on host.")
}
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
- glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
+ klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
for _, intf := range intfs {
if !isInterfaceUp(&intf) {
- glog.V(4).Infof("Skipping: down interface %q", intf.Name)
+ klog.V(4).Infof("Skipping: down interface %q", intf.Name)
continue
}
if isLoopbackOrPointToPoint(&intf) {
- glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
+ klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
continue
}
addrs, err := nw.Addrs(&intf)
@@ -268,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, err
}
if len(addrs) == 0 {
- glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
+ klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
continue
}
for _, addr := range addrs {
@@ -277,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
}
if !memberOf(ip, family) {
- glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
+ klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
continue
}
// TODO: Decide if should open up to allow IPv6 LLAs in future.
if !ip.IsGlobalUnicast() {
- glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
+ klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
continue
}
- glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
+ klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
return ip, nil
}
}
@@ -347,7 +369,9 @@ func getAllDefaultRoutes() ([]Route, error) {
v6Routes, _ := v6File.extract()
routes = append(routes, v6Routes...)
if len(routes) == 0 {
- return nil, fmt.Errorf("No default routes.")
+ return nil, noRoutesError{
+ message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name),
+ }
}
return routes, nil
}
@@ -357,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) {
// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP.
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) {
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
- glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
+ klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
for _, route := range routes {
if route.Family != family {
continue
}
- glog.V(4).Infof("Default route transits interface %q", route.Interface)
+ klog.V(4).Infof("Default route transits interface %q", route.Interface)
finalIP, err := getIPFromInterface(route.Interface, family, nw)
if err != nil {
return nil, err
}
if finalIP != nil {
- glog.V(4).Infof("Found active IP %v ", finalIP)
+ klog.V(4).Infof("Found active IP %v ", finalIP)
return finalIP, nil
}
}
}
- glog.V(4).Infof("No active IP found by looking at default routes")
+ klog.V(4).Infof("No active IP found by looking at default routes")
return nil, fmt.Errorf("unable to select an IP from default routes.")
}
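The typed noRoutesError lets callers distinguish "no default routes" from other failures. A sketch, assuming the package's existing ChooseBindAddress(net.IP) (net.IP, error) helper that the comment above refers to (its signature is not part of this hunk):

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	ip, err := utilnet.ChooseBindAddress(nil)
	if utilnet.IsNoRoutesError(err) {
		fmt.Println("no default routes found, falling back to loopback")
		return
	}
	fmt.Println(ip, err)
}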
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
index 6a50e6186..7b6eca893 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
@@ -43,14 +43,19 @@ func (pr PortRange) String() string {
return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1)
}
-// Set parses a string of the form "min-max", inclusive at both ends, and
+// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and
// sets the PortRange from it. This is part of the flag.Value and pflag.Value
// interfaces.
func (pr *PortRange) Set(value string) error {
- value = strings.TrimSpace(value)
+ const (
+ SinglePortNotation = 1 << iota
+ HyphenNotation
+ PlusNotation
+ )
- // TODO: Accept "80" syntax
- // TODO: Accept "80+8" syntax
+ value = strings.TrimSpace(value)
+ hyphenIndex := strings.Index(value, "-")
+ plusIndex := strings.Index(value, "+")
if value == "" {
pr.Base = 0
@@ -58,20 +63,51 @@ func (pr *PortRange) Set(value string) error {
return nil
}
- hyphenIndex := strings.Index(value, "-")
- if hyphenIndex == -1 {
- return fmt.Errorf("expected hyphen in port range")
+ var err error
+ var low, high int
+ var notation int
+
+ if plusIndex == -1 && hyphenIndex == -1 {
+ notation |= SinglePortNotation
+ }
+ if hyphenIndex != -1 {
+ notation |= HyphenNotation
+ }
+ if plusIndex != -1 {
+ notation |= PlusNotation
}
- var err error
- var low int
- var high int
- low, err = strconv.Atoi(value[:hyphenIndex])
- if err == nil {
+ switch notation {
+ case SinglePortNotation:
+ var port int
+ port, err = strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ low = port
+ high = port
+ case HyphenNotation:
+ low, err = strconv.Atoi(value[:hyphenIndex])
+ if err != nil {
+ return err
+ }
high, err = strconv.Atoi(value[hyphenIndex+1:])
- }
- if err != nil {
- return fmt.Errorf("unable to parse port range: %s: %v", value, err)
+ if err != nil {
+ return err
+ }
+ case PlusNotation:
+ var offset int
+ low, err = strconv.Atoi(value[:plusIndex])
+ if err != nil {
+ return err
+ }
+ offset, err = strconv.Atoi(value[plusIndex+1:])
+ if err != nil {
+ return err
+ }
+ high = low + offset
+ default:
+ return fmt.Errorf("unable to parse port range: %s", value)
}
if low > 65535 || high > 65535 {
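With the notation flags in place, Set now accepts a single port, a hyphenated range, and a base-plus-offset form. A quick sketch of the three notations:

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	var pr utilnet.PortRange
	for _, v := range []string{"8080", "30000-32767", "30000+2767"} {
		if err := pr.Set(v); err != nil {
			fmt.Println(v, "->", err)
			continue
		}
		fmt.Printf("%-12s -> %s (base=%d, size=%d)\n", v, pr.String(), pr.Base, pr.Size)
	}
}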
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 1e7d3ce0b..3c886f46c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -22,7 +22,7 @@ import (
"sync"
"time"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
var (
@@ -62,27 +62,18 @@ func HandleCrash(additionalHandlers ...func(interface{})) {
// logPanic logs the caller tree when a panic occurs.
func logPanic(r interface{}) {
- callers := getCallers(r)
+ // Same as stdlib http server code. Manually allocate stack trace buffer size
+ // to prevent excessively large logs
+ const size = 64 << 10
+ stacktrace := make([]byte, size)
+ stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
if _, ok := r.(string); ok {
- glog.Errorf("Observed a panic: %s\n%v", r, callers)
+ klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
} else {
- glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
+ klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
}
}
-func getCallers(r interface{}) string {
- callers := ""
- for i := 0; true; i++ {
- _, file, line, ok := runtime.Caller(i)
- if !ok {
- break
- }
- callers = callers + fmt.Sprintf("%v:%v\n", file, line)
- }
-
- return callers
-}
-
// ErrorHandlers is a list of functions which will be invoked when an unreturnable
// error occurs.
// TODO(lavalamp): for testability, this and the below HandleError function
@@ -115,7 +106,7 @@ func HandleError(err error) {
// logError prints an error with the call stack of the location it was reported
func logError(err error) {
- glog.ErrorDepth(2, err)
+ klog.ErrorDepth(2, err)
}
type rudimentaryErrorBackoff struct {
@@ -132,9 +123,8 @@ func (r *rudimentaryErrorBackoff) OnError(error) {
r.lastErrorTimeLock.Lock()
defer r.lastErrorTimeLock.Unlock()
d := time.Since(r.lastErrorTime)
- if d < r.minPeriod && d >= 0 {
+ if d < r.minPeriod {
// If the time moves backwards for any reason, do nothing
- // TODO: remove check "d >= 0" after go 1.8 is no longer supported
time.Sleep(r.minPeriod - d)
}
r.lastErrorTime = time.Now()
@@ -156,12 +146,23 @@ func GetCaller() string {
// handlers to handle errors and panics the same way.
func RecoverFromPanic(err *error) {
if r := recover(); r != nil {
- callers := getCallers(r)
+ // Same as stdlib http server code. Manually allocate stack trace buffer size
+ // to prevent excessively large logs
+ const size = 64 << 10
+ stacktrace := make([]byte, size)
+ stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
*err = fmt.Errorf(
- "recovered from panic %q. (err=%v) Call stack:\n%v",
+ "recovered from panic %q. (err=%v) Call stack:\n%s",
r,
*err,
- callers)
+ stacktrace)
+ }
+}
+
+// Must panics on non-nil errors. Useful for handling programmer-level errors.
+func Must(err error) {
+ if err != nil {
+ panic(err)
}
}
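The new Must helper is aimed at initialization-time programmer errors, where an error return would otherwise be ignored or boilerplated away. A minimal sketch using a standard-library call purely as an example error source:

package main

import (
	"net/url"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	// Must panics if err is non-nil; appropriate for values that are known
	// to be valid at build time, such as hard-coded URLs or schemes.
	u, err := url.Parse("https://example.com/api")
	utilruntime.Must(err)
	_ = u
}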
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
index a460e4b1f..766f4501e 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
package sets
@@ -26,7 +26,7 @@ import (
// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
type Byte map[byte]Empty
-// New creates a Byte from a list of values.
+// NewByte creates a Byte from a list of values.
func NewByte(items ...byte) Byte {
ss := Byte{}
ss.Insert(items...)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
index 28a6a7d5c..b152a0bf0 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
// Package sets has auto-generated set types.
package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
index cd22b953a..e11e622c5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
index 0614e9fb0..a0a513cd9 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
package sets
@@ -26,7 +26,7 @@ import (
// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
type Int map[int]Empty
-// New creates a Int from a list of values.
+// NewInt creates a Int from a list of values.
func NewInt(items ...int) Int {
ss := Int{}
ss.Insert(items...)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
new file mode 100644
index 000000000..584eabc8b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption.
+type Int32 map[int32]Empty
+
+// NewInt32 creates a Int32 from a list of values.
+func NewInt32(items ...int32) Int32 {
+ ss := Int32{}
+ ss.Insert(items...)
+ return ss
+}
+
+// Int32KeySet creates an Int32 from the keys of a map[int32](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int32KeySet(theMap interface{}) Int32 {
+ v := reflect.ValueOf(theMap)
+ ret := Int32{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(int32))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Int32) Insert(items ...int32) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes all items from the set.
+func (s Int32) Delete(items ...int32) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int32) Has(item int32) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int32) HasAll(items ...int32) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int32) HasAny(items ...int32) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int32) Difference(s2 Int32) Int32 {
+ result := NewInt32()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int32) Union(s2 Int32) Int32 {
+ result := NewInt32()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int32) Intersection(s2 Int32) Int32 {
+ var walk, other Int32
+ result := NewInt32()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int32) IsSuperset(s2 Int32) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int32) Equal(s2 Int32) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt32 []int32
+
+func (s sortableSliceOfInt32) Len() int { return len(s) }
+func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) }
+func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int32 slice.
+func (s Int32) List() []int32 {
+ res := make(sortableSliceOfInt32, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []int32(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int32) UnsortedList() []int32 {
+ res := make([]int32, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// Returns a single element from the set.
+func (s Int32) PopAny() (int32, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue int32
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int32) Len() int {
+ return len(s)
+}
+
+func lessInt32(lhs, rhs int32) bool {
+ return lhs < rhs
+}
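The generated Int32 set mirrors the existing Byte/Int/Int64/String sets. A short usage sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.NewInt32(1, 2, 3)
	b := sets.NewInt32(2, 3, 4)

	fmt.Println(a.Intersection(b).List()) // [2 3]
	fmt.Println(a.Difference(b).List())   // [1]
	fmt.Println(a.Union(b).Len())         // 4
	fmt.Println(a.Has(3), b.HasAll(2, 5)) // true false
}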
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
index 82e1ba782..9ca9af0c5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
package sets
@@ -26,7 +26,7 @@ import (
// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
type Int64 map[int64]Empty
-// New creates a Int64 from a list of values.
+// NewInt64 creates a Int64 from a list of values.
func NewInt64(items ...int64) Int64 {
ss := Int64{}
ss.Insert(items...)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
index baef7a6a2..ba00ad7df 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by set-gen. Do not edit it manually!
+// Code generated by set-gen. DO NOT EDIT.
package sets
@@ -26,7 +26,7 @@ import (
// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
type String map[string]Empty
-// New creates a String from a list of values.
+// NewString creates a String from a list of values.
func NewString(items ...string) String {
ss := String{}
ss.Insert(items...)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index 31705dee3..4767fd1dd 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -48,7 +48,7 @@ func (v *Error) ErrorBody() string {
var s string
switch v.Type {
case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
- s = fmt.Sprintf("%s", v.Type)
+ s = v.Type.String()
default:
value := v.BadValue
valueType := reflect.TypeOf(value)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index 7da6a17d9..2dd99992d 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -21,6 +21,7 @@ import (
"math"
"net"
"regexp"
+ "strconv"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
@@ -86,6 +87,8 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+
+// LabelValueMaxLength is a label's max length
const LabelValueMaxLength int = 63
var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
@@ -106,6 +109,8 @@ func IsValidLabelValue(value string) []string {
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+
+// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
const DNS1123LabelMaxLength int = 63
var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
@@ -125,6 +130,8 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+
+// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253
var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
@@ -144,6 +151,8 @@ func IsDNS1123Subdomain(value string) []string {
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
+
+// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035)
const DNS1035LabelMaxLength int = 63
var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$")
@@ -281,6 +290,7 @@ const percentErrMsg string = "a valid percent string must be a numeric string fo
var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
+// IsValidPercent checks that string is in the form of a percentage
func IsValidPercent(percent string) []string {
if !percentRegexp.MatchString(percent) {
return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")}
@@ -389,3 +399,18 @@ func hasChDirPrefix(value string) []string {
}
return errs
}
+
+// IsValidSocketAddr checks that string represents a valid socket address
+// as defined in RFC 789 (e.g. 0.0.0.0:10254 or [::]:10254).
+func IsValidSocketAddr(value string) []string {
+ var errs []string
+ ip, port, err := net.SplitHostPort(value)
+ if err != nil {
+ errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)")
+ return errs
+ }
+ portInt, _ := strconv.Atoi(port)
+ errs = append(errs, IsValidPortNum(portInt)...)
+ errs = append(errs, IsValidIP(ip)...)
+ return errs
+}
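IsValidSocketAddr composes SplitHostPort with the existing IP and port validators, so both the host and the port parts are checked. A brief sketch of the kinds of inputs it accepts and rejects:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, addr := range []string{"0.0.0.0:10254", "[::]:10254", "example.com:80", "0.0.0.0"} {
		// An empty slice means the address is valid.
		fmt.Printf("%-16s -> %v\n", addr, validation.IsValidSocketAddr(addr))
	}
}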
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
index 0997de806..bc6b18d2b 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -88,6 +88,15 @@ func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
+// UntilWithContext loops until context is done, running f every period.
+//
+// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor and with sliding = true (which means the timer
+// for period starts after the f completes).
+func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+ JitterUntilWithContext(ctx, f, period, 0.0, true)
+}
+
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
@@ -98,6 +107,16 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
+// NonSlidingUntilWithContext loops until context is done, running f every
+// period.
+//
+// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor, with sliding = false (meaning the timer for period
+// starts at the same time as the function starts).
+func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+ JitterUntilWithContext(ctx, f, period, 0.0, false)
+}
+
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
@@ -151,6 +170,19 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
}
}
+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already expired.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+ JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
+}
+
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
@@ -173,10 +205,68 @@ type ConditionFunc func() (done bool, err error)
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
- Duration time.Duration // the base duration
- Factor float64 // Duration is multiplied by factor each iteration
- Jitter float64 // The amount of jitter applied each iteration
- Steps int // Exit with error after this many steps
+ // The initial duration.
+ Duration time.Duration
+ // Duration is multiplied by factor each iteration. Must be greater
+ // than or equal to zero.
+ Factor float64
+ // The amount of jitter applied each iteration. Jitter is applied after
+ // cap.
+ Jitter float64
+ // The number of steps before duration stops changing. If zero, initial
+ // duration is always used. Used for exponential backoff in combination
+ // with Factor.
+ Steps int
+ // The returned duration will never be greater than cap *before* jitter
+ // is applied. The actual maximum cap is `cap * (1.0 + jitter)`.
+ Cap time.Duration
+}
+
+// Step returns the next interval in the exponential backoff. This method
+// will mutate the provided backoff.
+func (b *Backoff) Step() time.Duration {
+ if b.Steps < 1 {
+ if b.Jitter > 0 {
+ return Jitter(b.Duration, b.Jitter)
+ }
+ return b.Duration
+ }
+ b.Steps--
+
+ duration := b.Duration
+
+ // calculate the next step
+ if b.Factor != 0 {
+ b.Duration = time.Duration(float64(b.Duration) * b.Factor)
+ if b.Cap > 0 && b.Duration > b.Cap {
+ b.Duration = b.Cap
+ b.Steps = 0
+ }
+ }
+
+ if b.Jitter > 0 {
+ duration = Jitter(duration, b.Jitter)
+ }
+ return duration
+}
+
+// contextForChannel derives a child context from a parent channel.
+//
+// The derived context's Done channel is closed when the returned cancel function
+// is called or when the parent channel is closed, whichever happens first.
+//
+// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
+func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ go func() {
+ select {
+ case <-parentCh:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+ return ctx, cancel
}
// ExponentialBackoff repeats a condition check with exponential backoff.
@@ -190,19 +280,14 @@ type Backoff struct {
// If the condition never returns true, ErrWaitTimeout is returned. All other
// errors terminate immediately.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
- duration := backoff.Duration
- for i := 0; i < backoff.Steps; i++ {
- if i != 0 {
- adjusted := duration
- if backoff.Jitter > 0.0 {
- adjusted = Jitter(duration, backoff.Jitter)
- }
- time.Sleep(adjusted)
- duration = time.Duration(float64(duration) * backoff.Factor)
- }
+ for backoff.Steps > 0 {
if ok, err := condition(); err != nil || ok {
return err
}
+ if backoff.Steps == 1 {
+ break
+ }
+ time.Sleep(backoff.Step())
}
return ErrWaitTimeout
}
@@ -230,13 +315,13 @@ func pollInternal(wait WaitFunc, condition ConditionFunc) error {
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
-// Poll always checks 'condition' before waiting for the interval. 'condition'
+// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
-// If you want to Poll something forever, see PollInfinite.
+// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
@@ -284,10 +369,32 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
-// PolUntil always waits interval before the first run of 'condition'.
+// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
- return WaitFor(poller(interval, 0), condition, stopCh)
+ ctx, cancel := contextForChannel(stopCh)
+ defer cancel()
+ return WaitFor(poller(interval, 0), condition, ctx.Done())
+}
+
+// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
+//
+// PollImmediateUntil runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+ done, err := condition()
+ if err != nil {
+ return err
+ }
+ if done {
+ return nil
+ }
+ select {
+ case <-stopCh:
+ return ErrWaitTimeout
+ default:
+ return PollUntil(interval, condition, stopCh)
+ }
}
// WaitFunc creates a channel that receives an item every time a test
@@ -297,36 +404,48 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value
-// placed on the channel and once more when the channel is closed.
+// placed on the channel and once more when the channel is closed. If the channel is closed
+// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
//
-// If 'fn' returns an error the loop ends and that error is returned, and if
+// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
//
-// ErrWaitTimeout will be returned if the channel is closed without fn ever
+// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever
// returning true.
+//
+// When the done channel is closed, because the golang `select` statement is
+// "uniform pseudo-random", the `fn` might still run one or multiple times,
+// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
- c := wait(done)
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+ c := wait(stopCh)
for {
- _, open := <-c
- ok, err := fn()
- if err != nil {
- return err
- }
- if ok {
- return nil
- }
- if !open {
- break
+ select {
+ case _, open := <-c:
+ ok, err := fn()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ if !open {
+ return ErrWaitTimeout
+ }
+ case <-done:
+ return ErrWaitTimeout
}
}
- return ErrWaitTimeout
}
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
-// closed. A timeout of 0 is interpreted as an infinity.
+// closed. A timeout of 0 is interpreted as an infinity, and in such a case
+// it would be the caller's responsibility to close the done channel.
+// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
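The reworked Backoff and the immediate-polling helper are easiest to see in use. A small sketch; the expected delays in the comment follow from Factor=2 and Cap=500ms with no jitter:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Step mutates the receiver and caps the growing duration (before jitter).
	b := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2, Steps: 5, Cap: 500 * time.Millisecond}
	for i := 0; i < 5; i++ {
		fmt.Println(b.Step()) // 100ms 200ms 400ms 500ms 500ms
	}

	// PollImmediateUntil runs the condition once before ever waiting on the
	// interval or the stop channel, so a closed channel still yields nil here.
	stop := make(chan struct{})
	close(stop)
	err := wait.PollImmediateUntil(10*time.Millisecond, func() (bool, error) { return true, nil }, stop)
	fmt.Println(err) // <nil>
}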
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index 3cd85515d..a9a3853ac 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -26,8 +26,8 @@ import (
"strings"
"unicode"
- "github.com/ghodss/yaml"
- "github.com/golang/glog"
+ "k8s.io/klog"
+ "sigs.k8s.io/yaml"
)
// ToJSON converts a single YAML document into a JSON document
@@ -217,11 +217,9 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.decoder == nil {
buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
if isJSON {
- glog.V(4).Infof("decoding stream as JSON")
d.decoder = json.NewDecoder(buffer)
d.rawData = origData
} else {
- glog.V(4).Infof("decoding stream as YAML")
d.decoder = NewYAMLToJSONDecoder(buffer)
}
}
@@ -230,7 +228,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if syntax, ok := err.(*json.SyntaxError); ok {
data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
if readErr != nil {
- glog.V(4).Infof("reading stream failed: %v", readErr)
+ klog.V(4).Infof("reading stream failed: %v", readErr)
}
js := string(data)
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
index 5e77af7ea..29574fd6d 100644
--- a/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package version supplies the type for version information collected at build time.
// +k8s:openapi-gen=true
+
+// Package version supplies the type for version information collected at build time.
package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
new file mode 100644
index 000000000..5e041d6f3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type versionType int
+
+const (
+ // Bigger the version type number, higher priority it is
+ versionTypeAlpha versionType = iota
+ versionTypeBeta
+ versionTypeGA
+)
+
+var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
+
+func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
+ var err error
+ submatches := kubeVersionRegex.FindStringSubmatch(v)
+ if len(submatches) != 4 {
+ return 0, 0, 0, false
+ }
+ switch submatches[2] {
+ case "alpha":
+ vType = versionTypeAlpha
+ case "beta":
+ vType = versionTypeBeta
+ case "":
+ vType = versionTypeGA
+ default:
+ return 0, 0, 0, false
+ }
+ if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
+ return 0, 0, 0, false
+ }
+ if vType != versionTypeGA {
+ if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
+ return 0, 0, 0, false
+ }
+ }
+ return majorVersion, vType, minorVersion, true
+}
+
+// CompareKubeAwareVersionStrings compares two kube-like version strings.
+// Kube-like version strings start with a v, followed by a major version, an optional "alpha" or "beta" string,
+// and a minor version (e.g. v1, v2beta1). Versions are sorted by GA/beta/alpha first, then by major
+// and minor version, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
+func CompareKubeAwareVersionStrings(v1, v2 string) int {
+ if v1 == v2 {
+ return 0
+ }
+ v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
+ v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
+ switch {
+ case !ok1 && !ok2:
+ return strings.Compare(v2, v1)
+ case !ok1 && ok2:
+ return -1
+ case ok1 && !ok2:
+ return 1
+ }
+ if v1type != v2type {
+ return int(v1type) - int(v2type)
+ }
+ if v1major != v2major {
+ return v1major - v2major
+ }
+ return v1minor - v2minor
+}
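CompareKubeAwareVersionStrings returns a positive number when the first argument has higher priority, so sorting descending by the comparison reproduces the order described in the comment above. A short sketch:

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	vs := []string{"v1alpha1", "v2", "v1beta1", "v1", "v1beta2"}
	sort.Slice(vs, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0
	})
	fmt.Println(vs) // [v2 v1 v1beta2 v1beta1 v1alpha1]
}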
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
index 3ca27f22c..22c9449f5 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/filter.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
@@ -62,11 +62,7 @@ func (fw *filteredWatch) Stop() {
// loop waits for new values, filters them, and resends them.
func (fw *filteredWatch) loop() {
defer close(fw.result)
- for {
- event, ok := <-fw.incoming.ResultChan()
- if !ok {
- break
- }
+ for event := range fw.incoming.ResultChan() {
filtered, keep := fw.f(event)
if keep {
fw.result <- filtered
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
index a65088c1c..0ac8dc4ef 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
@@ -204,11 +204,7 @@ func (m *Broadcaster) Shutdown() {
func (m *Broadcaster) loop() {
// Deliberately not catching crashes here. Yes, bring down the process if there's a
// bug in watch.Broadcaster.
- for {
- event, ok := <-m.incoming
- if !ok {
- break
- }
+ for event := range m.incoming {
if event.Type == internalRunFunctionMarker {
event.Object.(functionFakeRuntimeObject)()
continue
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
index 93bb1cdf7..8af256eb1 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -17,10 +17,12 @@ limitations under the License.
package watch
import (
+ "fmt"
"io"
"sync"
- "github.com/golang/glog"
+ "k8s.io/klog"
+
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -39,19 +41,28 @@ type Decoder interface {
Close()
}
+// Reporter hides the details of how an error is turned into a runtime.Object for
+// reporting on a watch stream, since this package may not import a higher-level reporting package.
+type Reporter interface {
+ // AsObject must convert err into a valid runtime.Object for the watch stream.
+ AsObject(err error) runtime.Object
+}
+
// StreamWatcher turns any stream for which you can write a Decoder interface
// into a watch.Interface.
type StreamWatcher struct {
sync.Mutex
- source Decoder
- result chan Event
- stopped bool
+ source Decoder
+ reporter Reporter
+ result chan Event
+ stopped bool
}
// NewStreamWatcher creates a StreamWatcher from the given decoder.
-func NewStreamWatcher(d Decoder) *StreamWatcher {
+func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher {
sw := &StreamWatcher{
- source: d,
+ source: d,
+ reporter: r,
// It's easy for a consumer to add buffering via an extra
// goroutine/channel, but impossible for them to remove it,
// so nonbuffered is better.
@@ -100,13 +111,15 @@ func (sw *StreamWatcher) receive() {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
- glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+ klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
default:
- msg := "Unable to decode an event from the watch stream: %v"
if net.IsProbableEOF(err) {
- glog.V(5).Infof(msg, err)
+ klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
} else {
- glog.Errorf(msg, err)
+ sw.result <- Event{
+ Type: Error,
+ Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)),
+ }
}
}
return
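Callers of NewStreamWatcher now have to supply a Reporter so decode failures reach consumers as ERROR events rather than only being logged. One possible implementation sketch, assuming metav1.Status (not part of this diff) as the error-carrying object; higher-level packages may already ship a suitable reporter:

package watchutil // hypothetical helper package

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// statusReporter turns decode errors into metav1.Status objects for the
// ERROR events emitted by StreamWatcher.
type statusReporter struct{}

func (statusReporter) AsObject(err error) runtime.Object {
	return &metav1.Status{
		Status:  metav1.StatusFailure,
		Message: err.Error(),
	}
}

// A watcher would then be constructed as:
//   watch.NewStreamWatcher(decoder, statusReporter{})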
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/until.go b/vendor/k8s.io/apimachinery/pkg/watch/until.go
deleted file mode 100644
index c2772ddb5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/watch/until.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package watch
-
-import (
- "errors"
- "time"
-
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
-// or an error if the condition cannot be checked and should terminate. In general, it is better to define
-// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
-// from false to true).
-type ConditionFunc func(event Event) (bool, error)
-
-// ErrWatchClosed is returned when the watch channel is closed before timeout in Until.
-var ErrWatchClosed = errors.New("watch closed before Until timeout")
-
-// Until reads items from the watch until each provided condition succeeds, and then returns the last watch
-// encountered. The first condition that returns an error terminates the watch (and the event is also returned).
-// If no event has been received, the returned event will be nil.
-// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
-// A zero timeout means to wait forever.
-func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc) (*Event, error) {
- ch := watcher.ResultChan()
- defer watcher.Stop()
- var after <-chan time.Time
- if timeout > 0 {
- after = time.After(timeout)
- } else {
- ch := make(chan time.Time)
- defer close(ch)
- after = ch
- }
- var lastEvent *Event
- for _, condition := range conditions {
- // check the next condition against the previous event and short circuit waiting for the next watch
- if lastEvent != nil {
- done, err := condition(*lastEvent)
- if err != nil {
- return lastEvent, err
- }
- if done {
- continue
- }
- }
- ConditionSucceeded:
- for {
- select {
- case event, ok := <-ch:
- if !ok {
- return lastEvent, ErrWatchClosed
- }
- lastEvent = &event
-
- // TODO: check for watch expired error and retry watch from latest point?
- done, err := condition(event)
- if err != nil {
- return lastEvent, err
- }
- if done {
- break ConditionSucceeded
- }
-
- case <-after:
- return lastEvent, wait.ErrWaitTimeout
- }
- }
- }
- return lastEvent, nil
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
index 5c1380b23..3945be3ae 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -44,6 +44,7 @@ const (
Added EventType = "ADDED"
Modified EventType = "MODIFIED"
Deleted EventType = "DELETED"
+ Bookmark EventType = "BOOKMARK"
Error EventType = "ERROR"
DefaultChanSize int32 = 100
@@ -57,6 +58,10 @@ type Event struct {
// Object is:
// * If Type is Added or Modified: the new state of the object.
// * If Type is Deleted: the state of the object immediately before deletion.
+ // * If Type is Bookmark: the object (instance of a type being watched) where
+ // only ResourceVersion field is set. On successful restart of watch from a
+ // bookmark resourceVersion, client is guaranteed to not get repeat event
+ // nor miss any events.
// * If Type is Error: *api.Status is recommended; other types may make sense
// depending on context.
Object runtime.Object
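
Note: the new Bookmark event only carries a ResourceVersion, which a client can record so a restarted watch resumes without replaying or missing events. A hedged sketch of how a consumer might handle it (the watch.Interface value and the restart logic are assumed, not part of this diff):

    package example

    import (
        "k8s.io/apimachinery/pkg/api/meta"
        "k8s.io/apimachinery/pkg/watch"
    )

    // consume drains a watch and returns the last resource version seen,
    // which a caller could use to restart the watch from that point.
    func consume(w watch.Interface) (lastResourceVersion string) {
        for ev := range w.ResultChan() {
            switch ev.Type {
            case watch.Bookmark:
                // Only ResourceVersion is guaranteed to be populated.
                if m, err := meta.Accessor(ev.Object); err == nil {
                    lastResourceVersion = m.GetResourceVersion()
                }
            case watch.Error:
                return lastResourceVersion // let the caller decide how to restart
            default:
                // Added/Modified/Deleted: handle the object, then record its version.
                if m, err := meta.Accessor(ev.Object); err == nil {
                    lastResourceVersion = m.GetResourceVersion()
                }
            }
        }
        return lastResourceVersion
    }
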
@@ -106,7 +111,7 @@ func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
- glog.V(4).Infof("Stopping fake watcher.")
+ klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}
@@ -173,7 +178,7 @@ func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
- glog.V(4).Infof("Stopping fake watcher.")
+ klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}
@@ -268,3 +273,50 @@ func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) {
}
}
}
+
+// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe.
+type ProxyWatcher struct {
+ result chan Event
+ stopCh chan struct{}
+
+ mutex sync.Mutex
+ stopped bool
+}
+
+var _ Interface = &ProxyWatcher{}
+
+// NewProxyWatcher creates new ProxyWatcher by wrapping a channel
+func NewProxyWatcher(ch chan Event) *ProxyWatcher {
+ return &ProxyWatcher{
+ result: ch,
+ stopCh: make(chan struct{}),
+ stopped: false,
+ }
+}
+
+// Stop implements Interface
+func (pw *ProxyWatcher) Stop() {
+ pw.mutex.Lock()
+ defer pw.mutex.Unlock()
+ if !pw.stopped {
+ pw.stopped = true
+ close(pw.stopCh)
+ }
+}
+
+// Stopping returns true if Stop() has been called
+func (pw *ProxyWatcher) Stopping() bool {
+ pw.mutex.Lock()
+ defer pw.mutex.Unlock()
+ return pw.stopped
+}
+
+// ResultChan implements Interface
+func (pw *ProxyWatcher) ResultChan() <-chan Event {
+ return pw.result
+}
+
+// StopChan returns stop channel
+func (pw *ProxyWatcher) StopChan() <-chan struct{} {
+ return pw.stopCh
+}
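
Note: ProxyWatcher is a thin, thread-safe adapter from a plain channel to watch.Interface. A small usage sketch; the event payload is an arbitrary runtime.Object chosen for illustration:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/watch"
    )

    func main() {
        ch := make(chan watch.Event, 1)
        w := watch.NewProxyWatcher(ch)

        // Producer: push events, but give up if the watcher was stopped.
        go func() {
            defer close(ch)
            select {
            case ch <- watch.Event{Type: watch.Added, Object: &metav1.Status{Message: "hello"}}:
            case <-w.StopChan():
            }
        }()

        // Consumer: read through the standard watch.Interface.
        for ev := range w.ResultChan() {
            fmt.Println(ev.Type) // ADDED
        }
        w.Stop()
    }
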
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
index b1b19d118..71ef4da33 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,9 +23,7 @@ package watch
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Event) DeepCopyInto(out *Event) {
*out = *in
- if in.Object == nil {
- out.Object = nil
- } else {
+ if in.Object != nil {
out.Object = in.Object.DeepCopyObject()
}
return
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
index 9e45dbe1d..7ed1d1cff 100644
--- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
@@ -44,7 +44,7 @@ func (e Equalities) AddFunc(eqFunc interface{}) error {
return fmt.Errorf("expected func, got: %v", ft)
}
if ft.NumIn() != 2 {
- return fmt.Errorf("expected three 'in' params, got: %v", ft)
+ return fmt.Errorf("expected two 'in' params, got: %v", ft)
}
if ft.NumOut() != 1 {
return fmt.Errorf("expected one 'out' param, got: %v", ft)
diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md
deleted file mode 100644
index 82f41d738..000000000
--- a/vendor/k8s.io/client-go/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# client-go
-
-Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster.
-
-We currently recommend using the v6.0.0 tag. See [INSTALL.md](/INSTALL.md) for
-detailed installation instructions. `go get k8s.io/client-go/...` works, but
-will give you head and doesn't handle the dependencies well.
-
-[![BuildStatus Widget]][BuildStatus Result]
-[![GoReport Widget]][GoReport Status]
-[![GoDocWidget]][GoDocReference]
-
-[BuildStatus Result]: https://travis-ci.org/kubernetes/client-go
-[BuildStatus Widget]: https://travis-ci.org/kubernetes/client-go.svg?branch=master
-
-[GoReport Status]: https://goreportcard.com/report/github.com/kubernetes/client-go
-[GoReport Widget]: https://goreportcard.com/badge/github.com/kubernetes/client-go
-
-[GoDocWidget]: https://godoc.org/k8s.io/client-go?status.svg
-[GoDocReference]:https://godoc.org/k8s.io/client-go
-
-## Table of Contents
-
-- [What's included](#whats-included)
-- [Versioning](#versioning)
- - [Compatibility: your code <-> client-go](#compatibility-your-code---client-go)
- - [Compatibility: client-go <-> Kubernetes clusters](#compatibility-client-go---kubernetes-clusters)
- - [Compatibility matrix](#compatibility-matrix)
- - [Why do the 1.4 and 1.5 branch contain top-level folder named after the version?](#why-do-the-14-and-15-branch-contain-top-level-folder-named-after-the-version)
-- [Kubernetes tags](#kubernetes-tags)
-- [How to get it](#how-to-get-it)
-- [How to use it](#how-to-use-it)
-- [Dependency management](#dependency-management)
-- [Contributing code](#contributing-code)
-
-### What's included
-
-* The `kubernetes` package contains the clientset to access Kubernetes API.
-* The `discovery` package is used to discover APIs supported by a Kubernetes API server.
-* The `dynamic` package contains a dynamic client that can perform generic operations on arbitrary Kubernetes API objects.
-* The `transport` package is used to set up auth and start a connection.
-* The `tools/cache` package is useful for writing controllers.
-
-### Versioning
-
-`client-go` follows [semver](http://semver.org/). We will not make
-backwards-incompatible changes without incrementing the major version number. A
-change is backwards-incompatible either if it *i)* changes the public interfaces
-of `client-go`, or *ii)* makes `client-go` incompatible with otherwise supported
-versions of Kubernetes clusters.
-
-Changes that add features in a backwards-compatible way will result in bumping
-the minor version (second digit) number.
-
-Bugfixes will result in the patch version (third digit) changing. PRs that are
-cherry-picked into an older Kubernetes release branch will result in an update
-to the corresponding branch in `client-go`, with a corresponding new tag
-changing the patch version.
-
-A consequence of this is that `client-go` version numbers will be unrelated to
-Kubernetes version numbers.
-
-#### Branches and tags.
-
-We will create a new branch and tag for each increment in the major version number or
-minor version number. We will create only a new tag for each increment in the patch
-version number. See [semver](http://semver.org/) for definitions of major,
-minor, and patch.
-
-The master branch will track HEAD in the main Kubernetes repo and
-accumulate changes. Consider HEAD to have the version `x.(y+1).0-alpha` or
-`(x+1).0.0-alpha` (depending on whether it has accumulated a breaking change or
-not), where `x` and `y` are the current major and minor versions.
-
-#### Compatibility: your code <-> client-go
-
-`client-go` follows [semver](http://semver.org/), so until the major version of
-client-go gets increased, your code will compile and will continue to work with
-explicitly supported versions of Kubernetes clusters. You must use a dependency
-management system and pin a specific major version of `client-go` to get this
-benefit, as HEAD follows the upstream Kubernetes repo.
-
-#### Compatibility: client-go <-> Kubernetes clusters
-
-Since Kubernetes is backwards compatible with clients, older `client-go`
-versions will work with many different Kubernetes cluster versions.
-
-We will backport bugfixes--but not new features--into older versions of
-`client-go`.
-
-
-#### Compatibility matrix
-
-| | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 |
-|---------------------|----------------|----------------|----------------|----------------|----------------|----------------|
-| client-go 1.4 | ✓ | - | - | - | - | - |
-| client-go 1.5 | + | - | - | - | - | - |
-| client-go 2.0 | +- | ✓ | +- | +- | +- | +- |
-| client-go 3.0 | +- | +- | ✓ | - | +- | +- |
-| client-go 4.0 | +- | +- | +- | ✓ | +- | +- |
-| client-go 5.0 | +- | +- | +- | +- | ✓ | +- |
-| client-go 6.0 | +- | +- | +- | +- | +- | ✓ |
-| client-go HEAD | +- | +- | +- | +- | +- | + |
-
-Key:
-
-* `✓` Exactly the same features / API objects in both client-go and the Kubernetes
- version.
-* `+` client-go has features or API objects that may not be present in the
- Kubernetes cluster, either due to that client-go has additional new API, or
- that the server has removed old API. However, everything they have in
- common (i.e., most APIs) will work. Please note that alpha APIs may vanish or
- change significantly in a single release.
-* `-` The Kubernetes cluster has features the client-go library can't use,
- either due to the server has additional new API, or that client-go has
- removed old API. However, everything they share in common (i.e., most APIs)
- will work.
-
-See the [CHANGELOG](./CHANGELOG.md) for a detailed description of changes
-between client-go versions.
-
-| Branch | Canonical source code location | Maintenance status |
-|----------------|--------------------------------------|-------------------------------|
-| client-go 1.4 | Kubernetes main repo, 1.4 branch | = - |
-| client-go 1.5 | Kubernetes main repo, 1.5 branch | = - |
-| client-go 2.0 | Kubernetes main repo, 1.5 branch | = - |
-| client-go 3.0 | Kubernetes main repo, 1.6 branch | = - |
-| client-go 4.0 | Kubernetes main repo, 1.7 branch | ✓ |
-| client-go 5.0 | Kubernetes main repo, 1.8 branch | ✓ |
-| client-go 6.0 | Kubernetes main repo, 1.9 branch | ✓ |
-| client-go HEAD | Kubernetes main repo, master branch | ✓ |
-
-Key:
-
-* `✓` Changes in main Kubernetes repo are actively published to client-go by a bot
-* `=` Maintenance is manual, only severe security bugs will be patched.
-* `-` Deprecated; please upgrade.
-
-#### Deprecation policy
-
-We will maintain branches for at least six months after their first stable tag
-is cut. (E.g., the clock for the release-2.0 branch started ticking when we
-tagged v2.0.0, not when we made the first alpha.) This policy applies to
-every version greater than or equal to 2.0.
-
-#### Why do the 1.4 and 1.5 branch contain top-level folder named after the version?
-
-For the initial release of client-go, we thought it would be easiest to keep
-separate directories for each minor version. That soon proved to be a mistake.
-We are keeping the top-level folders in the 1.4 and 1.5 branches so that
-existing users won't be broken.
-
-### Kubernetes tags
-
-As of October 2017, client-go is still a mirror of
-[k8s.io/kubernetes/staging/src/client-go](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/client-go),
-the code development is still done in the staging area. Since Kubernetes 1.8
-release, when syncing the code from the staging area, we also sync the Kubernetes
-version tags to client-go, prefixed with "kubernetes-". For example, if you check
-out the `kubernetes-v1.8.0` tag in client-go, the code you get is exactly the
-same as if you check out the `v1.8.0` tag in kubernetes, and change directory to
-`staging/src/k8s.io/client-go`. The purpose is to let users quickly find matching
-commits among published repos, like
-[sample-apiserver](https://github.com/kubernetes/sample-apiserver),
-[apiextension-apiserver](https://github.com/kubernetes/apiextensions-apiserver),
-etc. The Kubernetes version tag does NOT claim any backwards compatibility
-guarantees for client-go. Please check the [semantic versions](#versioning) if
-you care about backwards compatibility.
-
-### How to get it
-
-You can use `go get k8s.io/client-go/...` to get client-go, but **you will get
-the unstable master branch** and `client-go`'s vendored dependencies will not be
-added to your `$GOPATH`. So we think most users will want to use a dependency
-management system. See [INSTALL.md](/INSTALL.md) for detailed instructions.
-
-### How to use it
-
-If your application runs in a Pod in the cluster, please refer to the
-in-cluster [example](examples/in-cluster-client-configuration), otherwise please
-refer to the out-of-cluster [example](examples/out-of-cluster-client-configuration).
-
-### Dependency management
-
-If your application depends on a package that client-go depends on, and you let the Go compiler find the dependency in `GOPATH`, you will end up with duplicated dependencies: one copy from the `GOPATH`, and one from the vendor folder of client-go. This will cause unexpected runtime error like flag redefinition, since the go compiler ends up importing both packages separately, even if they are exactly the same thing. If this happens, you can either
-* run `godep restore` ([godep](https://github.com/tools/godep)) in the client-go/ folder, then remove the vendor folder of client-go. Then the packages in your GOPATH will be the only copy
-* or run `godep save` in your application folder to flatten all dependencies.
-
-### Contributing code
-Please send pull requests against the client packages in the Kubernetes main [repository](https://github.com/kubernetes/kubernetes). Changes in the staging area will be published to this repository every day.
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
new file mode 100644
index 000000000..e0ec62deb
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# approval on api packages bubbles to api-approvers
+reviewers:
+- sig-auth-authenticators-approvers
+- sig-auth-authenticators-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
index d06482d55..b99459757 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
@@ -16,4 +16,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +groupName=client.authentication.k8s.io
+
package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
index 5c05825f6..6fb53cecf 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
@@ -57,7 +57,14 @@ type ExecCredentialStatus struct {
// +optional
ExpirationTimestamp *metav1.Time
// Token is a bearer token used by the client for request authentication.
+ // +optional
Token string
+ // PEM-encoded client TLS certificate.
+ // +optional
+ ClientCertificateData string
+ // PEM-encoded client TLS private key.
+ // +optional
+ ClientKeyData string
}
// Response defines metadata about a failed request, including HTTP status code and
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
index 016adb28a..19ab77614 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
@@ -20,4 +20,5 @@ limitations under the License.
// +k8s:defaulter-gen=TypeMeta
// +groupName=client.authentication.k8s.io
+
package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
index 8920d3187..c714e2457 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
@@ -22,7 +22,7 @@ import (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// ExecCredentials is used by exec-based plugins to communicate credentials to
+// ExecCredential is used by exec-based plugins to communicate credentials to
// HTTP transports.
type ExecCredential struct {
metav1.TypeMeta `json:",inline"`
@@ -52,12 +52,20 @@ type ExecCredentialSpec struct {
}
// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between client and exec plugin process. Exec plugin
+// itself should at least be protected via file permissions.
type ExecCredentialStatus struct {
// ExpirationTimestamp indicates a time when the provided credentials expire.
// +optional
ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
// Token is a bearer token used by the client for request authentication.
Token string `json:"token,omitempty"`
+ // PEM-encoded client TLS certificates (including intermediates, if any).
+ ClientCertificateData string `json:"clientCertificateData,omitempty"`
+ // PEM-encoded private key for the above certificate.
+ ClientKeyData string `json:"clientKeyData,omitempty"`
}
// Response defines metadata about a failed request, including HTTP status code and
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
index bf28baef2..461c20b29 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -35,17 +35,48 @@ func init() {
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
-func RegisterConversions(scheme *runtime.Scheme) error {
- return scheme.AddGeneratedConversionFuncs(
- Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential,
- Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential,
- Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec,
- Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec,
- Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus,
- Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus,
- Convert_v1alpha1_Response_To_clientauthentication_Response,
- Convert_clientauthentication_Response_To_v1alpha1_Response,
- )
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
}
func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
@@ -99,6 +130,8 @@ func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialS
func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
return nil
}
@@ -110,6 +143,8 @@ func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentia
func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
return nil
}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
index c6dbbce4d..a73d31b3f 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -31,12 +31,8 @@ func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
in.Spec.DeepCopyInto(&out.Spec)
if in.Status != nil {
in, out := &in.Status, &out.Status
- if *in == nil {
- *out = nil
- } else {
- *out = new(ExecCredentialStatus)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ExecCredentialStatus)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -64,12 +60,8 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
*out = *in
if in.Response != nil {
in, out := &in.Response, &out.Response
- if *in == nil {
- *out = nil
- } else {
- *out = new(Response)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Response)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -89,11 +81,7 @@ func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
*out = *in
if in.ExpirationTimestamp != nil {
in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
return
}
@@ -115,12 +103,15 @@ func (in *Response) DeepCopyInto(out *Response) {
in, out := &in.Header, &out.Header
*out = make(map[string][]string, len(*in))
for key, val := range *in {
+ var outVal []string
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = make([]string, len(val))
- copy((*out)[key], val)
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
+ (*out)[key] = outVal
}
}
return
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
index 2bd0078a3..dd621a3ac 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
new file mode 100644
index 000000000..f543806ac
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
new file mode 100644
index 000000000..22d1c588b
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
new file mode 100644
index 000000000..0bb92f16a
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ExecCredential{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
new file mode 100644
index 000000000..d6e267452
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec holds information passed to the plugin by the transport. This contains
+ // request and runtime specific information, such as if the session is interactive.
+ Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+ // Status is filled in by the plugin and holds the credentials that the transport
+ // should use to contact the API.
+ // +optional
+ Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct{}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between client and exec plugin process. Exec plugin
+// itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+ // ExpirationTimestamp indicates a time when the provided credentials expire.
+ // +optional
+ ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+ // Token is a bearer token used by the client for request authentication.
+ Token string `json:"token,omitempty"`
+ // PEM-encoded client TLS certificates (including intermediates, if any).
+ ClientCertificateData string `json:"clientCertificateData,omitempty"`
+ // PEM-encoded private key for the above certificate.
+ ClientKeyData string `json:"clientKeyData,omitempty"`
+}
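
Note: the v1beta1 ExecCredential is what an exec credential plugin writes to stdout. A sketch of producing that output with the new certificate fields; the PEM strings, token, and expiry below are placeholders:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
    )

    func main() {
        exp := metav1.NewTime(time.Now().Add(30 * time.Minute))
        cred := v1beta1.ExecCredential{
            TypeMeta: metav1.TypeMeta{
                APIVersion: "client.authentication.k8s.io/v1beta1",
                Kind:       "ExecCredential",
            },
            Status: &v1beta1.ExecCredentialStatus{
                ExpirationTimestamp:   &exp,
                Token:                 "REDACTED-BEARER-TOKEN",
                ClientCertificateData: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
                ClientKeyData:         "-----BEGIN EC PRIVATE KEY-----\n...\n-----END EC PRIVATE KEY-----\n",
            },
        }
        out, _ := json.MarshalIndent(cred, "", "  ")
        fmt.Println(string(out)) // what a plugin would print for the client to consume
    }
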
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
new file mode 100644
index 000000000..94ef4b733
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,142 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+ if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+ return nil
+}
+
+// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+ return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+ if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+ return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+ return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+ return nil
+}
+
+// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+ // WARNING: in.Response requires manual conversion: does not exist in peer-type
+ // WARNING: in.Interactive requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+ out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+ out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
+ return nil
+}
+
+// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+ out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+ out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
+ return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+ return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..736b8cf00
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,92 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.Spec = in.Spec
+ if in.Status != nil {
+ in, out := &in.Status, &out.Status
+ *out = new(ExecCredentialStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredential)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredentialSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+ *out = *in
+ if in.ExpirationTimestamp != nil {
+ in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredentialStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
new file mode 100644
index 000000000..73e63fc11
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
index f8b15d848..c568a6fc8 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -31,12 +31,8 @@ func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
in.Spec.DeepCopyInto(&out.Spec)
if in.Status != nil {
in, out := &in.Status, &out.Status
- if *in == nil {
- *out = nil
- } else {
- *out = new(ExecCredentialStatus)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ExecCredentialStatus)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -64,12 +60,8 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
*out = *in
if in.Response != nil {
in, out := &in.Response, &out.Response
- if *in == nil {
- *out = nil
- } else {
- *out = new(Response)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(Response)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -89,11 +81,7 @@ func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
*out = *in
if in.ExpirationTimestamp != nil {
in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
- if *in == nil {
- *out = nil
- } else {
- *out = (*in).DeepCopy()
- }
+ *out = (*in).DeepCopy()
}
return
}
@@ -115,12 +103,15 @@ func (in *Response) DeepCopyInto(out *Response) {
in, out := &in.Header, &out.Header
*out = make(map[string][]string, len(*in))
for key, val := range *in {
+ var outVal []string
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = make([]string, len(val))
- copy((*out)[key], val)
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
+ (*out)[key] = outVal
}
}
return
diff --git a/vendor/k8s.io/client-go/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes
new file mode 100644
index 000000000..7e349eff6
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/.gitattributes
@@ -0,0 +1 @@
+base.go export-subst
diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go
index 7ab0ed3a7..9b4c79f89 100644
--- a/vendor/k8s.io/client-go/pkg/version/base.go
+++ b/vendor/k8s.io/client-go/pkg/version/base.go
@@ -43,7 +43,7 @@ var (
gitMinor string = "" // minor version, numeric possibly followed by "+"
// semantic version, derived by build scripts (see
- // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
+ // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md
// for a detailed discussion of this field)
//
// TODO: This field is still called "gitVersion" for legacy
diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl
new file mode 100644
index 000000000..9c018a4ef
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/version/def.bzl
@@ -0,0 +1,38 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel.
+def version_x_defs():
+ # This should match the list of packages in kube::version::ldflag
+ stamp_pkgs = [
+ "k8s.io/kubernetes/pkg/version",
+ # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here?
+ "k8s.io/client-go/pkg/version",
+ ]
+ # This should match the list of vars in kube::version::ldflags
+ # It should also match the list of vars set in hack/print-workspace-status.sh.
+ stamp_vars = [
+ "buildDate",
+ "gitCommit",
+ "gitMajor",
+ "gitMinor",
+ "gitTreeState",
+ "gitVersion",
+ ]
+ # Generate the cross-product.
+ x_defs = {}
+ for pkg in stamp_pkgs:
+ for var in stamp_vars:
+ x_defs["%s.%s" % (pkg, var)] = "{%s}" % var
+ return x_defs
diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go
index 30399fb02..05e997e13 100644
--- a/vendor/k8s.io/client-go/pkg/version/doc.go
+++ b/vendor/k8s.io/client-go/pkg/version/doc.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// +k8s:openapi-gen=true
+
// Package version supplies version information collected at build time to
// kubernetes components.
-// +k8s:openapi-gen=true
package version // import "k8s.io/client-go/pkg/version"
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
index dfd434d0c..b88902c10 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -18,23 +18,33 @@ package exec
import (
"bytes"
+ "context"
+ "crypto/tls"
+ "errors"
"fmt"
"io"
+ "net"
"net/http"
"os"
"os/exec"
+ "reflect"
"sync"
"time"
- "github.com/golang/glog"
+ "github.com/davecgh/go-spew/spew"
"golang.org/x/crypto/ssh/terminal"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/pkg/apis/clientauthentication"
"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
+ "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/tools/clientcmd/api"
+ "k8s.io/client-go/transport"
+ "k8s.io/client-go/util/connrotation"
+ "k8s.io/klog"
)
const execInfoEnv = "KUBERNETES_EXEC_INFO"
@@ -44,8 +54,9 @@ var codecs = serializer.NewCodecFactory(scheme)
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
- v1alpha1.AddToScheme(scheme)
- clientauthentication.AddToScheme(scheme)
+ utilruntime.Must(v1alpha1.AddToScheme(scheme))
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+ utilruntime.Must(clientauthentication.AddToScheme(scheme))
}
var (
@@ -55,6 +66,7 @@ var (
// The list of API versions we accept.
apiVersions = map[string]schema.GroupVersion{
v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion,
+ v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion,
}
)
@@ -62,8 +74,10 @@ func newCache() *cache {
return &cache{m: make(map[string]*Authenticator)}
}
+var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "}
+
func cacheKey(c *api.ExecConfig) string {
- return fmt.Sprintf("%#v", c)
+ return spewConfig.Sprint(c)
}
type cache struct {
@@ -147,14 +161,40 @@ type Authenticator struct {
// The mutex also guards calling the plugin. Since the plugin could be
// interactive we want to make sure it's only called once.
mu sync.Mutex
- cachedToken string
+ cachedCreds *credentials
exp time.Time
+
+ onRotate func()
}
-// WrapTransport instruments an existing http.RoundTripper with credentials returned
-// by the plugin.
-func (a *Authenticator) WrapTransport(rt http.RoundTripper) http.RoundTripper {
- return &roundTripper{a, rt}
+type credentials struct {
+ token string
+ cert *tls.Certificate
+}
+
+// UpdateTransportConfig updates the transport.Config to use credentials
+// returned by the plugin.
+func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
+ c.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+ return &roundTripper{a, rt}
+ })
+
+ if c.TLS.GetCert != nil {
+ return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
+ }
+ c.TLS.GetCert = a.cert
+
+ var dial func(ctx context.Context, network, addr string) (net.Conn, error)
+ if c.Dial != nil {
+ dial = c.Dial
+ } else {
+ dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
+ }
+ d := connrotation.NewDialer(dial)
+ a.onRotate = d.CloseAll
+ c.Dial = d.DialContext
+
+ return nil
}
type roundTripper struct {
@@ -169,11 +209,13 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return r.base.RoundTrip(req)
}
- token, err := r.a.token()
+ creds, err := r.a.getCreds()
if err != nil {
- return nil, fmt.Errorf("getting token: %v", err)
+ return nil, fmt.Errorf("getting credentials: %v", err)
+ }
+ if creds.token != "" {
+ req.Header.Set("Authorization", "Bearer "+creds.token)
}
- req.Header.Set("Authorization", "Bearer "+token)
res, err := r.base.RoundTrip(req)
if err != nil {
@@ -184,47 +226,60 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
Header: res.Header,
Code: int32(res.StatusCode),
}
- if err := r.a.refresh(token, resp); err != nil {
- glog.Errorf("refreshing token: %v", err)
+ if err := r.a.maybeRefreshCreds(creds, resp); err != nil {
+ klog.Errorf("refreshing credentials: %v", err)
}
}
return res, nil
}
-func (a *Authenticator) tokenExpired() bool {
+func (a *Authenticator) credsExpired() bool {
if a.exp.IsZero() {
return false
}
return a.now().After(a.exp)
}
-func (a *Authenticator) token() (string, error) {
+func (a *Authenticator) cert() (*tls.Certificate, error) {
+ creds, err := a.getCreds()
+ if err != nil {
+ return nil, err
+ }
+ return creds.cert, nil
+}
+
+func (a *Authenticator) getCreds() (*credentials, error) {
a.mu.Lock()
defer a.mu.Unlock()
- if a.cachedToken != "" && !a.tokenExpired() {
- return a.cachedToken, nil
+ if a.cachedCreds != nil && !a.credsExpired() {
+ return a.cachedCreds, nil
}
- return a.getToken(nil)
+ if err := a.refreshCredsLocked(nil); err != nil {
+ return nil, err
+ }
+ return a.cachedCreds, nil
}
-// refresh executes the plugin to force a rotation of the token.
-func (a *Authenticator) refresh(token string, r *clientauthentication.Response) error {
+// maybeRefreshCreds executes the plugin to force a rotation of the
+// credentials, unless they were rotated already.
+func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error {
a.mu.Lock()
defer a.mu.Unlock()
- if token != a.cachedToken {
- // Token already rotated.
+ // getCreds returns the same pointer it stores in a.cachedCreds, so a
+ // simple pointer comparison is enough; no deep comparison is needed.
+ if creds != a.cachedCreds {
+ // Credentials already rotated.
return nil
}
- _, err := a.getToken(r)
- return err
+ return a.refreshCredsLocked(r)
}
-// getToken executes the plugin and reads the credentials from stdout. It must be
-// called while holding the Authenticator's mutex.
-func (a *Authenticator) getToken(r *clientauthentication.Response) (string, error) {
+// refreshCredsLocked executes the plugin and reads the credentials from
+// stdout. It must be called while holding the Authenticator's mutex.
+func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error {
cred := &clientauthentication.ExecCredential{
Spec: clientauthentication.ExecCredentialSpec{
Response: r,
@@ -232,13 +287,18 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro
},
}
- data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred)
- if err != nil {
- return "", fmt.Errorf("encode ExecCredentials: %v", err)
- }
-
env := append(a.environ(), a.env...)
- env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data))
+ if a.group == v1alpha1.SchemeGroupVersion {
+ // Input spec disabled for beta due to lack of use. Possibly re-enable this later if
+ // someone wants it back.
+ //
+ // See: https://github.com/kubernetes/kubernetes/issues/61796
+ data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred)
+ if err != nil {
+ return fmt.Errorf("encode ExecCredentials: %v", err)
+ }
+ env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data))
+ }
stdout := &bytes.Buffer{}
cmd := exec.Command(a.cmd, a.args...)
@@ -250,23 +310,26 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro
}
if err := cmd.Run(); err != nil {
- return "", fmt.Errorf("exec: %v", err)
+ return fmt.Errorf("exec: %v", err)
}
_, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred)
if err != nil {
- return "", fmt.Errorf("decode stdout: %v", err)
+ return fmt.Errorf("decoding stdout: %v", err)
}
if gvk.Group != a.group.Group || gvk.Version != a.group.Version {
- return "", fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
+ return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version})
}
if cred.Status == nil {
- return "", fmt.Errorf("exec plugin didn't return a status field")
+ return fmt.Errorf("exec plugin didn't return a status field")
+ }
+ if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" {
+ return fmt.Errorf("exec plugin didn't return a token or cert/key pair")
}
- if cred.Status.Token == "" {
- return "", fmt.Errorf("exec plugin didn't return a token")
+ if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") {
+ return fmt.Errorf("exec plugin returned only certificate or key, not both")
}
if cred.Status.ExpirationTimestamp != nil {
@@ -274,7 +337,24 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro
} else {
a.exp = time.Time{}
}
- a.cachedToken = cred.Status.Token
- return a.cachedToken, nil
+ newCreds := &credentials{
+ token: cred.Status.Token,
+ }
+ if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" {
+ cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData))
+ if err != nil {
+ return fmt.Errorf("failed parsing client key/certificate: %v", err)
+ }
+ newCreds.cert = &cert
+ }
+
+ oldCreds := a.cachedCreds
+ a.cachedCreds = newCreds
+ // Only close all connections when TLS cert rotates. Token rotation doesn't
+ // need the extra noise.
+ if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+ a.onRotate()
+ }
+ return nil
}
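
To make the new credential flow above concrete, the sketch below (not part of the diff) emulates what an exec credential plugin might print to stdout for refreshCredsLocked to decode: an ExecCredential whose status carries either a bearer token or a client cert/key pair plus an optional expiry. The struct definitions are trimmed-down, hypothetical mirrors of the clientauthentication types; a real plugin would normally use the client-go API types directly.

package main

import (
	"encoding/json"
	"os"
	"time"
)

// execCredential and credStatus are hypothetical, minimal mirrors of the
// client.authentication.k8s.io types used only for this illustration.
type execCredential struct {
	APIVersion string     `json:"apiVersion"`
	Kind       string     `json:"kind"`
	Status     credStatus `json:"status"`
}

type credStatus struct {
	Token                 string     `json:"token,omitempty"`
	ClientCertificateData string     `json:"clientCertificateData,omitempty"`
	ClientKeyData         string     `json:"clientKeyData,omitempty"`
	ExpirationTimestamp   *time.Time `json:"expirationTimestamp,omitempty"`
}

func main() {
	exp := time.Now().Add(10 * time.Minute)
	cred := execCredential{
		APIVersion: "client.authentication.k8s.io/v1beta1",
		Kind:       "ExecCredential",
		Status: credStatus{
			Token:               "example-token", // or ClientCertificateData/ClientKeyData PEM instead
			ExpirationTimestamp: &exp,
		},
	}
	// refreshCredsLocked decodes output like this from the plugin's stdout and
	// caches the resulting credentials until they expire or the server rejects them.
	_ = json.NewEncoder(os.Stdout).Encode(cred)
}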
diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS
new file mode 100644
index 000000000..49dabc61b
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/OWNERS
@@ -0,0 +1,26 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- smarterclayton
+- caesarxuchao
+- wojtek-t
+- deads2k
+- brendandburns
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- sttts
+- luxas
+- dims
+- errordeveloper
+- hongchaodeng
+- krousey
+- resouer
+- cjcullen
+- rmmh
+- lixiaobing10051267
+- asalkeld
+- juanvallejo
+- lojies
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
index 72a78bc0a..c75825ec5 100644
--- a/vendor/k8s.io/client-go/rest/config.go
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -17,6 +17,8 @@ limitations under the License.
package rest
import (
+ "context"
+ "errors"
"fmt"
"io/ioutil"
"net"
@@ -27,16 +29,15 @@ import (
"strings"
"time"
- "github.com/golang/glog"
-
- "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/pkg/version"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ "k8s.io/client-go/transport"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
)
const (
@@ -44,6 +45,8 @@ const (
DefaultBurst int = 10
)
+var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+
// Config holds the common attributes that can be passed to a Kubernetes client on
// initialization.
type Config struct {
@@ -68,6 +71,11 @@ type Config struct {
// TODO: demonstrate an OAuth2 compatible client.
BearerToken string
+ // Path to a file containing a BearerToken.
+ // If set, the contents are periodically read.
+ // The last successfully read value takes precedence over BearerToken.
+ BearerTokenFile string
+
// Impersonate is the configuration that RESTClient will use for impersonation.
Impersonate ImpersonationConfig
@@ -88,13 +96,16 @@ type Config struct {
// Transport may be used for custom HTTP behavior. This attribute may not
// be specified with the TLS client certificate options. Use WrapTransport
- // for most client level operations.
+ // to provide additional per-server middleware behavior.
Transport http.RoundTripper
// WrapTransport will be invoked for custom HTTP behavior after the underlying
// transport is initialized (either the transport created from TLSClientConfig,
// Transport, or http.DefaultTransport). The config may layer other RoundTrippers
// on top of the returned RoundTripper.
- WrapTransport func(rt http.RoundTripper) http.RoundTripper
+ //
+ // A future release will change this field to an array. Use config.Wrap()
+ // instead of setting this value directly.
+ WrapTransport transport.WrapperFunc
// QPS indicates the maximum QPS to the master from this client.
// If it's zero, the created RESTClient will use DefaultQPS: 5
@@ -111,13 +122,54 @@ type Config struct {
Timeout time.Duration
// Dial specifies the dial function for creating unencrypted TCP connections.
- Dial func(network, addr string) (net.Conn, error)
+ Dial func(ctx context.Context, network, address string) (net.Conn, error)
// Version forces a specific version to be used (if registered)
// Do we need this?
// Version string
}
+var _ fmt.Stringer = new(Config)
+var _ fmt.GoStringer = new(Config)
+
+type sanitizedConfig *Config
+
+type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister }
+
+func (sanitizedAuthConfigPersister) GoString() string {
+ return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+func (sanitizedAuthConfigPersister) String() string {
+ return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config
+// to prevent accidental leaking via logs.
+func (c *Config) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of Config to
+// prevent accidental leaking via logs.
+func (c *Config) String() string {
+ if c == nil {
+ return "<nil>"
+ }
+ cc := sanitizedConfig(CopyConfig(c))
+ // Explicitly mark non-empty credential fields as redacted.
+ if cc.Password != "" {
+ cc.Password = "--- REDACTED ---"
+ }
+ if cc.BearerToken != "" {
+ cc.BearerToken = "--- REDACTED ---"
+ }
+ if cc.AuthConfigPersister != nil {
+ cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister}
+ }
+
+ return fmt.Sprintf("%#v", cc)
+}
+
// ImpersonationConfig has all the available impersonation options
type ImpersonationConfig struct {
// UserName is the username to impersonate on each request.
@@ -157,6 +209,40 @@ type TLSClientConfig struct {
CAData []byte
}
+var _ fmt.Stringer = TLSClientConfig{}
+var _ fmt.GoStringer = TLSClientConfig{}
+
+type sanitizedTLSClientConfig TLSClientConfig
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) String() string {
+ cc := sanitizedTLSClientConfig{
+ Insecure: c.Insecure,
+ ServerName: c.ServerName,
+ CertFile: c.CertFile,
+ KeyFile: c.KeyFile,
+ CAFile: c.CAFile,
+ CertData: c.CertData,
+ KeyData: c.KeyData,
+ CAData: c.CAData,
+ }
+ // Explicitly mark non-empty credential fields as redacted.
+ if len(cc.CertData) != 0 {
+ cc.CertData = []byte("--- TRUNCATED ---")
+ }
+ if len(cc.KeyData) != 0 {
+ cc.KeyData = []byte("--- REDACTED ---")
+ }
+ return fmt.Sprintf("%#v", cc)
+}
+
type ContentConfig struct {
// AcceptContentTypes specifies the types the client will accept and is optional.
// If not set, ContentType will be used to define the Accept header
@@ -220,7 +306,7 @@ func RESTClientFor(config *Config) (*RESTClient, error) {
// the config.Version to be empty.
func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
if config.NegotiatedSerializer == nil {
- return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient")
+ return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
}
baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
@@ -308,22 +394,27 @@ func DefaultKubernetesUserAgent() string {
// InClusterConfig returns a config object which uses the service account
// kubernetes gives to pods. It's intended for clients that expect to be
-// running inside a pod running on kubernetes. It will return an error if
-// called from a process not running in a kubernetes environment.
+// running inside a pod running on kubernetes. It will return ErrNotInCluster
+// if called from a process not running in a kubernetes environment.
func InClusterConfig() (*Config, error) {
+ const (
+ tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ )
host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
if len(host) == 0 || len(port) == 0 {
- return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+ return nil, ErrNotInCluster
}
- token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + v1.ServiceAccountTokenKey)
+ token, err := ioutil.ReadFile(tokenFile)
if err != nil {
return nil, err
}
+
tlsClientConfig := TLSClientConfig{}
- rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + v1.ServiceAccountRootCAKey
+
if _, err := certutil.NewPool(rootCAFile); err != nil {
- glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+ klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
}
@@ -331,8 +422,9 @@ func InClusterConfig() (*Config, error) {
return &Config{
// TODO: switch to using cluster DNS.
Host: "https://" + net.JoinHostPort(host, port),
- BearerToken: string(token),
TLSClientConfig: tlsClientConfig,
+ BearerToken: string(token),
+ BearerTokenFile: tokenFile,
}, nil
}
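
A small usage sketch (illustrative, not part of the diff) of the new sentinel error: callers can now distinguish "not running in a pod" from other failures instead of matching on the error string.

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err == rest.ErrNotInCluster {
		// Not running inside a pod; a caller would typically fall back to a
		// kubeconfig-based configuration here.
		fmt.Println("not in cluster:", err)
		return
	}
	if err != nil {
		panic(err)
	}
	// BearerTokenFile is now populated as well, so the service account token is
	// re-read periodically instead of being fixed for the process lifetime.
	fmt.Println("API server:", cfg.Host)
}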
@@ -395,7 +487,7 @@ func AddUserAgent(config *Config, userAgent string) *Config {
return config
}
-// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed
+// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed
func AnonymousClientConfig(config *Config) *Config {
// copy only known safe fields
return &Config{
@@ -408,26 +500,25 @@ func AnonymousClientConfig(config *Config) *Config {
CAFile: config.TLSClientConfig.CAFile,
CAData: config.TLSClientConfig.CAData,
},
- RateLimiter: config.RateLimiter,
- UserAgent: config.UserAgent,
- Transport: config.Transport,
- WrapTransport: config.WrapTransport,
- QPS: config.QPS,
- Burst: config.Burst,
- Timeout: config.Timeout,
- Dial: config.Dial,
+ RateLimiter: config.RateLimiter,
+ UserAgent: config.UserAgent,
+ QPS: config.QPS,
+ Burst: config.Burst,
+ Timeout: config.Timeout,
+ Dial: config.Dial,
}
}
// CopyConfig returns a copy of the given config
func CopyConfig(config *Config) *Config {
return &Config{
- Host: config.Host,
- APIPath: config.APIPath,
- ContentConfig: config.ContentConfig,
- Username: config.Username,
- Password: config.Password,
- BearerToken: config.BearerToken,
+ Host: config.Host,
+ APIPath: config.APIPath,
+ ContentConfig: config.ContentConfig,
+ Username: config.Username,
+ Password: config.Password,
+ BearerToken: config.BearerToken,
+ BearerTokenFile: config.BearerTokenFile,
Impersonate: ImpersonationConfig{
Groups: config.Impersonate.Groups,
Extra: config.Impersonate.Extra,
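
The sanitizing String/GoString implementations above mean that logging a Config no longer leaks credentials. A minimal sketch of the effect, with hypothetical values:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	// Hypothetical values; only the field names matter here.
	cfg := &rest.Config{
		Host:        "https://example.invalid",
		Username:    "admin",
		Password:    "s3cret",
		BearerToken: "opaque-token",
	}
	// Both %v and %#v now route through the sanitizing String/GoString methods,
	// so Password and BearerToken are printed as "--- REDACTED ---".
	fmt.Printf("%v\n", cfg)
	fmt.Printf("%#v\n", cfg)
}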
diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go
index cf8fbabfd..83ef5ae32 100644
--- a/vendor/k8s.io/client-go/rest/plugin.go
+++ b/vendor/k8s.io/client-go/rest/plugin.go
@@ -21,7 +21,7 @@ import (
"net/http"
"sync"
- "github.com/golang/glog"
+ "k8s.io/klog"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error {
if _, found := plugins[name]; found {
return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
}
- glog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+ klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
plugins[name] = plugin
return nil
}
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index 6ca9e0197..0570615fc 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -32,7 +32,6 @@ import (
"strings"
"time"
- "github.com/golang/glog"
"golang.org/x/net/http2"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,6 +43,7 @@ import (
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/tools/metrics"
"k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
)
var (
@@ -114,7 +114,7 @@ type Request struct {
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
if backoff == nil {
- glog.V(2).Infof("Not implementing request backoff strategy.")
+ klog.V(2).Infof("Not implementing request backoff strategy.")
backoff = &NoBackoff{}
}
@@ -198,7 +198,7 @@ func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
return r
}
-// SubResource sets a sub-resource path which can be multiple segments segment after the resource
+// SubResource sets a sub-resource path which can be multiple segments after the resource
// name but before the suffix.
func (r *Request) SubResource(subresources ...string) *Request {
if r.err != nil {
@@ -317,10 +317,14 @@ func (r *Request) Param(paramName, s string) *Request {
// VersionedParams will not write query parameters that have omitempty set and are empty. If a
// parameter has already been set it is appended to (Params and VersionedParams are additive).
func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+ return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
+}
+
+func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
if r.err != nil {
return r
}
- params, err := codec.EncodeParameters(obj, *r.content.GroupVersion)
+ params, err := codec.EncodeParameters(obj, version)
if err != nil {
r.err = err
return r
@@ -353,8 +357,8 @@ func (r *Request) SetHeader(key string, values ...string) *Request {
return r
}
-// Timeout makes the request use the given duration as a timeout. Sets the "timeout"
-// parameter.
+// Timeout makes the request use the given duration as an overall timeout for the
+// request. Additionally, if set, the value is passed as the "timeout" parameter in the URL.
func (r *Request) Timeout(d time.Duration) *Request {
if r.err != nil {
return r
@@ -451,17 +455,9 @@ func (r *Request) URL() *url.URL {
// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
-// parameters will be reset. This creates a copy of the request so as not to change the
-// underlying object. This means some useful request info (like the types of field
-// selectors in use) will be lost.
-// TODO: preserve field selector keys
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
func (r Request) finalURLTemplate() url.URL {
- if len(r.resourceName) != 0 {
- r.resourceName = "{name}"
- }
- if r.namespaceSet && len(r.namespace) != 0 {
- r.namespace = "{namespace}"
- }
newParams := url.Values{}
v := []string{"{value}"}
for k := range r.params {
@@ -469,6 +465,59 @@ func (r Request) finalURLTemplate() url.URL {
}
r.params = newParams
url := r.URL()
+ segments := strings.Split(r.URL().Path, "/")
+ groupIndex := 0
+ index := 0
+ if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
+ groupIndex += len(strings.Split(r.baseURL.Path, "/"))
+ }
+ if groupIndex >= len(segments) {
+ return *url
+ }
+
+ const CoreGroupPrefix = "api"
+ const NamedGroupPrefix = "apis"
+ isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+ isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+ if isCoreGroup {
+ // checking the case of core group with /api/v1/... format
+ index = groupIndex + 2
+ } else if isNamedGroup {
+ // checking the case of named group with /apis/apps/v1/... format
+ index = groupIndex + 3
+ } else {
+ // this should not happen, since the only two possibilities are /api... and /apis...; this is just an
+ // outlet in case more API groups are ever added in the future:
+ // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+ // if an unrecognized API group name is encountered, return {prefix} for url.Path
+ url.Path = "/{prefix}"
+ url.RawQuery = ""
+ return *url
+ }
+ switch {
+ // len(segments)-index == 1: a resource with no name; nothing to replace.
+ case len(segments)-index == 2:
+ // /$RESOURCE/$NAME: replace $NAME with {name}
+ segments[index+1] = "{name}"
+ case len(segments)-index == 3:
+ if segments[index+2] == "finalize" || segments[index+2] == "status" {
+ // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+ segments[index+1] = "{name}"
+ } else {
+ // /namespaces/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+ segments[index+1] = "{namespace}"
+ }
+ case len(segments)-index >= 4:
+ segments[index+1] = "{namespace}"
+ // /namespaces/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
+ if segments[index+3] != "finalize" && segments[index+3] != "status" {
+ // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+ segments[index+3] = "{name}"
+ }
+ }
+ url.Path = path.Join(segments...)
return *url
}
@@ -478,13 +527,26 @@ func (r *Request) tryThrottle() {
r.throttle.Accept()
}
if latency := time.Since(now); latency > longThrottleLatency {
- glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+ klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
}
}
// Watch attempts to begin watching the requested location.
// Returns a watch.Interface, or an error.
func (r *Request) Watch() (watch.Interface, error) {
+ return r.WatchWithSpecificDecoders(
+ func(body io.ReadCloser) streaming.Decoder {
+ framer := r.serializers.Framer.NewFrameReader(body)
+ return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+ },
+ r.serializers.Decoder,
+ )
+}
+
+// WatchWithSpecificDecoders attempts to begin watching the requested location with *different* decoders:
+// one "standard" wrapper decoder for the watch event envelope and one embedded decoder for the object content.
+// Returns a watch.Interface, or an error.
+func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
// We specifically don't want to rate limit watches, so we
// don't use r.throttle here.
if r.err != nil {
@@ -530,11 +592,15 @@ func (r *Request) Watch() (watch.Interface, error) {
if result := r.transformResponse(resp, req); result.err != nil {
return nil, result.err
}
- return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
- }
- framer := r.serializers.Framer.NewFrameReader(resp.Body)
- decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
- return watch.NewStreamWatcher(restclientwatch.NewDecoder(decoder, r.serializers.Decoder)), nil
+ return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
+ }
+ wrapperDecoder := wrapperDecoderFn(resp.Body)
+ return watch.NewStreamWatcher(
+ restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder),
+ // use 500 to indicate that the cause of the error is unknown - other error codes
+ // are more specific to HTTP interactions, and set a reason
+ errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),
+ ), nil
}
// updateURLMetrics is a convenience function for pushing metrics.
@@ -622,7 +688,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
}()
if r.err != nil {
- glog.V(4).Infof("Error in request: %v", r.err)
+ klog.V(4).Infof("Error in request: %v", r.err)
return r.err
}
@@ -640,7 +706,6 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
}
// Right now we make about ten retry attempts if we get a Retry-After response.
- // TODO: Change to a timeout based approach.
maxRetries := 10
retries := 0
for {
@@ -649,6 +714,14 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
if err != nil {
return err
}
+ if r.timeout > 0 {
+ if r.ctx == nil {
+ r.ctx = context.Background()
+ }
+ var cancelFn context.CancelFunc
+ r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
+ defer cancelFn()
+ }
if r.ctx != nil {
req = req.WithContext(r.ctx)
}
@@ -702,13 +775,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
_, err := seeker.Seek(0, 0)
if err != nil {
- glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
+ klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
fn(req, resp)
return true
}
}
- glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url)
+ klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
return false
}
@@ -776,14 +849,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 2. Apiserver sends back the headers and then part of the body
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
- glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
- streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
+ klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+ streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
return Result{
err: streamErr,
}
default:
- glog.Errorf("Unexpected error when reading response body: %#v", err)
- unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
+ klog.Errorf("Unexpected error when reading response body: %v", err)
+ unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. Original error: %v", err)
return Result{
err: unexpectedErr,
}
@@ -846,11 +919,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
func truncateBody(body string) string {
max := 0
switch {
- case bool(glog.V(10)):
+ case bool(klog.V(10)):
return body
- case bool(glog.V(9)):
+ case bool(klog.V(9)):
max = 10240
- case bool(glog.V(8)):
+ case bool(klog.V(8)):
max = 1024
}
@@ -865,13 +938,13 @@ func truncateBody(body string) string {
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
func glogBody(prefix string, body []byte) {
- if glog.V(8) {
+ if klog.V(8) {
if bytes.IndexFunc(body, func(r rune) bool {
return r < 0x0a
}) != -1 {
- glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+ klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
} else {
- glog.Infof("%s: %s", prefix, truncateBody(string(body)))
+ klog.Infof("%s: %s", prefix, truncateBody(string(body)))
}
}
}
@@ -1032,7 +1105,8 @@ func (r Result) Into(obj runtime.Object) error {
return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
}
if len(r.body) == 0 {
- return fmt.Errorf("0-length response")
+ return fmt.Errorf("0-length response with status code: %d and content type: %s",
+ r.statusCode, r.contentType)
}
out, _, err := r.decoder.Decode(r.body, nil, obj)
@@ -1073,7 +1147,7 @@ func (r Result) Error() error {
// to be backwards compatible with old servers that do not return a version, default to "v1"
out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
if err != nil {
- glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+ klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
return r.err
}
switch t := out.(type) {
@@ -1127,7 +1201,6 @@ func IsValidPathSegmentPrefix(name string) []string {
func ValidatePathSegmentName(name string, prefix bool) []string {
if prefix {
return IsValidPathSegmentPrefix(name)
- } else {
- return IsValidPathSegmentName(name)
}
+ return IsValidPathSegmentName(name)
}
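
To illustrate the new finalURLTemplate behavior, the sketch below (standalone Go, not client-go code; the paths are hypothetical) shows the kind of translation it performs so request metrics are keyed by URL shape rather than by individual object or namespace names:

package main

import "fmt"

func main() {
	// Left: a concrete request path; right: the templated form now used as the
	// metrics key, with object-specific segments replaced by placeholders.
	examples := [][2]string{
		{"/api/v1/namespaces/default/pods/mypod", "/api/v1/namespaces/{namespace}/pods/{name}"},
		{"/apis/apps/v1/namespaces/default/deployments/nginx", "/apis/apps/v1/namespaces/{namespace}/deployments/{name}"},
		{"/api/v1/nodes/node-1", "/api/v1/nodes/{name}"},
	}
	for _, e := range examples {
		fmt.Printf("%-52s -> %s\n", e[0], e[1])
	}
}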
diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go
index b6a067632..de33ecbfc 100644
--- a/vendor/k8s.io/client-go/rest/transport.go
+++ b/vendor/k8s.io/client-go/rest/transport.go
@@ -18,6 +18,7 @@ package rest
import (
"crypto/tls"
+ "errors"
"net/http"
"k8s.io/client-go/plugin/pkg/client/auth/exec"
@@ -59,39 +60,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
// TransportConfig converts a client config to an appropriate transport config.
func (c *Config) TransportConfig() (*transport.Config, error) {
- wt := c.WrapTransport
- if c.ExecProvider != nil {
- provider, err := exec.GetAuthenticator(c.ExecProvider)
- if err != nil {
- return nil, err
- }
- if wt != nil {
- previousWT := wt
- wt = func(rt http.RoundTripper) http.RoundTripper {
- return provider.WrapTransport(previousWT(rt))
- }
- } else {
- wt = provider.WrapTransport
- }
- }
- if c.AuthProvider != nil {
- provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
- if err != nil {
- return nil, err
- }
- if wt != nil {
- previousWT := wt
- wt = func(rt http.RoundTripper) http.RoundTripper {
- return provider.WrapTransport(previousWT(rt))
- }
- } else {
- wt = provider.WrapTransport
- }
- }
- return &transport.Config{
+ conf := &transport.Config{
UserAgent: c.UserAgent,
Transport: c.Transport,
- WrapTransport: wt,
+ WrapTransport: c.WrapTransport,
TLS: transport.TLSConfig{
Insecure: c.Insecure,
ServerName: c.ServerName,
@@ -102,14 +74,45 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
KeyFile: c.KeyFile,
KeyData: c.KeyData,
},
- Username: c.Username,
- Password: c.Password,
- BearerToken: c.BearerToken,
+ Username: c.Username,
+ Password: c.Password,
+ BearerToken: c.BearerToken,
+ BearerTokenFile: c.BearerTokenFile,
Impersonate: transport.ImpersonationConfig{
UserName: c.Impersonate.UserName,
Groups: c.Impersonate.Groups,
Extra: c.Impersonate.Extra,
},
Dial: c.Dial,
- }, nil
+ }
+
+ if c.ExecProvider != nil && c.AuthProvider != nil {
+ return nil, errors.New("execProvider and authProvider cannot be used in combination")
+ }
+
+ if c.ExecProvider != nil {
+ provider, err := exec.GetAuthenticator(c.ExecProvider)
+ if err != nil {
+ return nil, err
+ }
+ if err := provider.UpdateTransportConfig(conf); err != nil {
+ return nil, err
+ }
+ }
+ if c.AuthProvider != nil {
+ provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
+ if err != nil {
+ return nil, err
+ }
+ conf.Wrap(provider.WrapTransport)
+ }
+ return conf, nil
+}
+
+// Wrap adds a transport middleware function that will give the caller
+// an opportunity to wrap the underlying http.RoundTripper prior to the
+// first API call being made. The provided function is invoked after any
+// existing transport wrappers are invoked.
+func (c *Config) Wrap(fn transport.WrapperFunc) {
+ c.WrapTransport = transport.Wrappers(c.WrapTransport, fn)
}
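
A short sketch (hypothetical host and middleware) of the new Config.Wrap helper: it composes with any WrapTransport that is already set instead of overwriting it, which is why the doc comment steers callers away from assigning the field directly.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/rest"
)

// loggingRoundTripper is a hypothetical middleware used only to illustrate Wrap.
type loggingRoundTripper struct{ next http.RoundTripper }

func (l *loggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	fmt.Printf("-> %s %s\n", req.Method, req.URL)
	return l.next.RoundTrip(req)
}

func main() {
	cfg := &rest.Config{Host: "https://example.invalid"} // hypothetical host
	// Wrap composes with any WrapTransport that is already set instead of
	// overwriting it; the wrapped transport is built when a client is created.
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return &loggingRoundTripper{next: rt}
	})
	_ = cfg
}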
diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go
index eff848abc..d00e42f86 100644
--- a/vendor/k8s.io/client-go/rest/urlbackoff.go
+++ b/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -20,9 +20,9 @@ import (
"net/url"
"time"
- "github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
)
// Set of resp. Codes that we backoff for.
@@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) {
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
- glog.V(4).Infof("Disabling backoff strategy")
+ klog.V(4).Infof("Disabling backoff strategy")
b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
@@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
// in the future.
host, err := url.Parse(rawurl.String())
if err != nil {
- glog.V(4).Infof("Error extracting url: %v", rawurl)
+ klog.V(4).Infof("Error extracting url: %v", rawurl)
panic("bad url!")
}
return host.Host
@@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i
b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
return
} else if responseCode >= 300 || err != nil {
- glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+ klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
}
//If we got this far, there is no backoff required for this URL anymore.
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
index 73bb63add..e95c020b2 100644
--- a/vendor/k8s.io/client-go/rest/watch/decoder.go
+++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -54,7 +54,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
return "", nil, fmt.Errorf("unable to decode to metav1.Event")
}
switch got.Type {
- case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error):
+ case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark):
default:
return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
}
diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
index 67568bf0b..c1ab45f33 100644
--- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
index 0a081871a..5871575a6 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
@@ -15,4 +15,5 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
+
package api
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
index 43e26487c..65a36936b 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
@@ -29,6 +29,8 @@ import (
func init() {
sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
redactedBytes = []byte(string(sDec))
+ sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
+ dataOmittedBytes = []byte(string(sDec))
}
// IsConfigEmpty returns true if the config is empty.
@@ -79,7 +81,10 @@ func MinifyConfig(config *Config) error {
return nil
}
-var redactedBytes []byte
+var (
+ redactedBytes []byte
+ dataOmittedBytes []byte
+)
// ShortenConfig redacts raw data entries from the config object for a human-readable view.
func ShortenConfig(config *Config) {
@@ -97,7 +102,7 @@ func ShortenConfig(config *Config) {
}
for key, cluster := range config.Clusters {
if len(cluster.CertificateAuthorityData) > 0 {
- cluster.CertificateAuthorityData = redactedBytes
+ cluster.CertificateAuthorityData = dataOmittedBytes
}
config.Clusters[key] = cluster
}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 1391df702..990a440c6 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -17,6 +17,8 @@ limitations under the License.
package api
import (
+ "fmt"
+
"k8s.io/apimachinery/pkg/runtime"
)
@@ -150,6 +152,25 @@ type AuthProviderConfig struct {
Config map[string]string `json:"config,omitempty"`
}
+var _ fmt.Stringer = new(AuthProviderConfig)
+var _ fmt.GoStringer = new(AuthProviderConfig)
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// AuthProviderConfig to prevent accidental leaking via logs.
+func (c AuthProviderConfig) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of
+// AuthProviderConfig to prevent accidental leaking via logs.
+func (c AuthProviderConfig) String() string {
+ cfg := "<nil>"
+ if c.Config != nil {
+ cfg = "--- REDACTED ---"
+ }
+ return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg)
+}
+
// ExecConfig specifies a command to provide client credentials. The command is exec'd
// and outputs structured stdout holding credentials.
//
@@ -172,6 +193,29 @@ type ExecConfig struct {
APIVersion string `json:"apiVersion,omitempty"`
}
+var _ fmt.Stringer = new(ExecConfig)
+var _ fmt.GoStringer = new(ExecConfig)
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// ExecConfig to prevent accidental leaking via logs.
+func (c ExecConfig) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig
+// to prevent accidental leaking via logs.
+func (c ExecConfig) String() string {
+ var args []string
+ if len(c.Args) > 0 {
+ args = []string{"--- REDACTED ---"}
+ }
+ env := "[]ExecEnvVar(nil)"
+ if len(c.Env) > 0 {
+ env = "[]ExecEnvVar{--- REDACTED ---}"
+ }
+ return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
+}
+
// ExecEnvVar is used for setting environment variables when executing an exec-based
// credential plugin.
type ExecEnvVar struct {
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
index 085c088ee..3240a7a98 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,31 +46,26 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
*out = make(map[string][]string, len(*in))
for key, val := range *in {
+ var outVal []string
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = make([]string, len(val))
- copy((*out)[key], val)
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
}
+ (*out)[key] = outVal
}
}
if in.AuthProvider != nil {
in, out := &in.AuthProvider, &out.AuthProvider
- if *in == nil {
- *out = nil
- } else {
- *out = new(AuthProviderConfig)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(AuthProviderConfig)
+ (*in).DeepCopyInto(*out)
}
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
- if *in == nil {
- *out = nil
- } else {
- *out = new(ExecConfig)
- (*in).DeepCopyInto(*out)
- }
+ *out = new(ExecConfig)
+ (*in).DeepCopyInto(*out)
}
if in.Extensions != nil {
in, out := &in.Extensions, &out.Extensions
@@ -159,36 +154,45 @@ func (in *Config) DeepCopyInto(out *Config) {
in, out := &in.Clusters, &out.Clusters
*out = make(map[string]*Cluster, len(*in))
for key, val := range *in {
+ var outVal *Cluster
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = new(Cluster)
- val.DeepCopyInto((*out)[key])
+ in, out := &val, &outVal
+ *out = new(Cluster)
+ (*in).DeepCopyInto(*out)
}
+ (*out)[key] = outVal
}
}
if in.AuthInfos != nil {
in, out := &in.AuthInfos, &out.AuthInfos
*out = make(map[string]*AuthInfo, len(*in))
for key, val := range *in {
+ var outVal *AuthInfo
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = new(AuthInfo)
- val.DeepCopyInto((*out)[key])
+ in, out := &val, &outVal
+ *out = new(AuthInfo)
+ (*in).DeepCopyInto(*out)
}
+ (*out)[key] = outVal
}
}
if in.Contexts != nil {
in, out := &in.Contexts, &out.Contexts
*out = make(map[string]*Context, len(*in))
for key, val := range *in {
+ var outVal *Context
if val == nil {
(*out)[key] = nil
} else {
- (*out)[key] = new(Context)
- val.DeepCopyInto((*out)[key])
+ in, out := &val, &outVal
+ *out = new(Context)
+ (*in).DeepCopyInto(*out)
}
+ (*out)[key] = outVal
}
}
if in.Extensions != nil {
diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS
new file mode 100644
index 000000000..f150be536
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- wojtek-t
+- eparis
+- krousey
+- jayunit100
+- fgrzadkowski
+- tmrts
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
new file mode 100644
index 000000000..d1f1be34c
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+ "io"
+)
+
+// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
+// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
+//
+// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
+// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
+// That results in an oversized call to spdystream.Stream#Write [3],
+// which results in a single oversized data frame[4] that is too large.
+//
+// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
+// [2] https://golang.org/pkg/io/#Copy
+// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
+// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
+type readerWrapper struct {
+ reader io.Reader
+}
+
+func (r readerWrapper) Read(p []byte) (int, error) {
+ return r.reader.Read(p)
+}
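
The standalone sketch below (standard library only, not part of client-go) shows why hiding WriteTo matters: wrapped in a Read-only type, a bytes.Buffer is copied through io.Copy's bounded internal buffer instead of being flushed to the destination in one oversized write.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// plainReader mirrors readerWrapper: it exposes only Read, hiding the
// underlying bytes.Buffer's WriteTo fast path from io.Copy.
type plainReader struct{ r io.Reader }

func (p plainReader) Read(b []byte) (int, error) { return p.r.Read(b) }

// chunkWriter records how the copy arrives: the number of Write calls and the
// largest single chunk.
type chunkWriter struct {
	calls, max int
}

func (w *chunkWriter) Write(p []byte) (int, error) {
	w.calls++
	if len(p) > w.max {
		w.max = len(p)
	}
	return len(p), nil
}

func main() {
	payload := bytes.Repeat([]byte("x"), 100*1024) // 100 KiB of stdin data

	direct := &chunkWriter{}
	io.Copy(direct, bytes.NewBuffer(payload)) // Buffer.WriteTo: one 100 KiB write

	wrapped := &chunkWriter{}
	io.Copy(wrapped, plainReader{r: bytes.NewBuffer(payload)}) // 32 KiB read/write chunks

	fmt.Printf("direct:  %d write(s), largest %d bytes\n", direct.calls, direct.max)
	fmt.Printf("wrapped: %d write(s), largest %d bytes\n", wrapped.calls, wrapped.max)
}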
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
index 6b69f366e..892d8d105 100644
--- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
+++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
@@ -22,7 +22,7 @@ import (
"net/http"
"net/url"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
@@ -30,8 +30,8 @@ import (
spdy "k8s.io/client-go/transport/spdy"
)
-// StreamOptions holds information pertaining to the current streaming session: supported stream
-// protocols, input/output streams, if the client is requesting a TTY, and a terminal size queue to
+// StreamOptions holds information pertaining to the current streaming session:
+// input/output streams, whether the client is requesting a TTY, and a terminal size queue to
// support terminal resizing.
type StreamOptions struct {
Stdin io.Reader
@@ -132,7 +132,7 @@ func (e *streamExecutor) Stream(options StreamOptions) error {
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
- glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
+ klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
index 92dad727f..4120f1f5f 100644
--- a/vendor/k8s.io/client-go/tools/remotecommand/v1.go
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
@@ -22,9 +22,9 @@ import (
"io/ioutil"
"net/http"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/klog"
)
// streamProtocolV1 implements the first version of the streaming exec & attach
@@ -53,10 +53,10 @@ func (p *streamProtocolV1) stream(conn streamCreator) error {
errorChan := make(chan error)
cp := func(s string, dst io.Writer, src io.Reader) {
- glog.V(6).Infof("Copying %s", s)
- defer glog.V(6).Infof("Done copying %s", s)
+ klog.V(6).Infof("Copying %s", s)
+ defer klog.V(6).Infof("Done copying %s", s)
if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
- glog.Errorf("Error copying %s: %v", s, err)
+ klog.Errorf("Error copying %s: %v", s, err)
}
if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
doneChan <- struct{}{}
@@ -127,7 +127,7 @@ func (p *streamProtocolV1) stream(conn streamCreator) error {
// because stdin is not closed until the process exits. If we try to call
// stdin.Close(), it returns no error but doesn't unblock the copy. It will
// exit when the process exits, instead.
- go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin)
+ go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
}
waitCount := 0
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
index b74ae8de2..4b0001502 100644
--- a/vendor/k8s.io/client-go/tools/remotecommand/v2.go
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
@@ -101,7 +101,7 @@ func (p *streamProtocolV2) copyStdin() {
// the executed command will remain running.
defer once.Do(func() { p.remoteStdin.Close() })
- if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil {
+ if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
runtime.HandleError(err)
}
}()
diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS
new file mode 100644
index 000000000..a52176903
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- smarterclayton
+- wojtek-t
+- deads2k
+- liggitt
+- krousey
+- caesarxuchao
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index 83291c575..7cffe2a5f 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -43,6 +43,7 @@ type tlsCacheKey struct {
caData string
certData string
keyData string
+ getCert string
serverName string
dial string
}
@@ -52,7 +53,7 @@ func (t tlsCacheKey) String() string {
if len(t.keyData) > 0 {
keyText = "<redacted>"
}
- return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.serverName, t.dial)
+ return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial)
}
func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
@@ -85,7 +86,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
dial = (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
- }).Dial
+ }).DialContext
}
// Cache a single transport for these options
c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
@@ -93,7 +94,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: idleConnsPerHost,
- Dial: dial,
+ DialContext: dial,
})
return c.transports[key], nil
}
@@ -109,6 +110,7 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
caData: string(c.TLS.CAData),
certData: string(c.TLS.CertData),
keyData: string(c.TLS.KeyData),
+ getCert: fmt.Sprintf("%p", c.TLS.GetCert),
serverName: c.TLS.ServerName,
dial: fmt.Sprintf("%p", c.Dial),
}, nil
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
index af347dafe..5de0a2cb1 100644
--- a/vendor/k8s.io/client-go/transport/config.go
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -17,6 +17,8 @@ limitations under the License.
package transport
import (
+ "context"
+ "crypto/tls"
"net"
"net/http"
)
@@ -37,6 +39,11 @@ type Config struct {
// Bearer token for authentication
BearerToken string
+ // Path to a file containing a BearerToken.
+ // If set, the contents are periodically read.
+ // The last successfully read value takes precedence over BearerToken.
+ BearerTokenFile string
+
// Impersonate is the config that this Config will impersonate using
Impersonate ImpersonationConfig
@@ -50,10 +57,13 @@ type Config struct {
// from TLSClientConfig, Transport, or http.DefaultTransport). The
// config may layer other RoundTrippers on top of the returned
// RoundTripper.
- WrapTransport func(rt http.RoundTripper) http.RoundTripper
+ //
+ // A future release will change this field to an array. Use config.Wrap()
+ // instead of setting this value directly.
+ WrapTransport WrapperFunc
// Dial specifies the dial function for creating unencrypted TCP connections.
- Dial func(network, addr string) (net.Conn, error)
+ Dial func(ctx context.Context, network, address string) (net.Conn, error)
}
// ImpersonationConfig has all the available impersonation options
@@ -78,12 +88,25 @@ func (c *Config) HasBasicAuth() bool {
// HasTokenAuth returns whether the configuration has token authentication or not.
func (c *Config) HasTokenAuth() bool {
- return len(c.BearerToken) != 0
+ return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0
}
// HasCertAuth returns whether the configuration has certificate authentication or not.
func (c *Config) HasCertAuth() bool {
- return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0
+ return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0)
+}
+
+// HasCertCallback returns whether the configuration has a certificate callback or not.
+func (c *Config) HasCertCallback() bool {
+ return c.TLS.GetCert != nil
+}
+
+// Wrap adds a transport middleware function that will give the caller
+// an opportunity to wrap the underlying http.RoundTripper prior to the
+// first API call being made. The provided function is invoked after any
+// existing transport wrappers are invoked.
+func (c *Config) Wrap(fn WrapperFunc) {
+ c.WrapTransport = Wrappers(c.WrapTransport, fn)
}
// TLSConfig holds the information needed to set up a TLS transport.
@@ -98,4 +121,6 @@ type TLSConfig struct {
CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile.
CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile.
KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile.
+
+ GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supersede this field.
}
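
A minimal sketch of how the new transport.Config knobs above might be used together. The file paths and the certificate loader are hypothetical, and transport.New is assumed here as the package's constructor (it is not shown in this diff).

package main

import (
	"crypto/tls"
	"fmt"

	"k8s.io/client-go/transport"
)

func main() {
	cfg := &transport.Config{
		// Hypothetical in-cluster paths. The token file is re-read periodically;
		// the last successfully read value takes precedence over BearerToken.
		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
		TLS: transport.TLSConfig{
			CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
			// GetCert is only consulted when no static cert/key is configured;
			// CertData/CertFile and KeyData/KeyFile supersede the callback.
			GetCert: func() (*tls.Certificate, error) {
				// Hypothetical loader: a real caller might reload a rotated
				// client certificate from disk here.
				return nil, nil
			},
		},
	}

	// Outside a pod the token file will not exist, so this is expected to fail there.
	rt, err := transport.New(cfg)
	if err != nil {
		fmt.Println("building transport failed:", err)
		return
	}
	fmt.Printf("built %T\n", rt)
}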
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
index de64e0078..117a9c8c4 100644
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -17,13 +17,13 @@ limitations under the License.
package transport
import (
- "bytes"
"fmt"
"net/http"
"strings"
"time"
- "github.com/golang/glog"
+ "golang.org/x/oauth2"
+ "k8s.io/klog"
utilnet "k8s.io/apimachinery/pkg/util/net"
)
@@ -45,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
case config.HasBasicAuth() && config.HasTokenAuth():
return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
case config.HasTokenAuth():
- rt = NewBearerAuthRoundTripper(config.BearerToken, rt)
+ var err error
+ rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt)
+ if err != nil {
+ return nil, err
+ }
case config.HasBasicAuth():
rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
}
@@ -63,13 +67,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
// DebugWrappers wraps a round tripper and logs based on the current log level.
func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
switch {
- case bool(glog.V(9)):
+ case bool(klog.V(9)):
rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders)
- case bool(glog.V(8)):
+ case bool(klog.V(8)):
rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders)
- case bool(glog.V(7)):
+ case bool(klog.V(7)):
rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus)
- case bool(glog.V(6)):
+ case bool(klog.V(6)):
rt = newDebuggingRoundTripper(rt, debugURLTiming)
}
@@ -139,7 +143,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@@ -167,7 +171,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@@ -198,7 +202,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@@ -258,7 +262,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.delegate.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.delegate)
}
}
@@ -266,13 +270,35 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r
type bearerAuthRoundTripper struct {
bearer string
+ source oauth2.TokenSource
rt http.RoundTripper
}
// NewBearerAuthRoundTripper adds the provided bearer token to a request
// unless the authorization header has already been set.
func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
- return &bearerAuthRoundTripper{bearer, rt}
+ return &bearerAuthRoundTripper{bearer, nil, rt}
+}
+
+// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+// If tokenFile is non-empty, it is periodically read,
+// and the last successfully read content is used as the bearer token.
+// If tokenFile is non-empty and bearer is empty, the tokenFile is read
+// immediately to populate the initial bearer token.
+func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
+ if len(tokenFile) == 0 {
+ return &bearerAuthRoundTripper{bearer, nil, rt}, nil
+ }
+ source := NewCachedFileTokenSource(tokenFile)
+ if len(bearer) == 0 {
+ token, err := source.Token()
+ if err != nil {
+ return nil, err
+ }
+ bearer = token.AccessToken
+ }
+ return &bearerAuthRoundTripper{bearer, source, rt}, nil
}
func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -281,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response,
}
req = utilnet.CloneRequest(req)
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer))
+ token := rt.bearer
+ if rt.source != nil {
+ if refreshedToken, err := rt.source.Token(); err == nil {
+ token = refreshedToken.AccessToken
+ }
+ }
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
return rt.rt.RoundTrip(req)
}
@@ -289,7 +321,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@@ -336,7 +368,7 @@ func (r *requestInfo) toCurl() string {
}
}
- return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL)
+ return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
}
// debuggingRoundTripper will display information about the requests passing
@@ -373,7 +405,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
- glog.Errorf("CancelRequest not implemented")
+ klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper)
}
}
@@ -381,17 +413,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
reqInfo := newRequestInfo(req)
if rt.levels[debugJustURL] {
- glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
+ klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
}
if rt.levels[debugCurlCommand] {
- glog.Infof("%s", reqInfo.toCurl())
+ klog.Infof("%s", reqInfo.toCurl())
}
if rt.levels[debugRequestHeaders] {
- glog.Infof("Request Headers:")
+ klog.Infof("Request Headers:")
for key, values := range reqInfo.RequestHeaders {
for _, value := range values {
- glog.Infof(" %s: %s", key, value)
+ klog.Infof(" %s: %s", key, value)
}
}
}
@@ -403,16 +435,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
reqInfo.complete(response, err)
if rt.levels[debugURLTiming] {
- glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+ klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
}
if rt.levels[debugResponseStatus] {
- glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+ klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
}
if rt.levels[debugResponseHeaders] {
- glog.Infof("Response Headers:")
+ klog.Infof("Response Headers:")
for key, values := range reqInfo.ResponseHeaders {
for _, value := range values {
- glog.Infof(" %s: %s", key, value)
+ klog.Infof(" %s: %s", key, value)
}
}
}
@@ -435,7 +467,7 @@ func shouldEscape(b byte) bool {
}
func headerKeyEscape(key string) string {
- var buf bytes.Buffer
+ buf := strings.Builder{}
for i := 0; i < len(key); i++ {
b := key[i]
if shouldEscape(b) {
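
A minimal sketch of how the refresh-capable bearer round tripper added above might be constructed directly (it is normally wired up via HTTPWrappersForConfig); this is not part of the patch, and the token file path is an assumption.

    package main

    import (
        "fmt"
        "net/http"

        "k8s.io/client-go/transport"
    )

    func main() {
        // With an empty bearer string, the initial token is read from the file;
        // afterwards the file is re-read periodically and the last successfully
        // read content is used as the bearer token.
        rt, err := transport.NewBearerAuthWithRefreshRoundTripper(
            "", // no static token
            "/var/run/secrets/kubernetes.io/serviceaccount/token", // assumed path
            http.DefaultTransport,
        )
        if err != nil {
            fmt.Println("building round tripper:", err)
            return
        }
        client := &http.Client{Transport: rt}
        _ = client // use client.Do(...) against the API server
    }
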
diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go
index e0eb468ba..53cc7ee18 100644
--- a/vendor/k8s.io/client-go/transport/spdy/spdy.go
+++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go
@@ -38,7 +38,7 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er
if err != nil {
return nil, nil, err
}
- upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true)
+ upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false)
wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper)
if err != nil {
return nil, nil, err
diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go
new file mode 100644
index 000000000..b8cadd382
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/token_source.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/oauth2"
+ "k8s.io/klog"
+)
+
+// TokenSourceWrapTransport returns a WrapTransport that injects bearer token
+// authentication from an oauth2.TokenSource.
+func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) http.RoundTripper {
+ return func(rt http.RoundTripper) http.RoundTripper {
+ return &tokenSourceTransport{
+ base: rt,
+ ort: &oauth2.Transport{
+ Source: ts,
+ Base: rt,
+ },
+ }
+ }
+}
+
+// NewCachedFileTokenSource returns an oauth2.TokenSource that reads a token
+// from a file at the specified path and periodically reloads it.
+func NewCachedFileTokenSource(path string) oauth2.TokenSource {
+ return &cachingTokenSource{
+ now: time.Now,
+ leeway: 10 * time.Second,
+ base: &fileTokenSource{
+ path: path,
+ // This period was picked because it is half of the duration between when the kubelet
+ // refreshes a projected service account token and when the original token expires.
+ // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime.
+ // This should induce re-reading at a frequency that works with the token volume source.
+ period: time.Minute,
+ },
+ }
+}
+
+// NewCachedTokenSource returns an oauth2.TokenSource that caches the token
+// returned by the designated TokenSource ts.
+func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource {
+ return &cachingTokenSource{
+ now: time.Now,
+ base: ts,
+ }
+}
+
+type tokenSourceTransport struct {
+ base http.RoundTripper
+ ort http.RoundTripper
+}
+
+func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // This is to allow --token to override other bearer token providers.
+ if req.Header.Get("Authorization") != "" {
+ return tst.base.RoundTrip(req)
+ }
+ return tst.ort.RoundTrip(req)
+}
+
+type fileTokenSource struct {
+ path string
+ period time.Duration
+}
+
+var _ = oauth2.TokenSource(&fileTokenSource{})
+
+func (ts *fileTokenSource) Token() (*oauth2.Token, error) {
+ tokb, err := ioutil.ReadFile(ts.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err)
+ }
+ tok := strings.TrimSpace(string(tokb))
+ if len(tok) == 0 {
+ return nil, fmt.Errorf("read empty token from file %q", ts.path)
+ }
+
+ return &oauth2.Token{
+ AccessToken: tok,
+ Expiry: time.Now().Add(ts.period),
+ }, nil
+}
+
+type cachingTokenSource struct {
+ base oauth2.TokenSource
+ leeway time.Duration
+
+ sync.RWMutex
+ tok *oauth2.Token
+
+ // for testing
+ now func() time.Time
+}
+
+var _ = oauth2.TokenSource(&cachingTokenSource{})
+
+func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
+ now := ts.now()
+ // fast path
+ ts.RLock()
+ tok := ts.tok
+ ts.RUnlock()
+
+ if tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+ return tok, nil
+ }
+
+ // slow path
+ ts.Lock()
+ defer ts.Unlock()
+ if tok := ts.tok; tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+ return tok, nil
+ }
+
+ tok, err := ts.base.Token()
+ if err != nil {
+ if ts.tok == nil {
+ return nil, err
+ }
+ klog.Errorf("Unable to rotate token: %v", err)
+ return ts.tok, nil
+ }
+
+ ts.tok = tok
+ return tok, nil
+}
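
The new token_source.go above is typically consumed through TokenSourceWrapTransport. The following sketch (not part of the patch; the file path is an assumption) shows the intended wiring: the file-backed source is cached, re-read roughly once a minute, and skipped entirely when the caller already set an Authorization header.

    package main

    import (
        "net/http"

        "k8s.io/client-go/transport"
    )

    func main() {
        // Assumed token file path.
        ts := transport.NewCachedFileTokenSource("/var/run/secrets/token")

        // Every request sent through the wrapped transport carries the cached
        // bearer token unless an Authorization header is already present.
        wrap := transport.TokenSourceWrapTransport(ts)
        client := &http.Client{Transport: wrap(http.DefaultTransport)}
        _ = client
    }
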
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
index c2bb7ae5e..2a145c971 100644
--- a/vendor/k8s.io/client-go/transport/transport.go
+++ b/vendor/k8s.io/client-go/transport/transport.go
@@ -17,6 +17,7 @@ limitations under the License.
package transport
import (
+ "context"
"crypto/tls"
"crypto/x509"
"fmt"
@@ -28,7 +29,7 @@ import (
// or transport level security defined by the provided Config.
func New(config *Config) (http.RoundTripper, error) {
// Set transport level security
- if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) {
+ if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) {
return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
}
@@ -52,7 +53,7 @@ func New(config *Config) (http.RoundTripper, error) {
// TLSConfigFor returns a tls.Config that will provide the transport level security defined
// by the provided Config. Will return nil if no transport level security is requested.
func TLSConfigFor(c *Config) (*tls.Config, error) {
- if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) {
+ if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) {
return nil, nil
}
if c.HasCA() && c.TLS.Insecure {
@@ -75,12 +76,40 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
tlsConfig.RootCAs = rootCertPool(c.TLS.CAData)
}
+ var staticCert *tls.Certificate
if c.HasCertAuth() {
+ // If key/cert were provided, verify them before setting up
+ // tlsConfig.GetClientCertificate.
cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
if err != nil {
return nil, err
}
- tlsConfig.Certificates = []tls.Certificate{cert}
+ staticCert = &cert
+ }
+
+ if c.HasCertAuth() || c.HasCertCallback() {
+ tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ // Note: static key/cert data always take precedence over cert
+ // callback.
+ if staticCert != nil {
+ return staticCert, nil
+ }
+ if c.HasCertCallback() {
+ cert, err := c.TLS.GetCert()
+ if err != nil {
+ return nil, err
+ }
+ // GetCert may return empty value, meaning no cert.
+ if cert != nil {
+ return cert, nil
+ }
+ }
+
+ // Both c.TLS.CertData/KeyData were unset and GetCert didn't return
+ // anything. Return an empty tls.Certificate, no client cert will
+ // be sent to the server.
+ return &tls.Certificate{}, nil
+ }
}
return tlsConfig, nil
@@ -139,3 +168,60 @@ func rootCertPool(caData []byte) *x509.CertPool {
certPool.AppendCertsFromPEM(caData)
return certPool
}
+
+// WrapperFunc wraps an http.RoundTripper when a new transport
+// is created for a client, allowing per connection behavior
+// to be injected.
+type WrapperFunc func(rt http.RoundTripper) http.RoundTripper
+
+// Wrappers accepts any number of wrappers and returns a wrapper
+// function that is the equivalent of calling each of them in order. Nil
+// values are ignored, which makes this function convenient for incrementally
+// wrapping a function.
+func Wrappers(fns ...WrapperFunc) WrapperFunc {
+ if len(fns) == 0 {
+ return nil
+ }
+ // optimize the common case of wrapping a possibly nil transport wrapper
+ // with an additional wrapper
+ if len(fns) == 2 && fns[0] == nil {
+ return fns[1]
+ }
+ return func(rt http.RoundTripper) http.RoundTripper {
+ base := rt
+ for _, fn := range fns {
+ if fn != nil {
+ base = fn(base)
+ }
+ }
+ return base
+ }
+}
+
+// ContextCanceller prevents new requests after the provided context is finished.
+// err is returned when the context is closed, allowing the caller to provide a context
+// appropriate error.
+func ContextCanceller(ctx context.Context, err error) WrapperFunc {
+ return func(rt http.RoundTripper) http.RoundTripper {
+ return &contextCanceller{
+ ctx: ctx,
+ rt: rt,
+ err: err,
+ }
+ }
+}
+
+type contextCanceller struct {
+ ctx context.Context
+ rt http.RoundTripper
+ err error
+}
+
+func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) {
+ select {
+ case <-b.ctx.Done():
+ return nil, b.err
+ default:
+ return b.rt.RoundTrip(req)
+ }
+}
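
As a hedged illustration of the new WrapperFunc plumbing (not part of the patch), Wrappers and ContextCanceller compose like ordinary middleware; the shutdown error text below is an assumption.

    package main

    import (
        "context"
        "errors"
        "net/http"

        "k8s.io/client-go/transport"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Nil entries are ignored by Wrappers, which keeps incremental
        // composition convenient.
        wrap := transport.Wrappers(
            nil,
            transport.ContextCanceller(ctx, errors.New("client is shutting down")),
        )

        client := &http.Client{Transport: wrap(http.DefaultTransport)}
        _ = client // after cancel(), new requests fail with the error above
    }
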
diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS
new file mode 100644
index 000000000..3cf036438
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- sig-auth-certificates-approvers
+reviewers:
+- sig-auth-certificates-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
index fb7f5facc..9fd097af5 100644
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -18,26 +18,25 @@ package cert
import (
"bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
+ "crypto"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
- "errors"
"fmt"
- "math"
+ "io/ioutil"
"math/big"
"net"
+ "path"
+ "strings"
"time"
-)
-const (
- rsaKeySize = 2048
- duration365d = time.Hour * 24 * 365
+ "k8s.io/client-go/util/keyutil"
)
+const duration365d = time.Hour * 24 * 365
+
// Config contains the basic fields required for creating a certificate
type Config struct {
CommonName string
@@ -54,13 +53,8 @@ type AltNames struct {
IPs []net.IP
}
-// NewPrivateKey creates an RSA private key
-func NewPrivateKey() (*rsa.PrivateKey, error) {
- return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
-}
-
// NewSelfSignedCACert creates a CA certificate
-func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) {
+func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
now := time.Now()
tmpl := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(0),
@@ -72,7 +66,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
- IsCA: true,
+ IsCA: true,
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
@@ -82,62 +76,40 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er
return x509.ParseCertificate(certDERBytes)
}
-// NewSignedCert creates a signed certificate using the given CA certificate and key
-func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) {
- serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
- if err != nil {
- return nil, err
- }
- if len(cfg.CommonName) == 0 {
- return nil, errors.New("must specify a CommonName")
- }
- if len(cfg.Usages) == 0 {
- return nil, errors.New("must specify at least one ExtKeyUsage")
- }
-
- certTmpl := x509.Certificate{
- Subject: pkix.Name{
- CommonName: cfg.CommonName,
- Organization: cfg.Organization,
- },
- DNSNames: cfg.AltNames.DNSNames,
- IPAddresses: cfg.AltNames.IPs,
- SerialNumber: serial,
- NotBefore: caCert.NotBefore,
- NotAfter: time.Now().Add(duration365d).UTC(),
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: cfg.Usages,
- }
- certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
- if err != nil {
- return nil, err
- }
- return x509.ParseCertificate(certDERBytes)
+// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name.
+// You may also specify additional subject alt names (either IP or DNS names) for the certificate.
+func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
+ return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
}
-// MakeEllipticPrivateKeyPEM creates an ECDSA private key
-func MakeEllipticPrivateKeyPEM() ([]byte, error) {
- privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
- if err != nil {
- return nil, err
- }
-
- derBytes, err := x509.MarshalECPrivateKey(privateKey)
- if err != nil {
- return nil, err
+// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
+// for the certificate.
+//
+// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+// Certs/keys not existing in that directory are created.
+func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
+ validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
+ maxAge := time.Hour * 24 * 365 // one year self-signed certs
+
+ baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
+ certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
+ keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
+ if len(fixtureDirectory) > 0 {
+ cert, err := ioutil.ReadFile(certFixturePath)
+ if err == nil {
+ key, err := ioutil.ReadFile(keyFixturePath)
+ if err == nil {
+ return cert, key, nil
+ }
+ return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
+ }
+ maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
}
- privateKeyPemBlock := &pem.Block{
- Type: ECPrivateKeyBlockType,
- Bytes: derBytes,
- }
- return pem.EncodeToMemory(privateKeyPemBlock), nil
-}
-
-// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
-// Host may be an IP or a DNS name
-// You may also specify additional subject alt names (either ip or dns names) for the certificate
-func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
if err != nil {
return nil, nil, err
@@ -148,12 +120,12 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
},
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(time.Hour * 24 * 365),
+ NotBefore: validFrom,
+ NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
- IsCA: true,
+ IsCA: true,
}
caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
@@ -176,8 +148,8 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
Subject: pkix.Name{
CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
},
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(time.Hour * 24 * 365),
+ NotBefore: validFrom,
+ NotAfter: validFrom.Add(maxAge),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
@@ -209,37 +181,26 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
// Generate key
keyBuffer := bytes.Buffer{}
- if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+ if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, err
}
+ if len(fixtureDirectory) > 0 {
+ if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
+ return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
+ }
+ if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
+ return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
+ }
+ }
+
return certBuffer.Bytes(), keyBuffer.Bytes(), nil
}
-// FormatBytesCert receives byte array certificate and formats in human-readable format
-func FormatBytesCert(cert []byte) (string, error) {
- block, _ := pem.Decode(cert)
- c, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return "", fmt.Errorf("failed to parse certificate [%v]", err)
+func ipsToStrings(ips []net.IP) []string {
+ ss := make([]string, 0, len(ips))
+ for _, ip := range ips {
+ ss = append(ss, ip.String())
}
- return FormatCert(c), nil
-}
-
-// FormatCert receives certificate and formats in human-readable format
-func FormatCert(c *x509.Certificate) string {
- var ips []string
- for _, ip := range c.IPAddresses {
- ips = append(ips, ip.String())
- }
- altNames := append(ips, c.DNSNames...)
- res := fmt.Sprintf(
- "Issuer: CN=%s | Subject: CN=%s | CA: %t\n",
- c.Issuer.CommonName, c.Subject.CommonName, c.IsCA,
- )
- res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter)
- if len(altNames) > 0 {
- res += fmt.Sprintf("\nAlternate Names: %v", altNames)
- }
- return res
+ return ss
}
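
A small usage sketch for the reshaped cert helpers above (not part of the patch); the SANs and the "testdata" fixture directory are assumptions.

    package main

    import (
        "fmt"
        "net"

        "k8s.io/client-go/util/cert"
    )

    func main() {
        // One-off self-signed serving cert, backdated one hour and valid one year.
        certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey(
            "localhost",
            []net.IP{net.ParseIP("127.0.0.1")},
            []string{"example.local"}, // assumed extra DNS SAN
        )
        if err != nil {
            fmt.Println("generate:", err)
            return
        }
        fmt.Printf("cert: %d bytes, key: %d bytes\n", len(certPEM), len(keyPEM))

        // With a fixture directory, an existing <host>_<ips>_<dns>.crt/.key pair
        // is reused; otherwise a long-lived pair is generated and written there.
        if _, _, err := cert.GenerateSelfSignedCertKeyWithFixtures("localhost", nil, nil, "testdata"); err != nil {
            fmt.Println("fixtures:", err)
        }
    }
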
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
index a41f8054a..5efb24894 100644
--- a/vendor/k8s.io/client-go/util/cert/io.go
+++ b/vendor/k8s.io/client-go/util/cert/io.go
@@ -69,38 +69,6 @@ func WriteCert(certPath string, data []byte) error {
return ioutil.WriteFile(certPath, data, os.FileMode(0644))
}
-// WriteKey writes the pem-encoded key data to keyPath.
-// The key file will be created with file mode 0600.
-// If the key file already exists, it will be overwritten.
-// The parent directory of the keyPath will be created as needed with file mode 0755.
-func WriteKey(keyPath string, data []byte) error {
- if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
- return err
- }
- return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
-}
-
-// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
-// can't find one, it will generate a new key and store it there.
-func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
- loadedData, err := ioutil.ReadFile(keyPath)
- if err == nil {
- return loadedData, false, err
- }
- if !os.IsNotExist(err) {
- return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
- }
-
- generatedData, err := MakeEllipticPrivateKeyPEM()
- if err != nil {
- return nil, false, fmt.Errorf("error generating key: %v", err)
- }
- if err := WriteKey(keyPath, generatedData); err != nil {
- return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
- }
- return generatedData, true, nil
-}
-
// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPool(filename string) (*x509.CertPool, error) {
@@ -128,31 +96,3 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) {
}
return certs, nil
}
-
-// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
-// Returns an error if the file could not be read or if the private key could not be parsed.
-func PrivateKeyFromFile(file string) (interface{}, error) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil, err
- }
- key, err := ParsePrivateKeyPEM(data)
- if err != nil {
- return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
- }
- return key, nil
-}
-
-// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
-// Reads public keys from both public and private key files.
-func PublicKeysFromFile(file string) ([]interface{}, error) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil, err
- }
- keys, err := ParsePublicKeysPEM(data)
- if err != nil {
- return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
- }
- return keys, nil
-}
diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go
index b99e36651..9185e2e22 100644
--- a/vendor/k8s.io/client-go/util/cert/pem.go
+++ b/vendor/k8s.io/client-go/util/cert/pem.go
@@ -17,136 +17,18 @@ limitations under the License.
package cert
import (
- "crypto/ecdsa"
- "crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
- "fmt"
)
const (
- // ECPrivateKeyBlockType is a possible value for pem.Block.Type.
- ECPrivateKeyBlockType = "EC PRIVATE KEY"
- // RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
- RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
- // PrivateKeyBlockType is a possible value for pem.Block.Type.
- PrivateKeyBlockType = "PRIVATE KEY"
- // PublicKeyBlockType is a possible value for pem.Block.Type.
- PublicKeyBlockType = "PUBLIC KEY"
// CertificateBlockType is a possible value for pem.Block.Type.
CertificateBlockType = "CERTIFICATE"
// CertificateRequestBlockType is a possible value for pem.Block.Type.
CertificateRequestBlockType = "CERTIFICATE REQUEST"
)
-// EncodePublicKeyPEM returns PEM-encoded public data
-func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
- der, err := x509.MarshalPKIXPublicKey(key)
- if err != nil {
- return []byte{}, err
- }
- block := pem.Block{
- Type: PublicKeyBlockType,
- Bytes: der,
- }
- return pem.EncodeToMemory(&block), nil
-}
-
-// EncodePrivateKeyPEM returns PEM-encoded private key data
-func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
- block := pem.Block{
- Type: RSAPrivateKeyBlockType,
- Bytes: x509.MarshalPKCS1PrivateKey(key),
- }
- return pem.EncodeToMemory(&block)
-}
-
-// EncodeCertPEM returns PEM-endcoded certificate data
-func EncodeCertPEM(cert *x509.Certificate) []byte {
- block := pem.Block{
- Type: CertificateBlockType,
- Bytes: cert.Raw,
- }
- return pem.EncodeToMemory(&block)
-}
-
-// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
-// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
-func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
- var privateKeyPemBlock *pem.Block
- for {
- privateKeyPemBlock, keyData = pem.Decode(keyData)
- if privateKeyPemBlock == nil {
- break
- }
-
- switch privateKeyPemBlock.Type {
- case ECPrivateKeyBlockType:
- // ECDSA Private Key in ASN.1 format
- if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- case RSAPrivateKeyBlockType:
- // RSA Private Key in PKCS#1 format
- if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- case PrivateKeyBlockType:
- // RSA or ECDSA Private Key in unencrypted PKCS#8 format
- if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- }
-
- // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
- // originally, only the first PEM block was parsed and expected to be a key block
- }
-
- // we read all the PEM blocks and didn't recognize one
- return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
-}
-
-// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
-// Reads public keys from both public and private key files.
-func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
- var block *pem.Block
- keys := []interface{}{}
- for {
- // read the next block
- block, keyData = pem.Decode(keyData)
- if block == nil {
- break
- }
-
- // test block against parsing functions
- if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
- keys = append(keys, &privateKey.PublicKey)
- continue
- }
- if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
- keys = append(keys, publicKey)
- continue
- }
- if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
- keys = append(keys, &privateKey.PublicKey)
- continue
- }
- if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
- keys = append(keys, publicKey)
- continue
- }
-
- // tolerate non-key PEM blocks for backwards compatibility
- // originally, only the first PEM block was parsed and expected to be a key block
- }
-
- if len(keys) == 0 {
- return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
- }
- return keys, nil
-}
-
// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
@@ -177,93 +59,3 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
}
return certs, nil
}
-
-// parseRSAPublicKey parses a single RSA public key from the provided data
-func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
- if cert, err := x509.ParseCertificate(data); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- // Test if parsed key is an RSA Public Key
- var pubKey *rsa.PublicKey
- var ok bool
- if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
- }
-
- return pubKey, nil
-}
-
-// parseRSAPrivateKey parses a single RSA private key from the provided data
-func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
- return nil, err
- }
- }
-
- // Test if parsed key is an RSA Private Key
- var privKey *rsa.PrivateKey
- var ok bool
- if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
- }
-
- return privKey, nil
-}
-
-// parseECPublicKey parses a single ECDSA public key from the provided data
-func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
- if cert, err := x509.ParseCertificate(data); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- // Test if parsed key is an ECDSA Public Key
- var pubKey *ecdsa.PublicKey
- var ok bool
- if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
- }
-
- return pubKey, nil
-}
-
-// parseECPrivateKey parses a single ECDSA private key from the provided data
-func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
- return nil, err
- }
-
- // Test if parsed key is an ECDSA Private Key
- var privKey *ecdsa.PrivateKey
- var ok bool
- if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
- }
-
- return privKey, nil
-}
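
After this cleanup the package's remaining public surface is certificate parsing; a short sketch of its use (not part of the patch; the bundle path is an assumption):

    package main

    import (
        "fmt"
        "io/ioutil"

        "k8s.io/client-go/util/cert"
    )

    func main() {
        // Assumed path to a PEM bundle containing CERTIFICATE blocks.
        pemBytes, err := ioutil.ReadFile("/etc/ssl/certs/ca-certificates.crt")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        certs, err := cert.ParseCertsPEM(pemBytes)
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        for _, c := range certs {
            fmt.Println(c.Subject.CommonName)
        }
    }
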
diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
new file mode 100644
index 000000000..235a9e019
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package connrotation implements a connection dialer that tracks and can close
+// all created connections.
+//
+// This is used for credential rotation of long-lived connections, when there's
+// no way to re-authenticate on a live connection.
+package connrotation
+
+import (
+ "context"
+ "net"
+ "sync"
+)
+
+// DialFunc is a shorthand for signature of net.DialContext.
+type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// Dialer opens connections through Dial and tracks them.
+type Dialer struct {
+ dial DialFunc
+
+ mu sync.Mutex
+ conns map[*closableConn]struct{}
+}
+
+// NewDialer creates a new Dialer instance.
+//
+// If dial is not nil, it will be used to create new underlying connections.
+// Otherwise net.DialContext is used.
+func NewDialer(dial DialFunc) *Dialer {
+ return &Dialer{
+ dial: dial,
+ conns: make(map[*closableConn]struct{}),
+ }
+}
+
+// CloseAll forcibly closes all tracked connections.
+//
+// Note: new connections may get created before CloseAll returns.
+func (d *Dialer) CloseAll() {
+ d.mu.Lock()
+ conns := d.conns
+ d.conns = make(map[*closableConn]struct{})
+ d.mu.Unlock()
+
+ for conn := range conns {
+ conn.Close()
+ }
+}
+
+// Dial creates a new tracked connection.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+ return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext creates a new tracked connection.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ conn, err := d.dial(ctx, network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ closable := &closableConn{Conn: conn}
+
+ // Start tracking the connection
+ d.mu.Lock()
+ d.conns[closable] = struct{}{}
+ d.mu.Unlock()
+
+ // When the connection is closed, remove it from the map. This will
+ // be a no-op if the connection isn't in the map, e.g. if CloseAll()
+ // is called.
+ closable.onClose = func() {
+ d.mu.Lock()
+ delete(d.conns, closable)
+ d.mu.Unlock()
+ }
+
+ return closable, nil
+}
+
+type closableConn struct {
+ onClose func()
+ net.Conn
+}
+
+func (c *closableConn) Close() error {
+ go c.onClose()
+ return c.Conn.Close()
+}
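
The connection-tracking dialer above exists for credential rotation on long-lived connections; a minimal sketch of standalone use (not part of the patch; the target address is an assumption):

    package main

    import (
        "context"
        "fmt"
        "net"
        "time"

        "k8s.io/client-go/util/connrotation"
    )

    func main() {
        // Wrap a standard dialer so every connection it creates is tracked.
        d := connrotation.NewDialer((&net.Dialer{Timeout: 5 * time.Second}).DialContext)

        conn, err := d.DialContext(context.Background(), "tcp", "example.com:443")
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        fmt.Println("connected to", conn.RemoteAddr())

        // On credential rotation, close every tracked connection; callers
        // reconnect (and re-authenticate) on their next request.
        d.CloseAll()
    }
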
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
index 71d442a62..39cd72f95 100644
--- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
+++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -21,7 +21,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/util/clock"
- "k8s.io/client-go/util/integer"
+ "k8s.io/utils/integer"
)
type backoffEntry struct {
@@ -99,7 +99,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
- return p.Clock.Now().Sub(eventTime) < entry.backoff
+ return p.Clock.Since(eventTime) < entry.backoff
}
// Returns True if time since lastupdate is less than the current backoff window.
diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS
new file mode 100644
index 000000000..470b7a1c9
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS
@@ -0,0 +1,7 @@
+approvers:
+- sig-auth-certificates-approvers
+reviewers:
+- sig-auth-certificates-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go
new file mode 100644
index 000000000..83c2c6254
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/keyutil/key.go
@@ -0,0 +1,323 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package keyutil contains utilities for managing public/private key pairs.
+package keyutil
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+const (
+ // ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+ ECPrivateKeyBlockType = "EC PRIVATE KEY"
+ // RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+ RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+ // PrivateKeyBlockType is a possible value for pem.Block.Type.
+ PrivateKeyBlockType = "PRIVATE KEY"
+ // PublicKeyBlockType is a possible value for pem.Block.Type.
+ PublicKeyBlockType = "PUBLIC KEY"
+)
+
+// MakeEllipticPrivateKeyPEM creates an ECDSA private key
+func MakeEllipticPrivateKeyPEM() ([]byte, error) {
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ derBytes, err := x509.MarshalECPrivateKey(privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKeyPemBlock := &pem.Block{
+ Type: ECPrivateKeyBlockType,
+ Bytes: derBytes,
+ }
+ return pem.EncodeToMemory(privateKeyPemBlock), nil
+}
+
+// WriteKey writes the pem-encoded key data to keyPath.
+// The key file will be created with file mode 0600.
+// If the key file already exists, it will be overwritten.
+// The parent directory of the keyPath will be created as needed with file mode 0755.
+func WriteKey(keyPath string, data []byte) error {
+ if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
+}
+
+// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
+// can't find one, it will generate a new key and store it there.
+func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
+ loadedData, err := ioutil.ReadFile(keyPath)
+ // Call verifyKeyData to ensure the file wasn't empty/corrupt.
+ if err == nil && verifyKeyData(loadedData) {
+ return loadedData, false, err
+ }
+ if !os.IsNotExist(err) {
+ return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
+ }
+
+ generatedData, err := MakeEllipticPrivateKeyPEM()
+ if err != nil {
+ return nil, false, fmt.Errorf("error generating key: %v", err)
+ }
+ if err := WriteKey(keyPath, generatedData); err != nil {
+ return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
+ }
+ return generatedData, true, nil
+}
+
+// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
+// a PEM encoded block or returns an error.
+func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
+ switch t := privateKey.(type) {
+ case *ecdsa.PrivateKey:
+ derBytes, err := x509.MarshalECPrivateKey(t)
+ if err != nil {
+ return nil, err
+ }
+ block := &pem.Block{
+ Type: ECPrivateKeyBlockType,
+ Bytes: derBytes,
+ }
+ return pem.EncodeToMemory(block), nil
+ case *rsa.PrivateKey:
+ block := &pem.Block{
+ Type: RSAPrivateKeyBlockType,
+ Bytes: x509.MarshalPKCS1PrivateKey(t),
+ }
+ return pem.EncodeToMemory(block), nil
+ default:
+ return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
+ }
+}
+
+// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
+// Returns an error if the file could not be read or if the private key could not be parsed.
+func PrivateKeyFromFile(file string) (interface{}, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ key, err := ParsePrivateKeyPEM(data)
+ if err != nil {
+ return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
+ }
+ return key, nil
+}
+
+// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
+// Reads public keys from both public and private key files.
+func PublicKeysFromFile(file string) ([]interface{}, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ keys, err := ParsePublicKeysPEM(data)
+ if err != nil {
+ return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
+ }
+ return keys, nil
+}
+
+// verifyKeyData returns true if the provided data appears to be a valid private key.
+func verifyKeyData(data []byte) bool {
+ if len(data) == 0 {
+ return false
+ }
+ _, err := ParsePrivateKeyPEM(data)
+ return err == nil
+}
+
+// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
+func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
+ var privateKeyPemBlock *pem.Block
+ for {
+ privateKeyPemBlock, keyData = pem.Decode(keyData)
+ if privateKeyPemBlock == nil {
+ break
+ }
+
+ switch privateKeyPemBlock.Type {
+ case ECPrivateKeyBlockType:
+ // ECDSA Private Key in ASN.1 format
+ if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ case RSAPrivateKeyBlockType:
+ // RSA Private Key in PKCS#1 format
+ if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ case PrivateKeyBlockType:
+ // RSA or ECDSA Private Key in unencrypted PKCS#8 format
+ if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ }
+
+ // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+ // originally, only the first PEM block was parsed and expected to be a key block
+ }
+
+ // we read all the PEM blocks and didn't recognize one
+ return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
+}
+
+// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
+// Reads public keys from both public and private key files.
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
+ var block *pem.Block
+ keys := []interface{}{}
+ for {
+ // read the next block
+ block, keyData = pem.Decode(keyData)
+ if block == nil {
+ break
+ }
+
+ // test block against parsing functions
+ if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
+ keys = append(keys, &privateKey.PublicKey)
+ continue
+ }
+ if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
+ keys = append(keys, publicKey)
+ continue
+ }
+ if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
+ keys = append(keys, &privateKey.PublicKey)
+ continue
+ }
+ if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
+ keys = append(keys, publicKey)
+ continue
+ }
+
+ // tolerate non-key PEM blocks for backwards compatibility
+ // originally, only the first PEM block was parsed and expected to be a key block
+ }
+
+ if len(keys) == 0 {
+ return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
+ }
+ return keys, nil
+}
+
+// parseRSAPublicKey parses a single RSA public key from the provided data
+func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+ if cert, err := x509.ParseCertificate(data); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an RSA Public Key
+ var pubKey *rsa.PublicKey
+ var ok bool
+ if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
+ }
+
+ return pubKey, nil
+}
+
+// parseRSAPrivateKey parses a single RSA private key from the provided data
+func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an RSA Private Key
+ var privKey *rsa.PrivateKey
+ var ok bool
+ if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
+ }
+
+ return privKey, nil
+}
+
+// parseECPublicKey parses a single ECDSA public key from the provided data
+func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+ if cert, err := x509.ParseCertificate(data); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an ECDSA Public Key
+ var pubKey *ecdsa.PublicKey
+ var ok bool
+ if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
+ }
+
+ return pubKey, nil
+}
+
+// parseECPrivateKey parses a single ECDSA private key from the provided data
+func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
+ return nil, err
+ }
+
+ // Test if parsed key is an ECDSA Private Key
+ var privKey *ecdsa.PrivateKey
+ var ok bool
+ if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
+ }
+
+ return privKey, nil
+}
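
A short sketch of the keyutil package introduced above (not part of the patch; the key path is an assumption):

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/keyutil"
    )

    func main() {
        // Load the key at the assumed path, or generate a new ECDSA P-256 key
        // there if the file does not exist yet.
        keyPEM, generated, err := keyutil.LoadOrGenerateKeyFile("/tmp/demo-client.key")
        if err != nil {
            fmt.Println("load/generate:", err)
            return
        }
        fmt.Println("generated new key:", generated)

        // Round-trip: parse the PEM back into a crypto key, then re-encode it.
        key, err := keyutil.ParsePrivateKeyPEM(keyPEM)
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        pemAgain, err := keyutil.MarshalPrivateKeyToPEM(key)
        if err != nil {
            fmt.Println("marshal:", err)
            return
        }
        fmt.Println("re-encoded", len(pemAgain), "bytes")
    }
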
diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml
new file mode 100644
index 000000000..0f508dae6
--- /dev/null
+++ b/vendor/k8s.io/klog/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go_import_path: k8s.io/klog
+dist: xenial
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - diff -u <(echo -n) <(golint $(go list -e ./...))
+ - go tool vet .
+ - go test -v -race ./...
+install:
+ - go get golang.org/x/lint/golint
diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md
new file mode 100644
index 000000000..574a56abb
--- /dev/null
+++ b/vendor/k8s.io/klog/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+## Contact Information
+
+- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/k8s.io/klog/LICENSE
index 37ec93a14..37ec93a14 100644
--- a/vendor/github.com/golang/glog/LICENSE
+++ b/vendor/k8s.io/klog/LICENSE
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS
new file mode 100644
index 000000000..380e514f2
--- /dev/null
+++ b/vendor/k8s.io/klog/OWNERS
@@ -0,0 +1,19 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+reviewers:
+ - jayunit100
+ - hoegaarden
+ - andyxning
+ - neolit123
+ - pohly
+ - yagonobre
+ - vincepri
+ - detiber
+approvers:
+ - dims
+ - thockin
+ - justinsb
+ - tallclair
+ - piosz
+ - brancz
+ - DirectXMan12
+ - lavalamp
diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md
new file mode 100644
index 000000000..bee306f39
--- /dev/null
+++ b/vendor/k8s.io/klog/README.md
@@ -0,0 +1,97 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog.
+
+## Why was klog created?
+
+The decision to create klog was one that wasn't made lightly, but it was necessary due to some
+drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
+
+> The code in this repo [...] is not itself under development
+
+This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
+
+ * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, none of which are well documented.
+ * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
+ * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
+
+Historical context is available here:
+
+ * https://github.com/kubernetes/kubernetes/issues/61006
+ * https://github.com/kubernetes/kubernetes/issues/70264
+ * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
+ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
+
+----
+
+How to use klog
+===============
+- Replace imports for `github.com/golang/glog` with `k8s.io/klog`
+- Use `klog.InitFlags(nil)` explicitly to initialize the global flags, as we no longer use an `init()` function to register them
+- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
+- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use the `klog.SetOutput()` method and supply an `io.Writer`. (See `examples/set_output/usage_set_output.go`)
+- For more on logging conventions, see [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)
+
+### Coexisting with glog
+This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
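
Following the migration steps in the README above, a minimal klog-based program looks roughly like this (a sketch, not part of the vendored files):

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // klog no longer registers its flags in init(); do it explicitly.
        // Passing nil registers them on the global flag.CommandLine set.
        klog.InitFlags(nil)
        flag.Parse()

        klog.Info("Prepare to repel boarders")
        if klog.V(2) {
            klog.Info("Starting transaction...")
        }
        klog.Flush() // flush buffered log output before exiting
    }
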
diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md
new file mode 100644
index 000000000..b53eb960c
--- /dev/null
+++ b/vendor/k8s.io/klog/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue is opened proposing a new release, with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS
new file mode 100644
index 000000000..6128a5869
--- /dev/null
+++ b/vendor/k8s.io/klog/SECURITY_CONTACTS
@@ -0,0 +1,20 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+dims
+thockin
+justinsb
+tallclair
+piosz
+brancz
+DirectXMan12
+lavalamp
diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md
new file mode 100644
index 000000000..0d15c00cf
--- /dev/null
+++ b/vendor/k8s.io/klog/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/k8s.io/klog/klog.go
index 54bd7afdc..10330d7ef 100644
--- a/vendor/github.com/golang/glog/glog.go
+++ b/vendor/k8s.io/klog/klog.go
@@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
//
@@ -35,11 +35,11 @@
// Log output is buffered and written periodically using Flush. Programs
// should call Flush before exiting to guarantee all log output is written.
//
-// By default, all log statements write to files in a temporary directory.
+// By default, all log statements write to standard error.
// This package provides several flags that modify this behavior.
// As a result, flag.Parse must be called before any logging is done.
//
-// -logtostderr=false
+// -logtostderr=true
// Logs are written to standard error instead of to files.
// -alsologtostderr=false
// Logs are written to standard error as well as to files.
@@ -68,7 +68,7 @@
// -vmodule=gopher*=3
// sets the V level to 3 in all Go files whose names begin "gopher".
//
-package glog
+package klog
import (
"bufio"
@@ -78,6 +78,7 @@ import (
"fmt"
"io"
stdLog "log"
+ "math"
"os"
"path/filepath"
"runtime"
@@ -396,13 +397,6 @@ type flushSyncWriter interface {
}
func init() {
- flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
- flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
- flag.Var(&logging.verbosity, "v", "log level for V logs")
- flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
- flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
- flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-
// Default stderrThreshold is ERROR.
logging.stderrThreshold = errorLog
@@ -410,6 +404,41 @@ func init() {
go logging.flushDaemon()
}
+var initDefaultsOnce sync.Once
+
+// InitFlags is for explicitly initializing the flags.
+func InitFlags(flagset *flag.FlagSet) {
+
+ // Initialize defaults.
+ initDefaultsOnce.Do(func() {
+ logging.logDir = ""
+ logging.logFile = ""
+ logging.logFileMaxSizeMB = 1800
+ logging.toStderr = true
+ logging.alsoToStderr = false
+ logging.skipHeaders = false
+ logging.skipLogHeaders = false
+ })
+
+ if flagset == nil {
+ flagset = flag.CommandLine
+ }
+
+ flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory")
+ flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file")
+ flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
+ "Defines the maximum size a log file can grow to. Unit is megabytes. "+
+ "If the value is 0, the maximum file size is unlimited.")
+ flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
+ flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
+ flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
+ flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
+ flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
+ flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+}
+
// Flush flushes all pending log I/O.
func Flush() {
logging.lockAndFlushAll()
@@ -453,6 +482,24 @@ type loggingT struct {
// safely using atomic.LoadInt32.
vmodule moduleSpec // The state of the -vmodule flag.
verbosity Level // V logging level, the value of the -v flag/
+
+ // If non-empty, overrides the choice of directory in which to write logs.
+ // See createLogDirs for the full list of possible destinations.
+ logDir string
+
+	// If non-empty, specifies the path of the file to write logs. Mutually exclusive
+	// with the log_dir option.
+ logFile string
+
+	// When logFile is specified, this limit ensures the logFile won't exceed a certain size. When it does, the
+	// logFile will be cleaned up. If this value is 0, no size limit is applied to logFile.
+ logFileMaxSizeMB uint64
+
+ // If true, do not add the prefix headers, useful when used with SetOutput
+ skipHeaders bool
+
+ // If true, do not add the headers to log files
+ skipLogHeaders bool
}
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
@@ -556,6 +603,9 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
s = infoLog // for safety.
}
buf := l.getBuffer()
+ if l.skipHeaders {
+ return buf
+ }
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
@@ -667,6 +717,45 @@ func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToSt
l.output(s, buf, file, line, alsoToStderr)
}
+// redirectBuffer is used to set an alternate destination for the logs
+type redirectBuffer struct {
+ w io.Writer
+}
+
+func (rb *redirectBuffer) Sync() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Flush() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
+ return rb.w.Write(bytes)
+}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+ for s := fatalLog; s >= infoLog; s-- {
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[s] = rb
+ }
+}
+
+// SetOutputBySeverity sets the output destination for specific severity
+func SetOutputBySeverity(name string, w io.Writer) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+ }
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[sev] = rb
+}
+
// output writes the data to the log files and releases the buffer.
func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
l.mu.Lock()
@@ -676,10 +765,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo
}
}
data := buf.Bytes()
- if !flag.Parsed() {
- os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
- os.Stderr.Write(data)
- } else if l.toStderr {
+ if l.toStderr {
os.Stderr.Write(data)
} else {
if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
@@ -802,18 +888,33 @@ func (l *loggingT) exit(err error) {
type syncBuffer struct {
logger *loggingT
*bufio.Writer
- file *os.File
- sev severity
- nbytes uint64 // The number of bytes written to this file
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+ maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}
func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}
+// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
+func CalculateMaxSize() uint64 {
+ if logging.logFile != "" {
+ if logging.logFileMaxSizeMB == 0 {
+ // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
+ return math.MaxUint64
+ }
+ // Flag logFileMaxSizeMB is in MB for user convenience.
+ return logging.logFileMaxSizeMB * 1024 * 1024
+ }
+	// If the "log_file" flag is not specified, the target file (sb.file) will be cleaned up when it reaches a fixed size.
+ return MaxSize
+}
+
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
- if sb.nbytes+uint64(len(p)) >= MaxSize {
- if err := sb.rotateFile(time.Now()); err != nil {
+ if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
+ if err := sb.rotateFile(time.Now(), false); err != nil {
sb.logger.exit(err)
}
}
@@ -826,13 +927,15 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) {
}
// rotateFile closes the syncBuffer's file and starts a new one.
-func (sb *syncBuffer) rotateFile(now time.Time) error {
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
if sb.file != nil {
sb.Flush()
sb.file.Close()
}
var err error
- sb.file, _, err = create(severityName[sb.sev], now)
+ sb.file, _, err = create(severityName[sb.sev], now, startup)
sb.nbytes = 0
if err != nil {
return err
@@ -840,6 +943,10 @@ func (sb *syncBuffer) rotateFile(now time.Time) error {
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+ if sb.logger.skipLogHeaders {
+ return nil
+ }
+
// Write header.
var buf bytes.Buffer
fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
@@ -864,10 +971,11 @@ func (l *loggingT) createFiles(sev severity) error {
// has already been created, we can stop.
for s := sev; s >= infoLog && l.file[s] == nil; s-- {
sb := &syncBuffer{
- logger: l,
- sev: s,
+ logger: l,
+ sev: s,
+ maxbytes: CalculateMaxSize(),
}
- if err := sb.rotateFile(now); err != nil {
+ if err := sb.rotateFile(now, true); err != nil {
return err
}
l.file[s] = sb
@@ -875,11 +983,11 @@ func (l *loggingT) createFiles(sev severity) error {
return nil
}
-const flushInterval = 30 * time.Second
+const flushInterval = 5 * time.Second
// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
- for _ = range time.NewTicker(flushInterval).C {
+ for range time.NewTicker(flushInterval).C {
l.lockAndFlushAll()
}
}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/k8s.io/klog/klog_file.go
index 65075d281..e4010ad4d 100644
--- a/vendor/github.com/golang/glog/glog_file.go
+++ b/vendor/k8s.io/klog/klog_file.go
@@ -16,11 +16,10 @@
// File I/O for logs.
-package glog
+package klog
import (
"errors"
- "flag"
"fmt"
"os"
"os/user"
@@ -36,13 +35,9 @@ var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
-// If non-empty, overrides the choice of directory in which to write logs.
-// See createLogDirs for the full list of possible destinations.
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
-
func createLogDirs() {
- if *logDir != "" {
- logDirs = append(logDirs, *logDir)
+ if logging.logDir != "" {
+ logDirs = append(logDirs, logging.logDir)
}
logDirs = append(logDirs, os.TempDir())
}
@@ -102,7 +97,16 @@ var onceLogDirs sync.Once
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
-func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
+ if logging.logFile != "" {
+ f, err := openOrCreate(logging.logFile, startup)
+ if err == nil {
+ return f, logging.logFile, nil
+ }
+ return nil, "", fmt.Errorf("log: unable to create log: %v", err)
+ }
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
@@ -111,7 +115,7 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
- f, err := os.Create(fname)
+ f, err := openOrCreate(fname, startup)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
@@ -122,3 +126,14 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}
+
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func openOrCreate(name string, startup bool) (*os.File, error) {
+ if startup {
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ return f, err
+ }
+ f, err := os.Create(name)
+ return f, err
+}
diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/k8s.io/utils/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go
index c6ea106f9..e4e740cad 100644
--- a/vendor/k8s.io/client-go/util/integer/integer.go
+++ b/vendor/k8s.io/utils/integer/integer.go
@@ -16,6 +16,7 @@ limitations under the License.
package integer
+// IntMax returns the maximum of the params
func IntMax(a, b int) int {
if b > a {
return b
@@ -23,6 +24,7 @@ func IntMax(a, b int) int {
return a
}
+// IntMin returns the minimum of the params
func IntMin(a, b int) int {
if b < a {
return b
@@ -30,6 +32,7 @@ func IntMin(a, b int) int {
return a
}
+// Int32Max returns the maximum of the params
func Int32Max(a, b int32) int32 {
if b > a {
return b
@@ -37,6 +40,7 @@ func Int32Max(a, b int32) int32 {
return a
}
+// Int32Min returns the minimum of the params
func Int32Min(a, b int32) int32 {
if b < a {
return b
@@ -44,6 +48,7 @@ func Int32Min(a, b int32) int32 {
return a
}
+// Int64Max returns the maximum of the params
func Int64Max(a, b int64) int64 {
if b > a {
return b
@@ -51,6 +56,7 @@ func Int64Max(a, b int64) int64 {
return a
}
+// Int64Min returns the minimum of the params
func Int64Min(a, b int64) int64 {
if b < a {
return b
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 000000000..17b11fd58
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,602 @@
+# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
+github.com/Azure/go-ansiterm/winterm
+github.com/Azure/go-ansiterm
+# github.com/BurntSushi/toml v0.3.1
+github.com/BurntSushi/toml
+# github.com/DataDog/zstd v1.4.0
+github.com/DataDog/zstd
+# github.com/Microsoft/go-winio v0.4.12
+github.com/Microsoft/go-winio
+github.com/Microsoft/go-winio/archive/tar
+github.com/Microsoft/go-winio/backuptar
+# github.com/Microsoft/hcsshim v0.8.6
+github.com/Microsoft/hcsshim
+github.com/Microsoft/hcsshim/internal/guid
+github.com/Microsoft/hcsshim/internal/hcs
+github.com/Microsoft/hcsshim/internal/hcserror
+github.com/Microsoft/hcsshim/internal/hns
+github.com/Microsoft/hcsshim/internal/mergemaps
+github.com/Microsoft/hcsshim/internal/schema1
+github.com/Microsoft/hcsshim/internal/wclayer
+github.com/Microsoft/hcsshim/internal/guestrequest
+github.com/Microsoft/hcsshim/internal/interop
+github.com/Microsoft/hcsshim/internal/logfields
+github.com/Microsoft/hcsshim/internal/timeout
+github.com/Microsoft/hcsshim/internal/schema2
+github.com/Microsoft/hcsshim/internal/longpath
+github.com/Microsoft/hcsshim/internal/safefile
+# github.com/VividCortex/ewma v1.1.1
+github.com/VividCortex/ewma
+# github.com/beorn7/perks v1.0.0
+github.com/beorn7/perks/quantile
+# github.com/blang/semver v3.5.1+incompatible
+github.com/blang/semver
+# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
+github.com/buger/goterm
+# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
+github.com/checkpoint-restore/go-criu
+github.com/checkpoint-restore/go-criu/rpc
+# github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc
+github.com/containerd/continuity/pathdriver
+# github.com/containernetworking/cni v0.7.1
+github.com/containernetworking/cni/pkg/types
+github.com/containernetworking/cni/pkg/types/current
+github.com/containernetworking/cni/libcni
+github.com/containernetworking/cni/pkg/invoke
+github.com/containernetworking/cni/pkg/version
+github.com/containernetworking/cni/pkg/types/020
+# github.com/containernetworking/plugins v0.8.1
+github.com/containernetworking/plugins/pkg/ns
+# github.com/containers/buildah v1.9.0
+github.com/containers/buildah
+github.com/containers/buildah/imagebuildah
+github.com/containers/buildah/pkg/chrootuser
+github.com/containers/buildah/pkg/cli
+github.com/containers/buildah/pkg/formats
+github.com/containers/buildah/util
+github.com/containers/buildah/pkg/secrets
+github.com/containers/buildah/pkg/parse
+github.com/containers/buildah/bind
+github.com/containers/buildah/chroot
+github.com/containers/buildah/docker
+github.com/containers/buildah/pkg/blobcache
+github.com/containers/buildah/pkg/overlay
+github.com/containers/buildah/pkg/unshare
+# github.com/containers/image v2.0.1+incompatible
+github.com/containers/image/directory
+github.com/containers/image/docker
+github.com/containers/image/docker/archive
+github.com/containers/image/manifest
+github.com/containers/image/pkg/docker/config
+github.com/containers/image/signature
+github.com/containers/image/transports
+github.com/containers/image/transports/alltransports
+github.com/containers/image/types
+github.com/containers/image/oci/archive
+github.com/containers/image/storage
+github.com/containers/image/copy
+github.com/containers/image/docker/reference
+github.com/containers/image/docker/tarfile
+github.com/containers/image/tarball
+github.com/containers/image/pkg/sysregistriesv2
+github.com/containers/image/image
+github.com/containers/image/oci/layout
+github.com/containers/image/pkg/sysregistries
+github.com/containers/image/directory/explicitfilepath
+github.com/containers/image/docker/policyconfiguration
+github.com/containers/image/pkg/blobinfocache/none
+github.com/containers/image/pkg/tlsclientconfig
+github.com/containers/image/pkg/strslice
+github.com/containers/image/version
+github.com/containers/image/docker/daemon
+github.com/containers/image/openshift
+github.com/containers/image/ostree
+github.com/containers/image/internal/tmpdir
+github.com/containers/image/oci/internal
+github.com/containers/image/pkg/blobinfocache
+github.com/containers/image/pkg/compression
+github.com/containers/image/pkg/blobinfocache/boltdb
+github.com/containers/image/pkg/blobinfocache/memory
+github.com/containers/image/pkg/blobinfocache/internal/prioritize
+# github.com/containers/psgo v1.3.1
+github.com/containers/psgo
+github.com/containers/psgo/internal/capabilities
+github.com/containers/psgo/internal/dev
+github.com/containers/psgo/internal/proc
+github.com/containers/psgo/internal/process
+github.com/containers/psgo/internal/host
+# github.com/containers/storage v1.12.13
+github.com/containers/storage
+github.com/containers/storage/pkg/archive
+github.com/containers/storage/pkg/chrootarchive
+github.com/containers/storage/pkg/idtools
+github.com/containers/storage/pkg/reexec
+github.com/containers/storage/pkg/mount
+github.com/containers/storage/pkg/stringid
+github.com/containers/storage/pkg/system
+github.com/containers/storage/pkg/truncindex
+github.com/containers/storage/pkg/parsers/kernel
+github.com/containers/storage/pkg/fileutils
+github.com/containers/storage/pkg/ioutils
+github.com/containers/storage/pkg/pools
+github.com/containers/storage/pkg/homedir
+github.com/containers/storage/drivers
+github.com/containers/storage/drivers/register
+github.com/containers/storage/pkg/config
+github.com/containers/storage/pkg/directory
+github.com/containers/storage/pkg/parsers
+github.com/containers/storage/pkg/stringutils
+github.com/containers/storage/pkg/longpath
+github.com/containers/storage/pkg/promise
+github.com/containers/storage/drivers/aufs
+github.com/containers/storage/drivers/btrfs
+github.com/containers/storage/drivers/devmapper
+github.com/containers/storage/drivers/overlay
+github.com/containers/storage/drivers/vfs
+github.com/containers/storage/drivers/windows
+github.com/containers/storage/drivers/zfs
+github.com/containers/storage/pkg/locker
+github.com/containers/storage/pkg/devicemapper
+github.com/containers/storage/pkg/dmesg
+github.com/containers/storage/pkg/loopback
+github.com/containers/storage/drivers/overlayutils
+github.com/containers/storage/drivers/quota
+github.com/containers/storage/pkg/fsutils
+github.com/containers/storage/pkg/ostree
+github.com/containers/storage/drivers/copy
+# github.com/coreos/go-iptables v0.4.1
+github.com/coreos/go-iptables/iptables
+# github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a
+github.com/coreos/go-systemd/activation
+github.com/coreos/go-systemd/dbus
+github.com/coreos/go-systemd/sdjournal
+github.com/coreos/go-systemd/journal
+# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
+github.com/coreos/pkg/dlopen
+# github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca
+github.com/cri-o/ocicni/pkg/ocicni
+# github.com/cyphar/filepath-securejoin v0.2.2
+github.com/cyphar/filepath-securejoin
+# github.com/davecgh/go-spew v1.1.1
+github.com/davecgh/go-spew/spew
+# github.com/docker/distribution v2.7.1+incompatible
+github.com/docker/distribution/reference
+github.com/docker/distribution/registry/api/errcode
+github.com/docker/distribution/registry/api/v2
+github.com/docker/distribution/registry/client
+github.com/docker/distribution/digestset
+github.com/docker/distribution
+github.com/docker/distribution/registry/client/auth/challenge
+github.com/docker/distribution/registry/client/transport
+github.com/docker/distribution/registry/storage/cache
+github.com/docker/distribution/registry/storage/cache/memory
+github.com/docker/distribution/metrics
+# github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16
+github.com/docker/docker/pkg/homedir
+github.com/docker/docker/pkg/signal
+github.com/docker/docker/oci/caps
+github.com/docker/docker/pkg/namesgenerator
+github.com/docker/docker/pkg/term
+github.com/docker/docker/pkg/ioutils
+github.com/docker/docker/profiles/seccomp
+github.com/docker/docker/pkg/parsers
+github.com/docker/docker/api/types/versions
+github.com/docker/docker/pkg/idtools
+github.com/docker/docker/errdefs
+github.com/docker/docker/pkg/term/windows
+github.com/docker/docker/pkg/longpath
+github.com/docker/docker/api/types
+github.com/docker/docker/pkg/parsers/kernel
+github.com/docker/docker/api/types/registry
+github.com/docker/docker/api/types/swarm
+github.com/docker/docker/pkg/fileutils
+github.com/docker/docker/pkg/stdcopy
+github.com/docker/docker/pkg/system
+github.com/docker/docker/client
+github.com/docker/docker/api/types/container
+github.com/docker/docker/api/types/filters
+github.com/docker/docker/api/types/mount
+github.com/docker/docker/api/types/network
+github.com/docker/docker/api/types/swarm/runtime
+github.com/docker/docker/pkg/pools
+github.com/docker/docker/pkg/mount
+github.com/docker/docker/api
+github.com/docker/docker/api/types/events
+github.com/docker/docker/api/types/image
+github.com/docker/docker/api/types/time
+github.com/docker/docker/api/types/volume
+github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/strslice
+# github.com/docker/docker-credential-helpers v0.6.2
+github.com/docker/docker-credential-helpers/credentials
+github.com/docker/docker-credential-helpers/client
+# github.com/docker/go-connections v0.4.0
+github.com/docker/go-connections/nat
+github.com/docker/go-connections/tlsconfig
+github.com/docker/go-connections/sockets
+# github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82
+github.com/docker/go-metrics
+# github.com/docker/go-units v0.4.0
+github.com/docker/go-units
+# github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241
+github.com/docker/libnetwork/resolvconf
+github.com/docker/libnetwork/types
+github.com/docker/libnetwork/resolvconf/dns
+# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
+github.com/docker/libtrust
+# github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c
+github.com/docker/spdystream
+github.com/docker/spdystream/spdy
+# github.com/etcd-io/bbolt v1.3.3
+github.com/etcd-io/bbolt
+# github.com/fatih/camelcase v1.0.0
+github.com/fatih/camelcase
+# github.com/fsnotify/fsnotify v1.4.7
+github.com/fsnotify/fsnotify
+# github.com/fsouza/go-dockerclient v1.4.1
+github.com/fsouza/go-dockerclient
+github.com/fsouza/go-dockerclient/internal/archive
+github.com/fsouza/go-dockerclient/internal/jsonmessage
+github.com/fsouza/go-dockerclient/internal/term
+# github.com/ghodss/yaml v1.0.0
+github.com/ghodss/yaml
+# github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f
+github.com/godbus/dbus
+# github.com/gogo/protobuf v1.2.1
+github.com/gogo/protobuf/proto
+github.com/gogo/protobuf/sortkeys
+# github.com/golang/protobuf v1.3.1
+github.com/golang/protobuf/proto
+# github.com/google/gofuzz v1.0.0
+github.com/google/gofuzz
+# github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf
+github.com/google/shlex
+# github.com/gorilla/mux v1.7.2
+github.com/gorilla/mux
+# github.com/hashicorp/errwrap v1.0.0
+github.com/hashicorp/errwrap
+# github.com/hashicorp/go-multierror v1.0.0
+github.com/hashicorp/go-multierror
+# github.com/hpcloud/tail v1.0.0
+github.com/hpcloud/tail
+github.com/hpcloud/tail/ratelimiter
+github.com/hpcloud/tail/util
+github.com/hpcloud/tail/watch
+github.com/hpcloud/tail/winfile
+# github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
+github.com/ijc/Gotty
+# github.com/imdario/mergo v0.3.7
+github.com/imdario/mergo
+# github.com/inconshreveable/mousetrap v1.0.0
+github.com/inconshreveable/mousetrap
+# github.com/ishidawataru/sctp v0.0.0-20180213033435-07191f837fed
+github.com/ishidawataru/sctp
+# github.com/json-iterator/go v1.1.6
+github.com/json-iterator/go
+# github.com/klauspost/compress v1.7.1
+github.com/klauspost/compress/flate
+# github.com/klauspost/cpuid v1.2.1
+github.com/klauspost/cpuid
+# github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/pgzip
+# github.com/konsorten/go-windows-terminal-sequences v1.0.2
+github.com/konsorten/go-windows-terminal-sequences
+# github.com/mattn/go-isatty v0.0.8
+github.com/mattn/go-isatty
+# github.com/mattn/go-shellwords v1.0.5
+github.com/mattn/go-shellwords
+# github.com/matttproud/golang_protobuf_extensions v1.0.1
+github.com/matttproud/golang_protobuf_extensions/pbutil
+# github.com/mistifyio/go-zfs v2.1.1+incompatible
+github.com/mistifyio/go-zfs
+# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
+github.com/modern-go/concurrent
+# github.com/modern-go/reflect2 v1.0.1
+github.com/modern-go/reflect2
+# github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
+github.com/mrunalp/fileutils
+# github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
+github.com/mtrmac/gpgme
+# github.com/onsi/ginkgo v1.8.0
+github.com/onsi/ginkgo/ginkgo
+github.com/onsi/ginkgo
+github.com/onsi/ginkgo/config
+github.com/onsi/ginkgo/ginkgo/convert
+github.com/onsi/ginkgo/ginkgo/interrupthandler
+github.com/onsi/ginkgo/ginkgo/nodot
+github.com/onsi/ginkgo/ginkgo/testrunner
+github.com/onsi/ginkgo/ginkgo/testsuite
+github.com/onsi/ginkgo/ginkgo/watch
+github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
+github.com/onsi/ginkgo/types
+github.com/onsi/ginkgo/internal/codelocation
+github.com/onsi/ginkgo/internal/failer
+github.com/onsi/ginkgo/internal/remote
+github.com/onsi/ginkgo/internal/suite
+github.com/onsi/ginkgo/internal/testingtproxy
+github.com/onsi/ginkgo/internal/writer
+github.com/onsi/ginkgo/reporters
+github.com/onsi/ginkgo/reporters/stenographer
+github.com/onsi/ginkgo/extensions/table
+github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
+github.com/onsi/ginkgo/internal/spec_iterator
+github.com/onsi/ginkgo/internal/containernode
+github.com/onsi/ginkgo/internal/leafnodes
+github.com/onsi/ginkgo/internal/spec
+github.com/onsi/ginkgo/internal/specrunner
+# github.com/onsi/gomega v1.5.0
+github.com/onsi/gomega
+github.com/onsi/gomega/gexec
+github.com/onsi/gomega/internal/assertion
+github.com/onsi/gomega/internal/asyncassertion
+github.com/onsi/gomega/internal/testingtsupport
+github.com/onsi/gomega/matchers
+github.com/onsi/gomega/types
+github.com/onsi/gomega/format
+github.com/onsi/gomega/gbytes
+github.com/onsi/gomega/internal/oraclematcher
+github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
+github.com/onsi/gomega/matchers/support/goraph/edge
+github.com/onsi/gomega/matchers/support/goraph/node
+github.com/onsi/gomega/matchers/support/goraph/util
+# github.com/opencontainers/go-digest v1.0.0-rc1
+github.com/opencontainers/go-digest
+# github.com/opencontainers/image-spec v1.0.1
+github.com/opencontainers/image-spec/specs-go/v1
+github.com/opencontainers/image-spec/specs-go
+# github.com/opencontainers/runc v1.0.0-rc6
+github.com/opencontainers/runc/libcontainer/user
+github.com/opencontainers/runc/libcontainer/apparmor
+github.com/opencontainers/runc/libcontainer/configs
+github.com/opencontainers/runc/libcontainer/devices
+github.com/opencontainers/runc/libcontainer/cgroups
+github.com/opencontainers/runc/libcontainer/system
+# github.com/opencontainers/runtime-spec v0.0.0-20181111125026-1722abf79c2f
+github.com/opencontainers/runtime-spec/specs-go
+# github.com/opencontainers/runtime-tools v0.9.0
+github.com/opencontainers/runtime-tools/generate
+github.com/opencontainers/runtime-tools/validate
+github.com/opencontainers/runtime-tools/generate/seccomp
+github.com/opencontainers/runtime-tools/filepath
+github.com/opencontainers/runtime-tools/specerror
+github.com/opencontainers/runtime-tools/error
+# github.com/opencontainers/selinux v1.2.2
+github.com/opencontainers/selinux/go-selinux/label
+github.com/opencontainers/selinux/go-selinux
+# github.com/openshift/imagebuilder v1.1.0
+github.com/openshift/imagebuilder
+github.com/openshift/imagebuilder/dockerfile/parser
+github.com/openshift/imagebuilder/dockerfile/command
+github.com/openshift/imagebuilder/signal
+github.com/openshift/imagebuilder/strslice
+# github.com/opentracing/opentracing-go v1.1.0
+github.com/opentracing/opentracing-go
+github.com/opentracing/opentracing-go/log
+github.com/opentracing/opentracing-go/ext
+# github.com/ostreedev/ostree-go v0.0.0-20181213164143-d0388bd827cf
+github.com/ostreedev/ostree-go/pkg/glibobject
+github.com/ostreedev/ostree-go/pkg/otbuiltin
+# github.com/pkg/errors v0.8.1
+github.com/pkg/errors
+# github.com/pkg/profile v1.3.0
+github.com/pkg/profile
+# github.com/pmezard/go-difflib v1.0.0
+github.com/pmezard/go-difflib/difflib
+# github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
+github.com/pquerna/ffjson/fflib/v1
+github.com/pquerna/ffjson/fflib/v1/internal
+# github.com/prometheus/client_golang v1.0.0
+github.com/prometheus/client_golang/prometheus
+github.com/prometheus/client_golang/prometheus/promhttp
+github.com/prometheus/client_golang/prometheus/internal
+# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
+github.com/prometheus/client_model/go
+# github.com/prometheus/common v0.6.0
+github.com/prometheus/common/expfmt
+github.com/prometheus/common/model
+github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
+# github.com/prometheus/procfs v0.0.2
+github.com/prometheus/procfs
+github.com/prometheus/procfs/internal/fs
+# github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
+github.com/seccomp/containers-golang
+# github.com/seccomp/libseccomp-golang v0.9.1
+github.com/seccomp/libseccomp-golang
+# github.com/sirupsen/logrus v1.4.2
+github.com/sirupsen/logrus
+github.com/sirupsen/logrus/hooks/syslog
+# github.com/spf13/cobra v0.0.5
+github.com/spf13/cobra
+# github.com/spf13/pflag v1.0.3
+github.com/spf13/pflag
+# github.com/stretchr/testify v1.3.0
+github.com/stretchr/testify/assert
+github.com/stretchr/testify/require
+# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
+github.com/syndtr/gocapability/capability
+# github.com/tchap/go-patricia v2.3.0+incompatible
+github.com/tchap/go-patricia/patricia
+# github.com/uber/jaeger-client-go v2.16.0+incompatible
+github.com/uber/jaeger-client-go
+github.com/uber/jaeger-client-go/config
+github.com/uber/jaeger-client-go/internal/baggage
+github.com/uber/jaeger-client-go/internal/spanlog
+github.com/uber/jaeger-client-go/internal/throttler
+github.com/uber/jaeger-client-go/log
+github.com/uber/jaeger-client-go/thrift
+github.com/uber/jaeger-client-go/thrift-gen/jaeger
+github.com/uber/jaeger-client-go/thrift-gen/sampling
+github.com/uber/jaeger-client-go/thrift-gen/zipkincore
+github.com/uber/jaeger-client-go/utils
+github.com/uber/jaeger-client-go/internal/baggage/remote
+github.com/uber/jaeger-client-go/internal/throttler/remote
+github.com/uber/jaeger-client-go/rpcmetrics
+github.com/uber/jaeger-client-go/transport
+github.com/uber/jaeger-client-go/thrift-gen/agent
+github.com/uber/jaeger-client-go/thrift-gen/baggage
+# github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5
+github.com/uber/jaeger-lib/metrics
+# github.com/ulikunitz/xz v0.5.6
+github.com/ulikunitz/xz
+github.com/ulikunitz/xz/internal/xlog
+github.com/ulikunitz/xz/lzma
+github.com/ulikunitz/xz/internal/hash
+# github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
+github.com/varlink/go/varlink
+github.com/varlink/go/cmd/varlink-go-interface-generator
+github.com/varlink/go/varlink/idl
+# github.com/vbatts/tar-split v0.11.1
+github.com/vbatts/tar-split/tar/asm
+github.com/vbatts/tar-split/tar/storage
+github.com/vbatts/tar-split/archive/tar
+# github.com/vbauerster/mpb v3.4.0+incompatible
+github.com/vbauerster/mpb
+github.com/vbauerster/mpb/decor
+github.com/vbauerster/mpb/cwriter
+github.com/vbauerster/mpb/internal
+# github.com/vishvananda/netlink v1.0.0
+github.com/vishvananda/netlink
+github.com/vishvananda/netlink/nl
+# github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc
+github.com/vishvananda/netns
+# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
+github.com/xeipuuv/gojsonpointer
+# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
+github.com/xeipuuv/gojsonreference
+# github.com/xeipuuv/gojsonschema v1.1.0
+github.com/xeipuuv/gojsonschema
+# golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4
+golang.org/x/crypto/ssh/terminal
+golang.org/x/crypto/openpgp
+golang.org/x/crypto/openpgp/armor
+golang.org/x/crypto/openpgp/errors
+golang.org/x/crypto/openpgp/packet
+golang.org/x/crypto/openpgp/s2k
+golang.org/x/crypto/cast5
+golang.org/x/crypto/openpgp/elgamal
+# golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+golang.org/x/net/context
+golang.org/x/net/http2
+golang.org/x/net/html/charset
+golang.org/x/net/proxy
+golang.org/x/net/http/httpguts
+golang.org/x/net/http2/hpack
+golang.org/x/net/idna
+golang.org/x/net/html
+golang.org/x/net/internal/socks
+golang.org/x/net/html/atom
+golang.org/x/net/context/ctxhttp
+# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+golang.org/x/oauth2
+golang.org/x/oauth2/internal
+# golang.org/x/sync v0.0.0-20190423024810-112230192c58
+golang.org/x/sync/semaphore
+# golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0
+golang.org/x/sys/unix
+golang.org/x/sys/windows
+golang.org/x/sys/windows/registry
+# golang.org/x/text v0.3.2
+golang.org/x/text/encoding
+golang.org/x/text/encoding/charmap
+golang.org/x/text/encoding/htmlindex
+golang.org/x/text/transform
+golang.org/x/text/secure/bidirule
+golang.org/x/text/unicode/bidi
+golang.org/x/text/unicode/norm
+golang.org/x/text/encoding/internal/identifier
+golang.org/x/text/encoding/internal
+golang.org/x/text/encoding/japanese
+golang.org/x/text/encoding/korean
+golang.org/x/text/encoding/simplifiedchinese
+golang.org/x/text/encoding/traditionalchinese
+golang.org/x/text/encoding/unicode
+golang.org/x/text/language
+golang.org/x/text/internal/utf8internal
+golang.org/x/text/runes
+golang.org/x/text/internal/language
+golang.org/x/text/internal/language/compact
+golang.org/x/text/internal/tag
+# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+golang.org/x/time/rate
+# google.golang.org/appengine v1.6.1
+google.golang.org/appengine/urlfetch
+google.golang.org/appengine/internal
+google.golang.org/appengine/internal/urlfetch
+google.golang.org/appengine/internal/base
+google.golang.org/appengine/internal/datastore
+google.golang.org/appengine/internal/log
+google.golang.org/appengine/internal/remote_api
+# gopkg.in/fsnotify.v1 v1.4.7
+gopkg.in/fsnotify.v1
+# gopkg.in/inf.v0 v0.9.1
+gopkg.in/inf.v0
+# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
+gopkg.in/tomb.v1
+# gopkg.in/yaml.v2 v2.2.2
+gopkg.in/yaml.v2
+# k8s.io/api v0.0.0-20190624085159-95846d7ef82a
+k8s.io/api/core/v1
+# k8s.io/apimachinery v0.0.0-20190624085041-961b39a1baa0
+k8s.io/apimachinery/pkg/apis/meta/v1
+k8s.io/apimachinery/pkg/util/wait
+k8s.io/apimachinery/pkg/util/runtime
+k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/runtime
+k8s.io/apimachinery/pkg/runtime/schema
+k8s.io/apimachinery/pkg/types
+k8s.io/apimachinery/pkg/util/intstr
+k8s.io/apimachinery/pkg/conversion
+k8s.io/apimachinery/pkg/fields
+k8s.io/apimachinery/pkg/labels
+k8s.io/apimachinery/pkg/selection
+k8s.io/apimachinery/pkg/watch
+k8s.io/apimachinery/pkg/util/httpstream
+k8s.io/apimachinery/pkg/util/remotecommand
+k8s.io/apimachinery/pkg/conversion/queryparams
+k8s.io/apimachinery/pkg/util/errors
+k8s.io/apimachinery/pkg/util/json
+k8s.io/apimachinery/pkg/util/naming
+k8s.io/apimachinery/pkg/util/sets
+k8s.io/apimachinery/third_party/forked/golang/reflect
+k8s.io/apimachinery/pkg/util/validation
+k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/api/errors
+k8s.io/apimachinery/pkg/runtime/serializer/streaming
+k8s.io/apimachinery/pkg/util/httpstream/spdy
+k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/version
+k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/util/clock
+k8s.io/apimachinery/third_party/forked/golang/netutil
+k8s.io/apimachinery/pkg/runtime/serializer/json
+k8s.io/apimachinery/pkg/runtime/serializer/protobuf
+k8s.io/apimachinery/pkg/runtime/serializer/recognizer
+k8s.io/apimachinery/pkg/runtime/serializer/versioning
+k8s.io/apimachinery/pkg/util/framer
+k8s.io/apimachinery/pkg/util/yaml
+k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+# k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
+k8s.io/client-go/tools/remotecommand
+k8s.io/client-go/rest
+k8s.io/client-go/transport/spdy
+k8s.io/client-go/util/exec
+k8s.io/client-go/util/homedir
+k8s.io/client-go/pkg/version
+k8s.io/client-go/plugin/pkg/client/auth/exec
+k8s.io/client-go/rest/watch
+k8s.io/client-go/tools/clientcmd/api
+k8s.io/client-go/tools/metrics
+k8s.io/client-go/transport
+k8s.io/client-go/util/cert
+k8s.io/client-go/util/flowcontrol
+k8s.io/client-go/pkg/apis/clientauthentication
+k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
+k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
+k8s.io/client-go/util/connrotation
+k8s.io/client-go/util/keyutil
+# k8s.io/klog v0.3.3
+k8s.io/klog
+# k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a
+k8s.io/utils/integer
+# sigs.k8s.io/yaml v1.1.0
+sigs.k8s.io/yaml
diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
new file mode 100644
index 000000000..e256a31e0
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
new file mode 100644
index 000000000..03ddc7318
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+dist: xenial
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
+ - go tool vet .
+ - go test -v -race ./...
+install:
+ - go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
new file mode 100644
index 000000000..de4711513
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+<!---
+If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
+-->
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) - Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+<!---
+Custom Information - if you're copying this template for the first time you can add custom content here, for example:
+
+## Contact Information
+
+- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
+- [Mailing list](URL)
+
+-->
diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
new file mode 100644
index 000000000..7805d36de
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
new file mode 100644
index 000000000..11ad7ce1a
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -0,0 +1,25 @@
+approvers:
+- dims
+- lavalamp
+- smarterclayton
+- deads2k
+- sttts
+- liggitt
+- caesarxuchao
+reviewers:
+- dims
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- gmarek
+- sttts
+- ncdc
+- tallclair
+labels:
+- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
new file mode 100644
index 000000000..0200f75b4
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files, though - just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support this. The same error occurs in `Unmarshal`, since map keys cannot be unmarshaled anyway (struct fields cannot serve as keys).
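+
+For illustration, here is a minimal sketch of this failure mode (the YAML literal is just one example of a map-typed key; the exact error text is not asserted):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	// A YAML mapping whose key is itself a mapping; JSON has no equivalent.
+	_, err := yaml.YAMLToJSON([]byte("{a: 1}: example\n"))
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+	}
+}
+```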
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md
new file mode 100644
index 000000000..6b642464e
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+The `yaml` Project is released on an as-needed basis. The process is as follows:
+
+1. An issue is opened proposing a new release, with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
new file mode 100644
index 000000000..0648a8ebf
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+cjcullen
+jessfraz
+liggitt
+philips
+tallclair
diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
new file mode 100644
index 000000000..0d15c00cf
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go
new file mode 100644
index 000000000..235b7f2cf
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/fields.go
@@ -0,0 +1,502 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
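+//
+// For example (illustrative), given
+//
+//	type Inner struct{ A int `json:"a"` }
+//	type Outer struct {
+//		Inner
+//		B int `json:"b"`
+//	}
+//
+// typeFields(reflect.TypeOf(Outer{})) reports both "a" (promoted from the
+// embedded Inner) and "b".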
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
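+//
+// For example (illustrative): if two embedded structs at the same depth both
+// declare a field named "id" and exactly one carries a `json:"id"` tag, the
+// tagged field dominates; if both are tagged, or neither is, it is a conflict
+// and no field is returned.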
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
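+//
+// For example (illustrative): foldFunc([]byte("name")) returns
+// simpleLetterEqualFold, while foldFunc([]byte("kind")) returns
+// equalFoldRight because of the special letter 'k'.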
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
new file mode 100644
index 000000000..024596112
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -0,0 +1,319 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// JSONOpt is a decoding option for decoding from JSON format.
+type JSONOpt func(*json.Decoder) *json.Decoder
+
+// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
+// optionally configuring the behavior of the JSON unmarshal.
+func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
+ return yamlUnmarshal(y, o, false, opts...)
+}
+
+// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
+// into an object, optionally configuring the behavior of the JSON unmarshal.
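+//
+// For example (illustrative), given
+//
+//	type T struct {
+//		A int `json:"a"`
+//	}
+//	var t T
+//
+// UnmarshalStrict([]byte("a: 1\nb: 2\n"), &t) returns an error because of the
+// unknown field "b", whereas Unmarshal ignores it.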
+func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
+ return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
+}
+
+// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
+// optionally performing the unmarshaling strictly.
+func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
+ vo := reflect.ValueOf(o)
+ unmarshalFn := yaml.Unmarshal
+ if strict {
+ unmarshalFn = yaml.UnmarshalStrict
+ }
+ j, err := yamlToJSON(y, &vo, unmarshalFn)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
+// object, optionally applying decoder options prior to decoding. We are not
+// using json.Unmarshal directly as we want the chance to pass in non-default
+// options.
+func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
+ d := json.NewDecoder(r)
+ for _, opt := range opts {
+ d = opt(d)
+ }
+ if err := d.Decode(&o); err != nil {
+ return fmt.Errorf("while decoding JSON: %v", err)
+ }
+ return nil
+}
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
+// passing JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+//
+// For strict decoding of YAML, use YAMLToJSONStrict.
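+//
+// For example (illustrative; output as documented in the README):
+//
+//	j, err := YAMLToJSON([]byte("name: John\nage: 30\n"))
+//	// j == []byte(`{"age":30,"name":"John"}`), err == nil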
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil, yaml.Unmarshal)
+}
+
+// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
+// returning an error on any duplicate field names.
+func YAMLToJSONStrict(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil, yaml.UnmarshalStrict)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yamlUnmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+}
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
new file mode 100644
index 000000000..ab3e06a22
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -0,0 +1,14 @@
+// This file contains changes that are only compatible with Go 1.10 and onwards.
+
+// +build go1.10
+
+package yaml
+
+import "encoding/json"
+
+// DisallowUnknownFields configures the JSON decoder to error out if unknown
+// fields come along, instead of dropping them by default.
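+//
+// For example (illustrative; Config and yamlData are hypothetical), it can be
+// passed as a JSONOpt to Unmarshal:
+//
+//	var cfg Config
+//	err := Unmarshal(yamlData, &cfg, DisallowUnknownFields)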
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
+ d.DisallowUnknownFields()
+ return d
+}